repo_name
string
path
string
copies
string
size
string
content
string
license
string
omnirom/android_kernel_oppo_msm8974
drivers/media/video/pvrusb2/pvrusb2-io.c
12332
18789
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include "pvrusb2-io.h" #include "pvrusb2-debug.h" #include <linux/errno.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/mutex.h> static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state); #define BUFFER_SIG 0x47653271 // #define SANITY_CHECK_BUFFERS #ifdef SANITY_CHECK_BUFFERS #define BUFFER_CHECK(bp) do { \ if ((bp)->signature != BUFFER_SIG) { \ pvr2_trace(PVR2_TRACE_ERROR_LEGS, \ "Buffer %p is bad at %s:%d", \ (bp),__FILE__,__LINE__); \ pvr2_buffer_describe(bp,"BadSig"); \ BUG(); \ } \ } while (0) #else #define BUFFER_CHECK(bp) do {} while(0) #endif struct pvr2_stream { /* Buffers queued for reading */ struct list_head queued_list; unsigned int q_count; unsigned int q_bcount; /* Buffers with retrieved data */ struct list_head ready_list; unsigned int r_count; unsigned int r_bcount; /* Buffers available for use */ struct list_head idle_list; unsigned int i_count; unsigned int i_bcount; /* Pointers to all buffers */ struct pvr2_buffer **buffers; /* Array size of buffers */ unsigned int buffer_slot_count; /* Total buffers actually in circulation */ unsigned int buffer_total_count; /* Designed number of buffers to be in circulation */ unsigned int buffer_target_count; /* Executed when ready list become non-empty */ 
pvr2_stream_callback callback_func; void *callback_data; /* Context for transfer endpoint */ struct usb_device *dev; int endpoint; /* Overhead for mutex enforcement */ spinlock_t list_lock; struct mutex mutex; /* Tracking state for tolerating errors */ unsigned int fail_count; unsigned int fail_tolerance; unsigned int buffers_processed; unsigned int buffers_failed; unsigned int bytes_processed; }; struct pvr2_buffer { int id; int signature; enum pvr2_buffer_state state; void *ptr; /* Pointer to storage area */ unsigned int max_count; /* Size of storage area */ unsigned int used_count; /* Amount of valid data in storage area */ int status; /* Transfer result status */ struct pvr2_stream *stream; struct list_head list_overhead; struct urb *purb; }; static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st) { switch (st) { case pvr2_buffer_state_none: return "none"; case pvr2_buffer_state_idle: return "idle"; case pvr2_buffer_state_queued: return "queued"; case pvr2_buffer_state_ready: return "ready"; } return "unknown"; } #ifdef SANITY_CHECK_BUFFERS static void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg) { pvr2_trace(PVR2_TRACE_INFO, "buffer%s%s %p state=%s id=%d status=%d" " stream=%p purb=%p sig=0x%x", (msg ? " " : ""), (msg ? msg : ""), bp, (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"), (bp ? bp->id : 0), (bp ? bp->status : 0), (bp ? bp->stream : NULL), (bp ? bp->purb : NULL), (bp ? 
bp->signature : 0)); } #endif /* SANITY_CHECK_BUFFERS */ static void pvr2_buffer_remove(struct pvr2_buffer *bp) { unsigned int *cnt; unsigned int *bcnt; unsigned int ccnt; struct pvr2_stream *sp = bp->stream; switch (bp->state) { case pvr2_buffer_state_idle: cnt = &sp->i_count; bcnt = &sp->i_bcount; ccnt = bp->max_count; break; case pvr2_buffer_state_queued: cnt = &sp->q_count; bcnt = &sp->q_bcount; ccnt = bp->max_count; break; case pvr2_buffer_state_ready: cnt = &sp->r_count; bcnt = &sp->r_bcount; ccnt = bp->used_count; break; default: return; } list_del_init(&bp->list_overhead); (*cnt)--; (*bcnt) -= ccnt; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/" " bufferPool %8s dec cap=%07d cnt=%02d", pvr2_buffer_state_decode(bp->state),*bcnt,*cnt); bp->state = pvr2_buffer_state_none; } static void pvr2_buffer_set_none(struct pvr2_buffer *bp) { unsigned long irq_flags; struct pvr2_stream *sp; BUFFER_CHECK(bp); sp = bp->stream; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s", bp, pvr2_buffer_state_decode(bp->state), pvr2_buffer_state_decode(pvr2_buffer_state_none)); spin_lock_irqsave(&sp->list_lock,irq_flags); pvr2_buffer_remove(bp); spin_unlock_irqrestore(&sp->list_lock,irq_flags); } static int pvr2_buffer_set_ready(struct pvr2_buffer *bp) { int fl; unsigned long irq_flags; struct pvr2_stream *sp; BUFFER_CHECK(bp); sp = bp->stream; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s", bp, pvr2_buffer_state_decode(bp->state), pvr2_buffer_state_decode(pvr2_buffer_state_ready)); spin_lock_irqsave(&sp->list_lock,irq_flags); fl = (sp->r_count == 0); pvr2_buffer_remove(bp); list_add_tail(&bp->list_overhead,&sp->ready_list); bp->state = pvr2_buffer_state_ready; (sp->r_count)++; sp->r_bcount += bp->used_count; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/" " bufferPool %8s inc cap=%07d cnt=%02d", pvr2_buffer_state_decode(bp->state), sp->r_bcount,sp->r_count); 
spin_unlock_irqrestore(&sp->list_lock,irq_flags); return fl; } static void pvr2_buffer_set_idle(struct pvr2_buffer *bp) { unsigned long irq_flags; struct pvr2_stream *sp; BUFFER_CHECK(bp); sp = bp->stream; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s", bp, pvr2_buffer_state_decode(bp->state), pvr2_buffer_state_decode(pvr2_buffer_state_idle)); spin_lock_irqsave(&sp->list_lock,irq_flags); pvr2_buffer_remove(bp); list_add_tail(&bp->list_overhead,&sp->idle_list); bp->state = pvr2_buffer_state_idle; (sp->i_count)++; sp->i_bcount += bp->max_count; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/" " bufferPool %8s inc cap=%07d cnt=%02d", pvr2_buffer_state_decode(bp->state), sp->i_bcount,sp->i_count); spin_unlock_irqrestore(&sp->list_lock,irq_flags); } static void pvr2_buffer_set_queued(struct pvr2_buffer *bp) { unsigned long irq_flags; struct pvr2_stream *sp; BUFFER_CHECK(bp); sp = bp->stream; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s", bp, pvr2_buffer_state_decode(bp->state), pvr2_buffer_state_decode(pvr2_buffer_state_queued)); spin_lock_irqsave(&sp->list_lock,irq_flags); pvr2_buffer_remove(bp); list_add_tail(&bp->list_overhead,&sp->queued_list); bp->state = pvr2_buffer_state_queued; (sp->q_count)++; sp->q_bcount += bp->max_count; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/" " bufferPool %8s inc cap=%07d cnt=%02d", pvr2_buffer_state_decode(bp->state), sp->q_bcount,sp->q_count); spin_unlock_irqrestore(&sp->list_lock,irq_flags); } static void pvr2_buffer_wipe(struct pvr2_buffer *bp) { if (bp->state == pvr2_buffer_state_queued) { usb_kill_urb(bp->purb); } } static int pvr2_buffer_init(struct pvr2_buffer *bp, struct pvr2_stream *sp, unsigned int id) { memset(bp,0,sizeof(*bp)); bp->signature = BUFFER_SIG; bp->id = id; pvr2_trace(PVR2_TRACE_BUF_POOL, "/*---TRACE_FLOW---*/ bufferInit %p stream=%p",bp,sp); bp->stream = sp; bp->state = pvr2_buffer_state_none; 
INIT_LIST_HEAD(&bp->list_overhead); bp->purb = usb_alloc_urb(0,GFP_KERNEL); if (! bp->purb) return -ENOMEM; #ifdef SANITY_CHECK_BUFFERS pvr2_buffer_describe(bp,"create"); #endif return 0; } static void pvr2_buffer_done(struct pvr2_buffer *bp) { #ifdef SANITY_CHECK_BUFFERS pvr2_buffer_describe(bp,"delete"); #endif pvr2_buffer_wipe(bp); pvr2_buffer_set_none(bp); bp->signature = 0; bp->stream = NULL; usb_free_urb(bp->purb); pvr2_trace(PVR2_TRACE_BUF_POOL,"/*---TRACE_FLOW---*/" " bufferDone %p",bp); } static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt) { int ret; unsigned int scnt; /* Allocate buffers pointer array in multiples of 32 entries */ if (cnt == sp->buffer_total_count) return 0; pvr2_trace(PVR2_TRACE_BUF_POOL, "/*---TRACE_FLOW---*/ poolResize " " stream=%p cur=%d adj=%+d", sp, sp->buffer_total_count, cnt-sp->buffer_total_count); scnt = cnt & ~0x1f; if (cnt > scnt) scnt += 0x20; if (cnt > sp->buffer_total_count) { if (scnt > sp->buffer_slot_count) { struct pvr2_buffer **nb; nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL); if (!nb) return -ENOMEM; if (sp->buffer_slot_count) { memcpy(nb,sp->buffers, sp->buffer_slot_count * sizeof(*nb)); kfree(sp->buffers); } sp->buffers = nb; sp->buffer_slot_count = scnt; } while (sp->buffer_total_count < cnt) { struct pvr2_buffer *bp; bp = kmalloc(sizeof(*bp),GFP_KERNEL); if (!bp) return -ENOMEM; ret = pvr2_buffer_init(bp,sp,sp->buffer_total_count); if (ret) { kfree(bp); return -ENOMEM; } sp->buffers[sp->buffer_total_count] = bp; (sp->buffer_total_count)++; pvr2_buffer_set_idle(bp); } } else { while (sp->buffer_total_count > cnt) { struct pvr2_buffer *bp; bp = sp->buffers[sp->buffer_total_count - 1]; /* Paranoia */ sp->buffers[sp->buffer_total_count - 1] = NULL; (sp->buffer_total_count)--; pvr2_buffer_done(bp); kfree(bp); } if (scnt < sp->buffer_slot_count) { struct pvr2_buffer **nb = NULL; if (scnt) { nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL); if (!nb) return -ENOMEM; memcpy(nb,sp->buffers,scnt * 
sizeof(*nb)); } kfree(sp->buffers); sp->buffers = nb; sp->buffer_slot_count = scnt; } } return 0; } static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp) { struct pvr2_buffer *bp; unsigned int cnt; if (sp->buffer_total_count == sp->buffer_target_count) return 0; pvr2_trace(PVR2_TRACE_BUF_POOL, "/*---TRACE_FLOW---*/" " poolCheck stream=%p cur=%d tgt=%d", sp,sp->buffer_total_count,sp->buffer_target_count); if (sp->buffer_total_count < sp->buffer_target_count) { return pvr2_stream_buffer_count(sp,sp->buffer_target_count); } cnt = 0; while ((sp->buffer_total_count - cnt) > sp->buffer_target_count) { bp = sp->buffers[sp->buffer_total_count - (cnt + 1)]; if (bp->state != pvr2_buffer_state_idle) break; cnt++; } if (cnt) { pvr2_stream_buffer_count(sp,sp->buffer_total_count - cnt); } return 0; } static void pvr2_stream_internal_flush(struct pvr2_stream *sp) { struct list_head *lp; struct pvr2_buffer *bp1; while ((lp = sp->queued_list.next) != &sp->queued_list) { bp1 = list_entry(lp,struct pvr2_buffer,list_overhead); pvr2_buffer_wipe(bp1); /* At this point, we should be guaranteed that no completion callback may happen on this buffer. But it's possible that it might have completed after we noticed it but before we wiped it. So double check its status here first. 
*/ if (bp1->state != pvr2_buffer_state_queued) continue; pvr2_buffer_set_idle(bp1); } if (sp->buffer_total_count != sp->buffer_target_count) { pvr2_stream_achieve_buffer_count(sp); } } static void pvr2_stream_init(struct pvr2_stream *sp) { spin_lock_init(&sp->list_lock); mutex_init(&sp->mutex); INIT_LIST_HEAD(&sp->queued_list); INIT_LIST_HEAD(&sp->ready_list); INIT_LIST_HEAD(&sp->idle_list); } static void pvr2_stream_done(struct pvr2_stream *sp) { mutex_lock(&sp->mutex); do { pvr2_stream_internal_flush(sp); pvr2_stream_buffer_count(sp,0); } while (0); mutex_unlock(&sp->mutex); } static void buffer_complete(struct urb *urb) { struct pvr2_buffer *bp = urb->context; struct pvr2_stream *sp; unsigned long irq_flags; BUFFER_CHECK(bp); sp = bp->stream; bp->used_count = 0; bp->status = 0; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/ bufferComplete %p stat=%d cnt=%d", bp,urb->status,urb->actual_length); spin_lock_irqsave(&sp->list_lock,irq_flags); if ((!(urb->status)) || (urb->status == -ENOENT) || (urb->status == -ECONNRESET) || (urb->status == -ESHUTDOWN)) { (sp->buffers_processed)++; sp->bytes_processed += urb->actual_length; bp->used_count = urb->actual_length; if (sp->fail_count) { pvr2_trace(PVR2_TRACE_TOLERANCE, "stream %p transfer ok" " - fail count reset",sp); sp->fail_count = 0; } } else if (sp->fail_count < sp->fail_tolerance) { // We can tolerate this error, because we're below the // threshold... 
(sp->fail_count)++; (sp->buffers_failed)++; pvr2_trace(PVR2_TRACE_TOLERANCE, "stream %p ignoring error %d" " - fail count increased to %u", sp,urb->status,sp->fail_count); } else { (sp->buffers_failed)++; bp->status = urb->status; } spin_unlock_irqrestore(&sp->list_lock,irq_flags); pvr2_buffer_set_ready(bp); if (sp && sp->callback_func) { sp->callback_func(sp->callback_data); } } struct pvr2_stream *pvr2_stream_create(void) { struct pvr2_stream *sp; sp = kzalloc(sizeof(*sp),GFP_KERNEL); if (!sp) return sp; pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_create: sp=%p",sp); pvr2_stream_init(sp); return sp; } void pvr2_stream_destroy(struct pvr2_stream *sp) { if (!sp) return; pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_destroy: sp=%p",sp); pvr2_stream_done(sp); kfree(sp); } void pvr2_stream_setup(struct pvr2_stream *sp, struct usb_device *dev, int endpoint, unsigned int tolerance) { mutex_lock(&sp->mutex); do { pvr2_stream_internal_flush(sp); sp->dev = dev; sp->endpoint = endpoint; sp->fail_tolerance = tolerance; } while(0); mutex_unlock(&sp->mutex); } void pvr2_stream_set_callback(struct pvr2_stream *sp, pvr2_stream_callback func, void *data) { unsigned long irq_flags; mutex_lock(&sp->mutex); do { spin_lock_irqsave(&sp->list_lock,irq_flags); sp->callback_data = data; sp->callback_func = func; spin_unlock_irqrestore(&sp->list_lock,irq_flags); } while(0); mutex_unlock(&sp->mutex); } void pvr2_stream_get_stats(struct pvr2_stream *sp, struct pvr2_stream_stats *stats, int zero_counts) { unsigned long irq_flags; spin_lock_irqsave(&sp->list_lock,irq_flags); if (stats) { stats->buffers_in_queue = sp->q_count; stats->buffers_in_idle = sp->i_count; stats->buffers_in_ready = sp->r_count; stats->buffers_processed = sp->buffers_processed; stats->buffers_failed = sp->buffers_failed; stats->bytes_processed = sp->bytes_processed; } if (zero_counts) { sp->buffers_processed = 0; sp->buffers_failed = 0; sp->bytes_processed = 0; } spin_unlock_irqrestore(&sp->list_lock,irq_flags); } /* Query / set 
the nominal buffer count */ int pvr2_stream_get_buffer_count(struct pvr2_stream *sp) { return sp->buffer_target_count; } int pvr2_stream_set_buffer_count(struct pvr2_stream *sp,unsigned int cnt) { int ret; if (sp->buffer_target_count == cnt) return 0; mutex_lock(&sp->mutex); do { sp->buffer_target_count = cnt; ret = pvr2_stream_achieve_buffer_count(sp); } while(0); mutex_unlock(&sp->mutex); return ret; } struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp) { struct list_head *lp = sp->idle_list.next; if (lp == &sp->idle_list) return NULL; return list_entry(lp,struct pvr2_buffer,list_overhead); } struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp) { struct list_head *lp = sp->ready_list.next; if (lp == &sp->ready_list) return NULL; return list_entry(lp,struct pvr2_buffer,list_overhead); } struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id) { if (id < 0) return NULL; if (id >= sp->buffer_total_count) return NULL; return sp->buffers[id]; } int pvr2_stream_get_ready_count(struct pvr2_stream *sp) { return sp->r_count; } void pvr2_stream_kill(struct pvr2_stream *sp) { struct pvr2_buffer *bp; mutex_lock(&sp->mutex); do { pvr2_stream_internal_flush(sp); while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) { pvr2_buffer_set_idle(bp); } if (sp->buffer_total_count != sp->buffer_target_count) { pvr2_stream_achieve_buffer_count(sp); } } while(0); mutex_unlock(&sp->mutex); } int pvr2_buffer_queue(struct pvr2_buffer *bp) { #undef SEED_BUFFER #ifdef SEED_BUFFER unsigned int idx; unsigned int val; #endif int ret = 0; struct pvr2_stream *sp; if (!bp) return -EINVAL; sp = bp->stream; mutex_lock(&sp->mutex); do { pvr2_buffer_wipe(bp); if (!sp->dev) { ret = -EIO; break; } pvr2_buffer_set_queued(bp); #ifdef SEED_BUFFER for (idx = 0; idx < (bp->max_count) / 4; idx++) { val = bp->id << 24; val |= idx; ((unsigned int *)(bp->ptr))[idx] = val; } #endif bp->status = -EINPROGRESS; usb_fill_bulk_urb(bp->purb, // struct urb *urb 
sp->dev, // struct usb_device *dev // endpoint (below) usb_rcvbulkpipe(sp->dev,sp->endpoint), bp->ptr, // void *transfer_buffer bp->max_count, // int buffer_length buffer_complete, bp); usb_submit_urb(bp->purb,GFP_KERNEL); } while(0); mutex_unlock(&sp->mutex); return ret; } int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt) { int ret = 0; unsigned long irq_flags; struct pvr2_stream *sp; if (!bp) return -EINVAL; sp = bp->stream; mutex_lock(&sp->mutex); do { spin_lock_irqsave(&sp->list_lock,irq_flags); if (bp->state != pvr2_buffer_state_idle) { ret = -EPERM; } else { bp->ptr = ptr; bp->stream->i_bcount -= bp->max_count; bp->max_count = cnt; bp->stream->i_bcount += bp->max_count; pvr2_trace(PVR2_TRACE_BUF_FLOW, "/*---TRACE_FLOW---*/ bufferPool " " %8s cap cap=%07d cnt=%02d", pvr2_buffer_state_decode( pvr2_buffer_state_idle), bp->stream->i_bcount,bp->stream->i_count); } spin_unlock_irqrestore(&sp->list_lock,irq_flags); } while(0); mutex_unlock(&sp->mutex); return ret; } unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp) { return bp->used_count; } int pvr2_buffer_get_status(struct pvr2_buffer *bp) { return bp->status; } int pvr2_buffer_get_id(struct pvr2_buffer *bp) { return bp->id; } /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 75 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
dirkbehme/linux-renesas-rcar-gen3
drivers/staging/lustre/lustre/llite/xattr_cache.c
45
13384
/* * Copyright 2012 Xyratex Technology Limited * * Copyright (c) 2013, 2015, Intel Corporation. * * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com> * */ #define DEBUG_SUBSYSTEM S_LLITE #include <linux/fs.h> #include <linux/sched.h> #include <linux/mm.h> #include "../include/obd_support.h" #include "../include/lustre_lite.h" #include "../include/lustre_dlm.h" #include "../include/lustre_ver.h" #include "llite_internal.h" /* If we ever have hundreds of extended attributes, we might want to consider * using a hash or a tree structure instead of list for faster lookups. */ struct ll_xattr_entry { struct list_head xe_list; /* protected with * lli_xattrs_list_rwsem */ char *xe_name; /* xattr name, \0-terminated */ char *xe_value; /* xattr value */ unsigned xe_namelen; /* strlen(xe_name) + 1 */ unsigned xe_vallen; /* xattr value length */ }; static struct kmem_cache *xattr_kmem; static struct lu_kmem_descr xattr_caches[] = { { .ckd_cache = &xattr_kmem, .ckd_name = "xattr_kmem", .ckd_size = sizeof(struct ll_xattr_entry) }, { .ckd_cache = NULL } }; int ll_xattr_init(void) { return lu_kmem_init(xattr_caches); } void ll_xattr_fini(void) { lu_kmem_fini(xattr_caches); } /** * Initializes xattr cache for an inode. * * This initializes the xattr list and marks cache presence. */ static void ll_xattr_cache_init(struct ll_inode_info *lli) { LASSERT(lli != NULL); INIT_LIST_HEAD(&lli->lli_xattrs); lli->lli_flags |= LLIF_XATTR_CACHE; } /** * This looks for a specific extended attribute. * * Find in @cache and return @xattr_name attribute in @xattr, * for the NULL @xattr_name return the first cached @xattr. 
* * \retval 0 success * \retval -ENODATA if not found */ static int ll_xattr_cache_find(struct list_head *cache, const char *xattr_name, struct ll_xattr_entry **xattr) { struct ll_xattr_entry *entry; list_for_each_entry(entry, cache, xe_list) { /* xattr_name == NULL means look for any entry */ if (xattr_name == NULL || strcmp(xattr_name, entry->xe_name) == 0) { *xattr = entry; CDEBUG(D_CACHE, "find: [%s]=%.*s\n", entry->xe_name, entry->xe_vallen, entry->xe_value); return 0; } } return -ENODATA; } /** * This adds an xattr. * * Add @xattr_name attr with @xattr_val value and @xattr_val_len length, * * \retval 0 success * \retval -ENOMEM if no memory could be allocated for the cached attr * \retval -EPROTO if duplicate xattr is being added */ static int ll_xattr_cache_add(struct list_head *cache, const char *xattr_name, const char *xattr_val, unsigned xattr_val_len) { struct ll_xattr_entry *xattr; if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) { CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name); return -EPROTO; } xattr = kmem_cache_alloc(xattr_kmem, GFP_NOFS | __GFP_ZERO); if (xattr == NULL) { CDEBUG(D_CACHE, "failed to allocate xattr\n"); return -ENOMEM; } xattr->xe_name = kstrdup(xattr_name, GFP_NOFS); if (!xattr->xe_name) { CDEBUG(D_CACHE, "failed to alloc xattr name %u\n", xattr->xe_namelen); goto err_name; } xattr->xe_value = kmemdup(xattr_val, xattr_val_len, GFP_NOFS); if (!xattr->xe_value) goto err_value; xattr->xe_vallen = xattr_val_len; list_add(&xattr->xe_list, cache); CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, xattr_val_len, xattr_val); return 0; err_value: kfree(xattr->xe_name); err_name: kmem_cache_free(xattr_kmem, xattr); return -ENOMEM; } /** * This removes an extended attribute from cache. * * Remove @xattr_name attribute from @cache. 
* * \retval 0 success * \retval -ENODATA if @xattr_name is not cached */ static int ll_xattr_cache_del(struct list_head *cache, const char *xattr_name) { struct ll_xattr_entry *xattr; CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name); if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) { list_del(&xattr->xe_list); kfree(xattr->xe_name); kfree(xattr->xe_value); kmem_cache_free(xattr_kmem, xattr); return 0; } return -ENODATA; } /** * This iterates cached extended attributes. * * Walk over cached attributes in @cache and * fill in @xld_buffer or only calculate buffer * size if @xld_buffer is NULL. * * \retval >= 0 buffer list size * \retval -ENODATA if the list cannot fit @xld_size buffer */ static int ll_xattr_cache_list(struct list_head *cache, char *xld_buffer, int xld_size) { struct ll_xattr_entry *xattr, *tmp; int xld_tail = 0; list_for_each_entry_safe(xattr, tmp, cache, xe_list) { CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n", xld_buffer, xld_tail, xattr->xe_name); if (xld_buffer) { xld_size -= xattr->xe_namelen; if (xld_size < 0) break; memcpy(&xld_buffer[xld_tail], xattr->xe_name, xattr->xe_namelen); } xld_tail += xattr->xe_namelen; } if (xld_size < 0) return -ERANGE; return xld_tail; } /** * Check if the xattr cache is initialized (filled). * * \retval 0 @cache is not initialized * \retval 1 @cache is initialized */ static int ll_xattr_cache_valid(struct ll_inode_info *lli) { return !!(lli->lli_flags & LLIF_XATTR_CACHE); } /** * This finalizes the xattr cache. * * Free all xattr memory. @lli is the inode info pointer. 
* * \retval 0 no error occurred */ static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli) { if (!ll_xattr_cache_valid(lli)) return 0; while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0) ; /* empty loop */ lli->lli_flags &= ~LLIF_XATTR_CACHE; return 0; } int ll_xattr_cache_destroy(struct inode *inode) { struct ll_inode_info *lli = ll_i2info(inode); int rc; down_write(&lli->lli_xattrs_list_rwsem); rc = ll_xattr_cache_destroy_locked(lli); up_write(&lli->lli_xattrs_list_rwsem); return rc; } /** * Match or enqueue a PR lock. * * Find or request an LDLM lock with xattr data. * Since LDLM does not provide API for atomic match_or_enqueue, * the function handles it with a separate enq lock. * If successful, the function exits with the list lock held. * * \retval 0 no error occurred * \retval -ENOMEM not enough memory */ static int ll_xattr_find_get_lock(struct inode *inode, struct lookup_intent *oit, struct ptlrpc_request **req) { ldlm_mode_t mode; struct lustre_handle lockh = { 0 }; struct md_op_data *op_data; struct ll_inode_info *lli = ll_i2info(inode); struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS, .ei_mode = it_to_lock_mode(oit), .ei_cb_bl = ll_md_blocking_ast, .ei_cb_cp = ldlm_completion_ast }; struct ll_sb_info *sbi = ll_i2sbi(inode); struct obd_export *exp = sbi->ll_md_exp; int rc; mutex_lock(&lli->lli_xattrs_enq_lock); /* inode may have been shrunk and recreated, so data is gone, match lock * only when data exists. */ if (ll_xattr_cache_valid(lli)) { /* Try matching first. */ mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0, LCK_PR); if (mode != 0) { /* fake oit in mdc_revalidate_lock() manner */ oit->d.lustre.it_lock_handle = lockh.cookie; oit->d.lustre.it_lock_mode = mode; goto out; } } /* Enqueue if the lock isn't cached locally. 
*/ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0, LUSTRE_OPC_ANY, NULL); if (IS_ERR(op_data)) { mutex_unlock(&lli->lli_xattrs_enq_lock); return PTR_ERR(op_data); } op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS; rc = md_enqueue(exp, &einfo, oit, op_data, &lockh, NULL, 0, NULL, 0); ll_finish_md_op_data(op_data); if (rc < 0) { CDEBUG(D_CACHE, "md_intent_lock failed with %d for fid "DFID"\n", rc, PFID(ll_inode2fid(inode))); mutex_unlock(&lli->lli_xattrs_enq_lock); return rc; } *req = (struct ptlrpc_request *)oit->d.lustre.it_data; out: down_write(&lli->lli_xattrs_list_rwsem); mutex_unlock(&lli->lli_xattrs_enq_lock); return 0; } /** * Refill the xattr cache. * * Fetch and cache the whole of xattrs for @inode, acquiring * a read or a write xattr lock depending on operation in @oit. * Intent is dropped on exit unless the operation is setxattr. * * \retval 0 no error occurred * \retval -EPROTO network protocol error * \retval -ENOMEM not enough memory for the cache */ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit) { struct ll_sb_info *sbi = ll_i2sbi(inode); struct ptlrpc_request *req = NULL; const char *xdata, *xval, *xtail, *xvtail; struct ll_inode_info *lli = ll_i2info(inode); struct mdt_body *body; __u32 *xsizes; int rc, i; rc = ll_xattr_find_get_lock(inode, oit, &req); if (rc) goto out_no_unlock; /* Do we have the data at this point? */ if (ll_xattr_cache_valid(lli)) { ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1); rc = 0; goto out_maybe_drop; } /* Matched but no cache? Cancelled on error by a parallel refill. 
*/ if (unlikely(req == NULL)) { CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n"); rc = -EIO; goto out_maybe_drop; } if (oit->d.lustre.it_status < 0) { CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n", oit->d.lustre.it_status, PFID(ll_inode2fid(inode))); rc = oit->d.lustre.it_status; /* xattr data is so large that we don't want to cache it */ if (rc == -ERANGE) rc = -EAGAIN; goto out_destroy; } body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); if (body == NULL) { CERROR("no MDT BODY in the refill xattr reply\n"); rc = -EPROTO; goto out_destroy; } /* do not need swab xattr data */ xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA, body->eadatasize); xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS, body->aclsize); xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS, body->max_mdsize * sizeof(__u32)); if (xdata == NULL || xval == NULL || xsizes == NULL) { CERROR("wrong setxattr reply\n"); rc = -EPROTO; goto out_destroy; } xtail = xdata + body->eadatasize; xvtail = xval + body->aclsize; CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail); ll_xattr_cache_init(lli); for (i = 0; i < body->max_mdsize; i++) { CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval); /* Perform consistency checks: attr names and vals in pill */ if (memchr(xdata, 0, xtail - xdata) == NULL) { CERROR("xattr protocol violation (names are broken)\n"); rc = -EPROTO; } else if (xval + *xsizes > xvtail) { CERROR("xattr protocol violation (vals are broken)\n"); rc = -EPROTO; } else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) { rc = -ENOMEM; } else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) { /* Filter out ACL ACCESS since it's cached separately */ CDEBUG(D_CACHE, "not caching %s\n", XATTR_NAME_ACL_ACCESS); rc = 0; } else { rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval, *xsizes); } if (rc < 0) { ll_xattr_cache_destroy_locked(lli); goto out_destroy; } xdata += strlen(xdata) + 1; xval += *xsizes; 
xsizes++; } if (xdata != xtail || xval != xvtail) CERROR("a hole in xattr data\n"); ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL); goto out_maybe_drop; out_maybe_drop: ll_intent_drop_lock(oit); if (rc != 0) up_write(&lli->lli_xattrs_list_rwsem); out_no_unlock: ptlrpc_req_finished(req); return rc; out_destroy: up_write(&lli->lli_xattrs_list_rwsem); ldlm_lock_decref_and_cancel((struct lustre_handle *) &oit->d.lustre.it_lock_handle, oit->d.lustre.it_lock_mode); goto out_no_unlock; } /** * Get an xattr value or list xattrs using the write-through cache. * * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode. * The resulting value/list is stored in @buffer if the former * is not larger than @size. * * \retval 0 no error occurred * \retval -EPROTO network protocol error * \retval -ENOMEM not enough memory for the cache * \retval -ERANGE the buffer is not large enough * \retval -ENODATA no such attr or the list is empty */ int ll_xattr_cache_get(struct inode *inode, const char *name, char *buffer, size_t size, __u64 valid) { struct lookup_intent oit = { .it_op = IT_GETXATTR }; struct ll_inode_info *lli = ll_i2info(inode); int rc = 0; LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS)); down_read(&lli->lli_xattrs_list_rwsem); if (!ll_xattr_cache_valid(lli)) { up_read(&lli->lli_xattrs_list_rwsem); rc = ll_xattr_cache_refill(inode, &oit); if (rc) return rc; downgrade_write(&lli->lli_xattrs_list_rwsem); } else { ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1); } if (valid & OBD_MD_FLXATTR) { struct ll_xattr_entry *xattr; rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr); if (rc == 0) { rc = xattr->xe_vallen; /* zero size means we are only requested size in rc */ if (size != 0) { if (size >= xattr->xe_vallen) memcpy(buffer, xattr->xe_value, xattr->xe_vallen); else rc = -ERANGE; } } } else if (valid & OBD_MD_FLXATTRLS) { rc = 
ll_xattr_cache_list(&lli->lli_xattrs, size ? buffer : NULL, size); } goto out; out: up_read(&lli->lli_xattrs_list_rwsem); return rc; }
gpl-2.0
sinutech/sinuos-kernel
arch/sh/boards/mach-kfr2r09/setup.c
45
15389
/*
 * KFR2R09 board support code
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/onenand.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
#include <linux/i2c.h>
#include <linux/usb/r8a66597.h>
#include <linux/videodev2.h>
#include <linux/sh_intc.h>
#include <media/rj54n1cb0c.h>
#include <media/soc_camera.h>
#include <media/sh_mobile_ceu.h>
#include <video/sh_mobile_lcdc.h>
#include <asm/suspend.h>
#include <asm/clock.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <cpu/sh7724.h>
#include <mach/kfr2r09.h>

/* NOR boot flash on CS0: first 4 MiB holds the boot loader (read-only) */
static struct mtd_partition kfr2r09_nor_flash_partitions[] =
{
	{
		.name = "boot",
		.offset = 0,
		.size = (4 * 1024 * 1024),
		.mask_flags = MTD_WRITEABLE,	/* Read-only */
	},
	{
		.name = "other",
		.offset = MTDPART_OFS_APPEND,
		.size = MTDPART_SIZ_FULL,
	},
};

static struct physmap_flash_data kfr2r09_nor_flash_data = {
	.width		= 2,
	.parts		= kfr2r09_nor_flash_partitions,
	.nr_parts	= ARRAY_SIZE(kfr2r09_nor_flash_partitions),
};

static struct resource kfr2r09_nor_flash_resources[] = {
	[0] = {
		.name	= "NOR Flash",
		.start	= 0x00000000,
		.end	= 0x03ffffff,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device kfr2r09_nor_flash_device = {
	.name		= "physmap-flash",
	.resource	= kfr2r09_nor_flash_resources,
	.num_resources	= ARRAY_SIZE(kfr2r09_nor_flash_resources),
	.dev		= {
		.platform_data = &kfr2r09_nor_flash_data,
	},
};

/* OneNAND flash mapped on CS4 */
static struct resource kfr2r09_nand_flash_resources[] = {
	[0] = {
		.name	= "NAND Flash",
		.start	= 0x10000000,
		.end	= 0x1001ffff,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device kfr2r09_nand_flash_device = {
	.name		= "onenand-flash",
	.resource	= kfr2r09_nand_flash_resources,
	.num_resources	= ARRAY_SIZE(kfr2r09_nand_flash_resources),
};

/* Key matrix scanned by the KEYSC block; unused positions are 0 */
static struct sh_keysc_info kfr2r09_sh_keysc_info = {
	.mode = SH_KEYSC_MODE_1, /* KEYOUT0->4, KEYIN0->4 */
	.scan_timing = 3,
	.delay = 10,
	.keycodes = {
		KEY_PHONE, KEY_CLEAR, KEY_MAIL, KEY_WWW, KEY_ENTER,
		KEY_1, KEY_2, KEY_3, 0, KEY_UP,
		KEY_4, KEY_5, KEY_6, 0, KEY_LEFT,
		KEY_7, KEY_8, KEY_9, KEY_PROG1, KEY_RIGHT,
		KEY_S, KEY_0, KEY_P, KEY_PROG2, KEY_DOWN,
		0, 0, 0, 0, 0
	},
};

static struct resource kfr2r09_sh_keysc_resources[] = {
	[0] = {
		.name	= "KEYSC",
		.start	= 0x044b0000,
		.end	= 0x044b000f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0xbe0),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device kfr2r09_sh_keysc_device = {
	.name		= "sh_keysc",
	.id		= 0, /* "keysc0" clock */
	.num_resources	= ARRAY_SIZE(kfr2r09_sh_keysc_resources),
	.resource	= kfr2r09_sh_keysc_resources,
	.dev	= {
		.platform_data	= &kfr2r09_sh_keysc_info,
	},
};

/* 240x400 portrait panel driven over the LCDC SYS bus interface */
static const struct fb_videomode kfr2r09_lcdc_modes[] = {
	{
		.name = "TX07D34VM0AAA",
		.xres = 240,
		.yres = 400,
		.left_margin = 0,
		.right_margin = 16,
		.hsync_len = 8,
		.upper_margin = 0,
		.lower_margin = 1,
		.vsync_len = 1,
		.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},
};

static struct sh_mobile_lcdc_info kfr2r09_sh_lcdc_info = {
	.clock_source = LCDC_CLK_BUS,
	.ch[0] = {
		.chan = LCDC_CHAN_MAINLCD,
		.fourcc = V4L2_PIX_FMT_RGB565,
		.interface_type = SYS18,
		.clock_divider = 6,
		.flags = LCDC_FLAGS_DWPOL,
		.lcd_modes = kfr2r09_lcdc_modes,
		.num_modes = ARRAY_SIZE(kfr2r09_lcdc_modes),
		.panel_cfg = {
			.width = 35,
			.height = 58,
			/* board-specific panel hooks from <mach/kfr2r09.h> */
			.setup_sys = kfr2r09_lcd_setup,
			.start_transfer = kfr2r09_lcd_start,
			.display_on = kfr2r09_lcd_on,
			.display_off = kfr2r09_lcd_off,
		},
		.sys_bus_cfg = {
			.ldmt2r = 0x07010904,
			.ldmt3r = 0x14012914,

			/* set 1s delay to encourage fsync() */
			.deferred_io_msec = 1000,
		},
	}
};

static struct resource kfr2r09_sh_lcdc_resources[] = {
	[0] = {
		.name	= "LCDC",
		.start	= 0xfe940000, /* P4-only space */
		.end	= 0xfe942fff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0xf40),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device kfr2r09_sh_lcdc_device = {
	.name		= "sh_mobile_lcdc_fb",
	.num_resources	= ARRAY_SIZE(kfr2r09_sh_lcdc_resources),
	.resource	= kfr2r09_sh_lcdc_resources,
	.dev	= {
		.platform_data	= &kfr2r09_sh_lcdc_info,
	},
};

/* On-chip USB function controller (R8A66597-compatible, gadget mode) */
static struct r8a66597_platdata kfr2r09_usb0_gadget_data = {
	.on_chip = 1,
};

static struct resource kfr2r09_usb0_gadget_resources[] = {
	[0] = {
		.start	= 0x04d80000,
		.end	= 0x04d80123,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0xa20),
		.end	= evt2irq(0xa20),
		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
	},
};

static struct platform_device kfr2r09_usb0_gadget_device = {
	.name		= "r8a66597_udc",
	.id		= 0,
	.dev = {
		.dma_mask		= NULL,		/*  not use dma */
		.coherent_dma_mask	= 0xffffffff,
		.platform_data		= &kfr2r09_usb0_gadget_data,
	},
	.num_resources	= ARRAY_SIZE(kfr2r09_usb0_gadget_resources),
	.resource	= kfr2r09_usb0_gadget_resources,
};

/* CEU capture unit wired to the camera over an 8-bit bus */
static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
	.flags = SH_CEU_FLAG_USE_8BIT_BUS,
};

static struct resource kfr2r09_ceu_resources[] = {
	[0] = {
		.name	= "CEU",
		.start	= 0xfe910000,
		.end	= 0xfe91009f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0x880),
		.end	= evt2irq(0x880),
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		/* place holder for contiguous memory */
	},
};

static struct platform_device kfr2r09_ceu_device = {
	.name		= "sh_mobile_ceu",
	.id             = 0, /* "ceu0" clock */
	.num_resources	= ARRAY_SIZE(kfr2r09_ceu_resources),
	.resource	= kfr2r09_ceu_resources,
	.dev	= {
		.platform_data	= &sh_mobile_ceu_info,
	},
};

static struct i2c_board_info kfr2r09_i2c_camera = {
	I2C_BOARD_INFO("rj54n1cb0c", 0x50),
};

static struct clk *camera_clk;

/* set VIO_CKO clock to 25MHz */
#define CEU_MCLK_FREQ 25000000

#define DRVCRB 0xA405018C

/*
 * Power the RJ54N1CB0C camera up (mode != 0) or down (mode == 0).
 *
 * Power-up: claim and program the VIO clock, set pad drive voltages in
 * DRVCRB, release the sensor reset via PTB4/PTB7 and start VIO_CKO.
 * Power-down and the error paths share the same teardown sequence via
 * the fall-through labels below.
 */
static int camera_power(struct device *dev, int mode)
{
	int ret;

	if (mode) {
		long rate;

		camera_clk = clk_get(NULL, "video_clk");
		if (IS_ERR(camera_clk))
			return PTR_ERR(camera_clk);

		rate = clk_round_rate(camera_clk, CEU_MCLK_FREQ);
		ret = clk_set_rate(camera_clk, rate);
		if (ret < 0)
			goto eclkrate;

		/* set DRVCRB
		 *
		 * use 1.8 V for VccQ_VIO
		 * use 2.85V for VccQ_SR
		 */
		__raw_writew((__raw_readw(DRVCRB) & ~0x0003) | 0x0001, DRVCRB);

		/* reset clear */
		ret = gpio_request(GPIO_PTB4, NULL);
		if (ret < 0)
			goto eptb4;
		ret = gpio_request(GPIO_PTB7, NULL);
		if (ret < 0)
			goto eptb7;

		ret = gpio_direction_output(GPIO_PTB4, 1);
		if (!ret)
			ret = gpio_direction_output(GPIO_PTB7, 1);
		if (ret < 0)
			goto egpioout;
		msleep(1);

		ret = clk_enable(camera_clk);	/* start VIO_CKO */
		if (ret < 0)
			goto eclkon;

		return 0;
	}

	ret = 0;

	/* power-down falls through the full unwind chain */
	clk_disable(camera_clk);
eclkon:
	gpio_set_value(GPIO_PTB7, 0);
egpioout:
	gpio_set_value(GPIO_PTB4, 0);
	gpio_free(GPIO_PTB7);
eptb7:
	gpio_free(GPIO_PTB4);
eptb4:
eclkrate:
	clk_put(camera_clk);
	return ret;
}

static struct rj54n1_pdata rj54n1_priv = {
	.mclk_freq	= CEU_MCLK_FREQ,
	.ioctl_high	= false,
};

static struct soc_camera_link rj54n1_link = {
	.power		= camera_power,
	.board_info	= &kfr2r09_i2c_camera,
	.i2c_adapter_id	= 1,
	.priv		= &rj54n1_priv,
};

static struct platform_device kfr2r09_camera = {
	.name	= "soc-camera-pdrv",
	.id	= 0,
	.dev	= {
		.platform_data = &rj54n1_link,
	},
};

static struct resource kfr2r09_sh_sdhi0_resources[] = {
	[0] = {
		.name	= "SDHI0",
		.start	= 0x04ce0000,
		.end	= 0x04ce00ff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= evt2irq(0xe80),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct sh_mobile_sdhi_info sh7724_sdhi0_data = {
	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
	.tmio_caps	= MMC_CAP_SDIO_IRQ,
};

static struct platform_device kfr2r09_sh_sdhi0_device = {
	.name		= "sh_mobile_sdhi",
	.num_resources	= ARRAY_SIZE(kfr2r09_sh_sdhi0_resources),
	.resource	= kfr2r09_sh_sdhi0_resources,
	.dev	= {
		.platform_data	= &sh7724_sdhi0_data,
	},
};

/*
 * Devices registered unconditionally at init; the USB gadget device is
 * added separately, only when a cable is detected (see below).
 */
static struct platform_device *kfr2r09_devices[] __initdata = {
	&kfr2r09_nor_flash_device,
	&kfr2r09_nand_flash_device,
	&kfr2r09_sh_keysc_device,
	&kfr2r09_sh_lcdc_device,
	&kfr2r09_ceu_device,
	&kfr2r09_camera,
	&kfr2r09_sh_sdhi0_device,
};

/* Bus State Controller chip-select timing registers */
#define BSC_CS0BCR 0xfec10004
#define BSC_CS0WCR 0xfec10024
#define BSC_CS4BCR 0xfec10010
#define BSC_CS4WCR 0xfec10030
#define PORT_MSELCRB 0xa4050182

#ifdef CONFIG_I2C
/*
 * Set bit 1 of register 0x13 in the chip at i2c address 0x09 using a
 * read-modify-write over i2c adapter 0. Needed before enabling the USB
 * function controller. Returns 0 on success, -ENODEV on any failure.
 */
static int kfr2r09_usb0_gadget_i2c_setup(void)
{
	struct i2c_adapter *a;
	struct i2c_msg msg;
	unsigned char buf[2];
	int ret;

	a = i2c_get_adapter(0);
	if (!a)
		return -ENODEV;

	/* set bit 1 (the second bit) of chip at 0x09, register 0x13 */
	buf[0] = 0x13;
	msg.addr = 0x09;
	msg.buf = buf;
	msg.len = 1;
	msg.flags = 0;
	ret = i2c_transfer(a, &msg, 1);
	if (ret != 1)
		return -ENODEV;

	buf[0] = 0;
	msg.addr = 0x09;
	msg.buf = buf;
	msg.len = 1;
	msg.flags = I2C_M_RD;
	ret = i2c_transfer(a, &msg, 1);
	if (ret != 1)
		return -ENODEV;

	buf[1] = buf[0] | (1 << 1);
	buf[0] = 0x13;

	msg.addr = 0x09;
	msg.buf = buf;
	msg.len = 2;
	msg.flags = 0;
	ret = i2c_transfer(a, &msg, 1);
	if (ret != 1)
		return -ENODEV;

	return 0;
}

/*
 * Same read-modify-write pattern as above, but setting bit 6 of
 * register 0x13 to route the serial console (see caller comment).
 */
static int kfr2r09_serial_i2c_setup(void)
{
	struct i2c_adapter *a;
	struct i2c_msg msg;
	unsigned char buf[2];
	int ret;

	a = i2c_get_adapter(0);
	if (!a)
		return -ENODEV;

	/* set bit 6 (the 7th bit) of chip at 0x09, register 0x13 */
	buf[0] = 0x13;
	msg.addr = 0x09;
	msg.buf = buf;
	msg.len = 1;
	msg.flags = 0;
	ret = i2c_transfer(a, &msg, 1);
	if (ret != 1)
		return -ENODEV;

	buf[0] = 0;
	msg.addr = 0x09;
	msg.buf = buf;
	msg.len = 1;
	msg.flags = I2C_M_RD;
	ret = i2c_transfer(a, &msg, 1);
	if (ret != 1)
		return -ENODEV;

	buf[1] = buf[0] | (1 << 6);
	buf[0] = 0x13;

	msg.addr = 0x09;
	msg.buf = buf;
	msg.len = 2;
	msg.flags = 0;
	ret = i2c_transfer(a, &msg, 1);
	if (ret != 1)
		return -ENODEV;

	return 0;
}
#else
/* without CONFIG_I2C the companion chip cannot be configured */
static int kfr2r09_usb0_gadget_i2c_setup(void)
{
	return -ENODEV;
}

static int kfr2r09_serial_i2c_setup(void)
{
	return -ENODEV;
}
#endif

/*
 * Detect a USB cable via the USB_DET pin (PTN4) and, if present,
 * configure the companion chip, pin mux and clocks for gadget mode.
 * Returns 0 when the gadget controller may be registered.
 */
static int kfr2r09_usb0_gadget_setup(void)
{
	int plugged_in;

	gpio_request(GPIO_PTN4, NULL); /* USB_DET */
	gpio_direction_input(GPIO_PTN4);
	plugged_in = gpio_get_value(GPIO_PTN4);
	if (!plugged_in)
		return -ENODEV; /* no cable plugged in */

	if (kfr2r09_usb0_gadget_i2c_setup() != 0)
		return -ENODEV; /* unable to configure using i2c */

	__raw_writew((__raw_readw(PORT_MSELCRB) & ~0xc000) | 0x8000,
		     PORT_MSELCRB);
	gpio_request(GPIO_FN_PDSTATUS, NULL); /* R-standby disables USB clock */
	gpio_request(GPIO_PTV6, NULL); /* USBCLK_ON */
	gpio_direction_output(GPIO_PTV6, 1); /* USBCLK_ON = H */
	msleep(20); /* wait 20ms to let the clock settle */
	clk_enable(clk_get(NULL, "usb0"));
	__raw_writew(0x0600, 0xa40501d4);

	return 0;
}

/* board-specific self-refresh snippets, defined in assembly elsewhere */
extern char kfr2r09_sdram_enter_start;
extern char kfr2r09_sdram_enter_end;
extern char kfr2r09_sdram_leave_start;
extern char kfr2r09_sdram_leave_end;

/*
 * One-shot board bring-up: self-refresh hooks, serial console, BSC
 * timings for the flashes, pin mux for KEYSC/LCDC/CEU/SDHI, optional
 * USB gadget, then registration of all platform devices.
 */
static int __init kfr2r09_devices_setup(void)
{
	/* register board specific self-refresh code */
	sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF |
					SUSP_SH_RSTANDBY,
					&kfr2r09_sdram_enter_start,
					&kfr2r09_sdram_enter_end,
					&kfr2r09_sdram_leave_start,
					&kfr2r09_sdram_leave_end);

	/* enable SCIF1 serial port for YC401 console support */
	gpio_request(GPIO_FN_SCIF1_RXD, NULL);
	gpio_request(GPIO_FN_SCIF1_TXD, NULL);
	kfr2r09_serial_i2c_setup(); /* ECONTMSK(bit6=L10ONEN) set 1 */
	gpio_request(GPIO_PTG3, NULL); /* HPON_ON */
	gpio_direction_output(GPIO_PTG3, 1); /* HPON_ON = H */

	/* setup NOR flash at CS0 */
	__raw_writel(0x36db0400, BSC_CS0BCR);
	__raw_writel(0x00000500, BSC_CS0WCR);

	/* setup NAND flash at CS4 */
	__raw_writel(0x36db0400, BSC_CS4BCR);
	__raw_writel(0x00000500, BSC_CS4WCR);

	/* setup KEYSC pins */
	gpio_request(GPIO_FN_KEYOUT0, NULL);
	gpio_request(GPIO_FN_KEYOUT1, NULL);
	gpio_request(GPIO_FN_KEYOUT2, NULL);
	gpio_request(GPIO_FN_KEYOUT3, NULL);
	gpio_request(GPIO_FN_KEYOUT4_IN6, NULL);
	gpio_request(GPIO_FN_KEYIN0, NULL);
	gpio_request(GPIO_FN_KEYIN1, NULL);
	gpio_request(GPIO_FN_KEYIN2, NULL);
	gpio_request(GPIO_FN_KEYIN3, NULL);
	gpio_request(GPIO_FN_KEYIN4, NULL);
	gpio_request(GPIO_FN_KEYOUT5_IN5, NULL);

	/* setup LCDC pins for SYS panel */
	gpio_request(GPIO_FN_LCDD17, NULL);
	gpio_request(GPIO_FN_LCDD16, NULL);
	gpio_request(GPIO_FN_LCDD15, NULL);
	gpio_request(GPIO_FN_LCDD14, NULL);
	gpio_request(GPIO_FN_LCDD13, NULL);
	gpio_request(GPIO_FN_LCDD12, NULL);
	gpio_request(GPIO_FN_LCDD11, NULL);
	gpio_request(GPIO_FN_LCDD10, NULL);
	gpio_request(GPIO_FN_LCDD9, NULL);
	gpio_request(GPIO_FN_LCDD8, NULL);
	gpio_request(GPIO_FN_LCDD7, NULL);
	gpio_request(GPIO_FN_LCDD6, NULL);
	gpio_request(GPIO_FN_LCDD5, NULL);
	gpio_request(GPIO_FN_LCDD4, NULL);
	gpio_request(GPIO_FN_LCDD3, NULL);
	gpio_request(GPIO_FN_LCDD2, NULL);
	gpio_request(GPIO_FN_LCDD1, NULL);
	gpio_request(GPIO_FN_LCDD0, NULL);
	gpio_request(GPIO_FN_LCDRS, NULL); /* LCD_RS */
	gpio_request(GPIO_FN_LCDCS, NULL); /* LCD_CS/ */
	gpio_request(GPIO_FN_LCDRD, NULL); /* LCD_RD/ */
	gpio_request(GPIO_FN_LCDWR, NULL); /* LCD_WR/ */
	gpio_request(GPIO_FN_LCDVSYN, NULL); /* LCD_VSYNC */
	gpio_request(GPIO_PTE4, NULL); /* LCD_RST/ */
	gpio_direction_output(GPIO_PTE4, 1);
	gpio_request(GPIO_PTF4, NULL); /* PROTECT/ */
	gpio_direction_output(GPIO_PTF4, 1);
	gpio_request(GPIO_PTU0, NULL); /* LEDSTDBY/ */
	gpio_direction_output(GPIO_PTU0, 1);

	/* setup USB function */
	if (kfr2r09_usb0_gadget_setup() == 0)
		platform_device_register(&kfr2r09_usb0_gadget_device);

	/* CEU */
	gpio_request(GPIO_FN_VIO_CKO, NULL);
	gpio_request(GPIO_FN_VIO0_CLK, NULL);
	gpio_request(GPIO_FN_VIO0_VD, NULL);
	gpio_request(GPIO_FN_VIO0_HD, NULL);
	gpio_request(GPIO_FN_VIO0_FLD, NULL);
	gpio_request(GPIO_FN_VIO0_D7, NULL);
	gpio_request(GPIO_FN_VIO0_D6, NULL);
	gpio_request(GPIO_FN_VIO0_D5, NULL);
	gpio_request(GPIO_FN_VIO0_D4, NULL);
	gpio_request(GPIO_FN_VIO0_D3, NULL);
	gpio_request(GPIO_FN_VIO0_D2, NULL);
	gpio_request(GPIO_FN_VIO0_D1, NULL);
	gpio_request(GPIO_FN_VIO0_D0, NULL);

	platform_resource_setup_memory(&kfr2r09_ceu_device, "ceu", 4 << 20);

	/* SDHI0 connected to yc304 */
	gpio_request(GPIO_FN_SDHI0CD, NULL);
	gpio_request(GPIO_FN_SDHI0D3, NULL);
	gpio_request(GPIO_FN_SDHI0D2, NULL);
	gpio_request(GPIO_FN_SDHI0D1, NULL);
	gpio_request(GPIO_FN_SDHI0D0, NULL);
	gpio_request(GPIO_FN_SDHI0CMD, NULL);
	gpio_request(GPIO_FN_SDHI0CLK, NULL);

	return platform_add_devices(kfr2r09_devices,
				    ARRAY_SIZE(kfr2r09_devices));
}
device_initcall(kfr2r09_devices_setup);

/* Return the board specific boot mode pin configuration */
static int kfr2r09_mode_pins(void)
{
	/* MD0=1, MD1=1, MD2=0: Clock Mode 3
	 * MD3=0: 16-bit Area0 Bus Width
	 * MD5=1: Little Endian
	 * MD8=1: Test Mode Disabled
	 */
	return MODE_PIN0 | MODE_PIN1 | MODE_PIN5 | MODE_PIN8;
}

/*
 * The Machine Vector
 */
static struct sh_machine_vector mv_kfr2r09 __initmv = {
	.mv_name		= "kfr2r09",
	.mv_mode_pins		= kfr2r09_mode_pins,
};
gpl-2.0
ace0/linux-whirlwind-rng
drivers/hwmon/lm93.c
45
86314
/* * lm93.c - Part of lm_sensors, Linux kernel modules for hardware monitoring * * Author/Maintainer: Mark M. Hoffman <mhoffman@lightlink.com> * Copyright (c) 2004 Utilitek Systems, Inc. * * derived in part from lm78.c: * Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl> * * derived in part from lm85.c: * Copyright (c) 2002, 2003 Philip Pokorny <ppokorny@penguincomputing.com> * Copyright (c) 2003 Margit Schubert-While <margitsw@t-online.de> * * derived in part from w83l785ts.c: * Copyright (c) 2003-2004 Jean Delvare <jdelvare@suse.de> * * Ported to Linux 2.6 by Eric J. Bowersox <ericb@aspsys.com> * Copyright (c) 2005 Aspen Systems, Inc. * * Adapted to 2.6.20 by Carsten Emde <cbe@osadl.org> * Copyright (c) 2006 Carsten Emde, Open Source Automation Development Lab * * Modified for mainline integration by Hans J. Koch <hjk@hansjkoch.de> * Copyright (c) 2007 Hans J. Koch, Linutronix GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/hwmon-vid.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/jiffies.h> /* LM93 REGISTER ADDRESSES */ /* miscellaneous */ #define LM93_REG_MFR_ID 0x3e #define LM93_REG_VER 0x3f #define LM93_REG_STATUS_CONTROL 0xe2 #define LM93_REG_CONFIG 0xe3 #define LM93_REG_SLEEP_CONTROL 0xe4 /* alarm values start here */ #define LM93_REG_HOST_ERROR_1 0x48 /* voltage inputs: in1-in16 (nr => 0-15) */ #define LM93_REG_IN(nr) (0x56 + (nr)) #define LM93_REG_IN_MIN(nr) (0x90 + (nr) * 2) #define LM93_REG_IN_MAX(nr) (0x91 + (nr) * 2) /* temperature inputs: temp1-temp4 (nr => 0-3) */ #define LM93_REG_TEMP(nr) (0x50 + (nr)) #define LM93_REG_TEMP_MIN(nr) (0x78 + (nr) * 2) #define LM93_REG_TEMP_MAX(nr) (0x79 + (nr) * 2) /* temp[1-4]_auto_boost (nr => 0-3) */ #define LM93_REG_BOOST(nr) (0x80 + (nr)) /* #PROCHOT inputs: prochot1-prochot2 (nr => 0-1) */ #define LM93_REG_PROCHOT_CUR(nr) (0x67 + (nr) * 2) #define LM93_REG_PROCHOT_AVG(nr) (0x68 + (nr) * 2) #define LM93_REG_PROCHOT_MAX(nr) (0xb0 + (nr)) /* fan tach inputs: fan1-fan4 (nr => 0-3) */ #define LM93_REG_FAN(nr) (0x6e + (nr) * 2) #define LM93_REG_FAN_MIN(nr) (0xb4 + (nr) * 2) /* pwm outputs: pwm1-pwm2 (nr => 0-1, reg => 0-3) */ #define LM93_REG_PWM_CTL(nr, reg) (0xc8 + (reg) + (nr) * 4) #define LM93_PWM_CTL1 0x0 #define LM93_PWM_CTL2 0x1 #define LM93_PWM_CTL3 0x2 #define LM93_PWM_CTL4 0x3 /* GPIO input state */ #define LM93_REG_GPI 0x6b /* vid inputs: vid1-vid2 (nr => 0-1) */ #define LM93_REG_VID(nr) (0x6c + (nr)) /* vccp1 & vccp2: VID relative inputs (nr => 0-1) */ #define LM93_REG_VCCP_LIMIT_OFF(nr) (0xb2 + (nr)) /* temp[1-4]_auto_boost_hyst */ #define LM93_REG_BOOST_HYST_12 0xc0 #define LM93_REG_BOOST_HYST_34 0xc1 #define LM93_REG_BOOST_HYST(nr) (0xc0 + (nr)/2) /* temp[1-4]_auto_pwm_[min|hyst] */ #define LM93_REG_PWM_MIN_HYST_12 0xc3 #define 
LM93_REG_PWM_MIN_HYST_34 0xc4 #define LM93_REG_PWM_MIN_HYST(nr) (0xc3 + (nr)/2) /* prochot_override & prochot_interval */ #define LM93_REG_PROCHOT_OVERRIDE 0xc6 #define LM93_REG_PROCHOT_INTERVAL 0xc7 /* temp[1-4]_auto_base (nr => 0-3) */ #define LM93_REG_TEMP_BASE(nr) (0xd0 + (nr)) /* temp[1-4]_auto_offsets (step => 0-11) */ #define LM93_REG_TEMP_OFFSET(step) (0xd4 + (step)) /* #PROCHOT & #VRDHOT PWM ramp control */ #define LM93_REG_PWM_RAMP_CTL 0xbf /* miscellaneous */ #define LM93_REG_SFC1 0xbc #define LM93_REG_SFC2 0xbd #define LM93_REG_GPI_VID_CTL 0xbe #define LM93_REG_SF_TACH_TO_PWM 0xe0 /* error masks */ #define LM93_REG_GPI_ERR_MASK 0xec #define LM93_REG_MISC_ERR_MASK 0xed /* LM93 REGISTER VALUES */ #define LM93_MFR_ID 0x73 #define LM93_MFR_ID_PROTOTYPE 0x72 /* LM94 REGISTER VALUES */ #define LM94_MFR_ID_2 0x7a #define LM94_MFR_ID 0x79 #define LM94_MFR_ID_PROTOTYPE 0x78 /* SMBus capabilities */ #define LM93_SMBUS_FUNC_FULL (I2C_FUNC_SMBUS_BYTE_DATA | \ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA) #define LM93_SMBUS_FUNC_MIN (I2C_FUNC_SMBUS_BYTE_DATA | \ I2C_FUNC_SMBUS_WORD_DATA) /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; /* Insmod parameters */ static bool disable_block; module_param(disable_block, bool, 0); MODULE_PARM_DESC(disable_block, "Set to non-zero to disable SMBus block data transactions."); static bool init; module_param(init, bool, 0); MODULE_PARM_DESC(init, "Set to non-zero to force chip initialization."); static int vccp_limit_type[2] = {0, 0}; module_param_array(vccp_limit_type, int, NULL, 0); MODULE_PARM_DESC(vccp_limit_type, "Configures in7 and in8 limit modes."); static int vid_agtl; module_param(vid_agtl, int, 0); MODULE_PARM_DESC(vid_agtl, "Configures VID pin input thresholds."); /* Driver data */ static struct i2c_driver lm93_driver; /* LM93 BLOCK READ COMMANDS */ static const struct { u8 cmd; u8 len; } lm93_block_read_cmds[12] = { { 0xf2, 8 }, { 0xf3, 8 }, { 0xf4, 6 
}, { 0xf5, 16 }, { 0xf6, 4 }, { 0xf7, 8 }, { 0xf8, 12 }, { 0xf9, 32 }, { 0xfa, 8 }, { 0xfb, 8 }, { 0xfc, 16 }, { 0xfd, 9 }, }; /* * ALARMS: SYSCTL format described further below * REG: 64 bits in 8 registers, as immediately below */ struct block1_t { u8 host_status_1; u8 host_status_2; u8 host_status_3; u8 host_status_4; u8 p1_prochot_status; u8 p2_prochot_status; u8 gpi_status; u8 fan_status; }; /* * Client-specific data */ struct lm93_data { struct device *hwmon_dev; struct mutex update_lock; unsigned long last_updated; /* In jiffies */ /* client update function */ void (*update)(struct lm93_data *, struct i2c_client *); char valid; /* !=0 if following fields are valid */ /* register values, arranged by block read groups */ struct block1_t block1; /* * temp1 - temp4: unfiltered readings * temp1 - temp2: filtered readings */ u8 block2[6]; /* vin1 - vin16: readings */ u8 block3[16]; /* prochot1 - prochot2: readings */ struct { u8 cur; u8 avg; } block4[2]; /* fan counts 1-4 => 14-bits, LE, *left* justified */ u16 block5[4]; /* block6 has a lot of data we don't need */ struct { u8 min; u8 max; } temp_lim[4]; /* vin1 - vin16: low and high limits */ struct { u8 min; u8 max; } block7[16]; /* fan count limits 1-4 => same format as block5 */ u16 block8[4]; /* pwm control registers (2 pwms, 4 regs) */ u8 block9[2][4]; /* auto/pwm base temp and offset temp registers */ struct { u8 base[4]; u8 offset[12]; } block10; /* master config register */ u8 config; /* VID1 & VID2 => register format, 6-bits, right justified */ u8 vid[2]; /* prochot1 - prochot2: limits */ u8 prochot_max[2]; /* vccp1 & vccp2 (in7 & in8): VID relative limits (register format) */ u8 vccp_limits[2]; /* GPIO input state (register format, i.e. 
inverted) */ u8 gpi; /* #PROCHOT override (register format) */ u8 prochot_override; /* #PROCHOT intervals (register format) */ u8 prochot_interval; /* Fan Boost Temperatures (register format) */ u8 boost[4]; /* Fan Boost Hysteresis (register format) */ u8 boost_hyst[2]; /* Temperature Zone Min. PWM & Hysteresis (register format) */ u8 auto_pwm_min_hyst[2]; /* #PROCHOT & #VRDHOT PWM Ramp Control */ u8 pwm_ramp_ctl; /* miscellaneous setup regs */ u8 sfc1; u8 sfc2; u8 sf_tach_to_pwm; /* * The two PWM CTL2 registers can read something other than what was * last written for the OVR_DC field (duty cycle override). So, we * save the user-commanded value here. */ u8 pwm_override[2]; }; /* * VID: mV * REG: 6-bits, right justified, *always* using Intel VRM/VRD 10 */ static int LM93_VID_FROM_REG(u8 reg) { return vid_from_reg((reg & 0x3f), 100); } /* min, max, and nominal register values, per channel (u8) */ static const u8 lm93_vin_reg_min[16] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xae, }; static const u8 lm93_vin_reg_max[16] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, }; /* * Values from the datasheet. They're here for documentation only. * static const u8 lm93_vin_reg_nom[16] = { * 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, * 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0x40, 0xc0, * }; */ /* min, max, and nominal voltage readings, per channel (mV)*/ static const unsigned long lm93_vin_val_min[16] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3000, }; static const unsigned long lm93_vin_val_max[16] = { 1236, 1236, 1236, 1600, 2000, 2000, 1600, 1600, 4400, 6500, 3333, 2625, 1312, 1312, 1236, 3600, }; /* * Values from the datasheet. They're here for documentation only. 
* static const unsigned long lm93_vin_val_nom[16] = { * 927, 927, 927, 1200, 1500, 1500, 1200, 1200, * 3300, 5000, 2500, 1969, 984, 984, 309, 3300, * }; */ static unsigned LM93_IN_FROM_REG(int nr, u8 reg) { const long uv_max = lm93_vin_val_max[nr] * 1000; const long uv_min = lm93_vin_val_min[nr] * 1000; const long slope = (uv_max - uv_min) / (lm93_vin_reg_max[nr] - lm93_vin_reg_min[nr]); const long intercept = uv_min - slope * lm93_vin_reg_min[nr]; return (slope * reg + intercept + 500) / 1000; } /* * IN: mV, limits determined by channel nr * REG: scaling determined by channel nr */ static u8 LM93_IN_TO_REG(int nr, unsigned val) { /* range limit */ const long mv = clamp_val(val, lm93_vin_val_min[nr], lm93_vin_val_max[nr]); /* try not to lose too much precision here */ const long uv = mv * 1000; const long uv_max = lm93_vin_val_max[nr] * 1000; const long uv_min = lm93_vin_val_min[nr] * 1000; /* convert */ const long slope = (uv_max - uv_min) / (lm93_vin_reg_max[nr] - lm93_vin_reg_min[nr]); const long intercept = uv_min - slope * lm93_vin_reg_min[nr]; u8 result = ((uv - intercept + (slope/2)) / slope); result = clamp_val(result, lm93_vin_reg_min[nr], lm93_vin_reg_max[nr]); return result; } /* vid in mV, upper == 0 indicates low limit, otherwise upper limit */ static unsigned LM93_IN_REL_FROM_REG(u8 reg, int upper, int vid) { const long uv_offset = upper ? 
(((reg >> 4 & 0x0f) + 1) * 12500) : (((reg >> 0 & 0x0f) + 1) * -25000); const long uv_vid = vid * 1000; return (uv_vid + uv_offset + 5000) / 10000; } #define LM93_IN_MIN_FROM_REG(reg, vid) LM93_IN_REL_FROM_REG((reg), 0, (vid)) #define LM93_IN_MAX_FROM_REG(reg, vid) LM93_IN_REL_FROM_REG((reg), 1, (vid)) /* * vid in mV , upper == 0 indicates low limit, otherwise upper limit * upper also determines which nibble of the register is returned * (the other nibble will be 0x0) */ static u8 LM93_IN_REL_TO_REG(unsigned val, int upper, int vid) { long uv_offset = vid * 1000 - val * 10000; if (upper) { uv_offset = clamp_val(uv_offset, 12500, 200000); return (u8)((uv_offset / 12500 - 1) << 4); } else { uv_offset = clamp_val(uv_offset, -400000, -25000); return (u8)((uv_offset / -25000 - 1) << 0); } } /* * TEMP: 1/1000 degrees C (-128C to +127C) * REG: 1C/bit, two's complement */ static int LM93_TEMP_FROM_REG(u8 reg) { return (s8)reg * 1000; } #define LM93_TEMP_MIN (-128000) #define LM93_TEMP_MAX (127000) /* * TEMP: 1/1000 degrees C (-128C to +127C) * REG: 1C/bit, two's complement */ static u8 LM93_TEMP_TO_REG(long temp) { int ntemp = clamp_val(temp, LM93_TEMP_MIN, LM93_TEMP_MAX); ntemp += (ntemp < 0 ? -500 : 500); return (u8)(ntemp / 1000); } /* Determine 4-bit temperature offset resolution */ static int LM93_TEMP_OFFSET_MODE_FROM_REG(u8 sfc2, int nr) { /* mode: 0 => 1C/bit, nonzero => 0.5C/bit */ return sfc2 & (nr < 2 ? 0x10 : 0x20); } /* * This function is common to all 4-bit temperature offsets * reg is 4 bits right justified * mode 0 => 1C/bit, mode !0 => 0.5C/bit */ static int LM93_TEMP_OFFSET_FROM_REG(u8 reg, int mode) { return (reg & 0x0f) * (mode ? 5 : 10); } #define LM93_TEMP_OFFSET_MIN (0) #define LM93_TEMP_OFFSET_MAX0 (150) #define LM93_TEMP_OFFSET_MAX1 (75) /* * This function is common to all 4-bit temperature offsets * returns 4 bits right justified * mode 0 => 1C/bit, mode !0 => 0.5C/bit */ static u8 LM93_TEMP_OFFSET_TO_REG(int off, int mode) { int factor = mode ? 
5 : 10; off = clamp_val(off, LM93_TEMP_OFFSET_MIN, mode ? LM93_TEMP_OFFSET_MAX1 : LM93_TEMP_OFFSET_MAX0); return (u8)((off + factor/2) / factor); } /* 0 <= nr <= 3 */ static int LM93_TEMP_AUTO_OFFSET_FROM_REG(u8 reg, int nr, int mode) { /* temp1-temp2 (nr=0,1) use lower nibble */ if (nr < 2) return LM93_TEMP_OFFSET_FROM_REG(reg & 0x0f, mode); /* temp3-temp4 (nr=2,3) use upper nibble */ else return LM93_TEMP_OFFSET_FROM_REG(reg >> 4 & 0x0f, mode); } /* * TEMP: 1/10 degrees C (0C to +15C (mode 0) or +7.5C (mode non-zero)) * REG: 1.0C/bit (mode 0) or 0.5C/bit (mode non-zero) * 0 <= nr <= 3 */ static u8 LM93_TEMP_AUTO_OFFSET_TO_REG(u8 old, int off, int nr, int mode) { u8 new = LM93_TEMP_OFFSET_TO_REG(off, mode); /* temp1-temp2 (nr=0,1) use lower nibble */ if (nr < 2) return (old & 0xf0) | (new & 0x0f); /* temp3-temp4 (nr=2,3) use upper nibble */ else return (new << 4 & 0xf0) | (old & 0x0f); } static int LM93_AUTO_BOOST_HYST_FROM_REGS(struct lm93_data *data, int nr, int mode) { u8 reg; switch (nr) { case 0: reg = data->boost_hyst[0] & 0x0f; break; case 1: reg = data->boost_hyst[0] >> 4 & 0x0f; break; case 2: reg = data->boost_hyst[1] & 0x0f; break; case 3: default: reg = data->boost_hyst[1] >> 4 & 0x0f; break; } return LM93_TEMP_FROM_REG(data->boost[nr]) - LM93_TEMP_OFFSET_FROM_REG(reg, mode); } static u8 LM93_AUTO_BOOST_HYST_TO_REG(struct lm93_data *data, long hyst, int nr, int mode) { u8 reg = LM93_TEMP_OFFSET_TO_REG( (LM93_TEMP_FROM_REG(data->boost[nr]) - hyst), mode); switch (nr) { case 0: reg = (data->boost_hyst[0] & 0xf0) | (reg & 0x0f); break; case 1: reg = (reg << 4 & 0xf0) | (data->boost_hyst[0] & 0x0f); break; case 2: reg = (data->boost_hyst[1] & 0xf0) | (reg & 0x0f); break; case 3: default: reg = (reg << 4 & 0xf0) | (data->boost_hyst[1] & 0x0f); break; } return reg; } /* * PWM: 0-255 per sensors documentation * REG: 0-13 as mapped below... 
right justified */ enum pwm_freq { LM93_PWM_MAP_HI_FREQ, LM93_PWM_MAP_LO_FREQ }; static int lm93_pwm_map[2][16] = { { 0x00, /* 0.00% */ 0x40, /* 25.00% */ 0x50, /* 31.25% */ 0x60, /* 37.50% */ 0x70, /* 43.75% */ 0x80, /* 50.00% */ 0x90, /* 56.25% */ 0xa0, /* 62.50% */ 0xb0, /* 68.75% */ 0xc0, /* 75.00% */ 0xd0, /* 81.25% */ 0xe0, /* 87.50% */ 0xf0, /* 93.75% */ 0xff, /* 100.00% */ 0xff, 0xff, /* 14, 15 are reserved and should never occur */ }, { 0x00, /* 0.00% */ 0x40, /* 25.00% */ 0x49, /* 28.57% */ 0x52, /* 32.14% */ 0x5b, /* 35.71% */ 0x64, /* 39.29% */ 0x6d, /* 42.86% */ 0x76, /* 46.43% */ 0x80, /* 50.00% */ 0x89, /* 53.57% */ 0x92, /* 57.14% */ 0xb6, /* 71.43% */ 0xdb, /* 85.71% */ 0xff, /* 100.00% */ 0xff, 0xff, /* 14, 15 are reserved and should never occur */ }, }; static int LM93_PWM_FROM_REG(u8 reg, enum pwm_freq freq) { return lm93_pwm_map[freq][reg & 0x0f]; } /* round up to nearest match */ static u8 LM93_PWM_TO_REG(int pwm, enum pwm_freq freq) { int i; for (i = 0; i < 13; i++) if (pwm <= lm93_pwm_map[freq][i]) break; /* can fall through with i==13 */ return (u8)i; } static int LM93_FAN_FROM_REG(u16 regs) { const u16 count = le16_to_cpu(regs) >> 2; return count == 0 ? -1 : count == 0x3fff ? 
0 : 1350000 / count;
}

/*
 * RPM: (82.5 to 1350000)
 * REG: 14-bits, LE, *left* justified
 */
static u16 LM93_FAN_TO_REG(long rpm)
{
	u16 count, regs;

	if (rpm == 0) {
		count = 0x3fff;
	} else {
		rpm = clamp_val(rpm, 1, 1000000);
		count = clamp_val((1350000 + rpm) / rpm, 1, 0x3ffe);
	}

	/* shift into the left-justified 14-bit field, then convert to LE */
	regs = count << 2;
	return cpu_to_le16(regs);
}

/*
 * PWM FREQ: HZ
 * REG: 0-7 as mapped below
 */
static int lm93_pwm_freq_map[8] = {
	22500, 96, 84, 72, 60, 48, 36, 12
};

static int LM93_PWM_FREQ_FROM_REG(u8 reg)
{
	return lm93_pwm_freq_map[reg & 0x07];
}

/* round up to nearest match */
static u8 LM93_PWM_FREQ_TO_REG(int freq)
{
	int i;

	for (i = 7; i > 0; i--)
		if (freq <= lm93_pwm_freq_map[i])
			break;

	/* can fall through with i==0 */
	return (u8)i;
}

/*
 * TIME: 1/100 seconds
 * REG: 0-7 as mapped below
 */
static int lm93_spinup_time_map[8] = {
	0, 10, 25, 40, 70, 100, 200, 400,
};

static int LM93_SPINUP_TIME_FROM_REG(u8 reg)
{
	return lm93_spinup_time_map[reg >> 5 & 0x07];
}

/* round up to nearest match */
static u8 LM93_SPINUP_TIME_TO_REG(int time)
{
	int i;

	for (i = 0; i < 7; i++)
		if (time <= lm93_spinup_time_map[i])
			break;

	/*
	 * can fall through with i==7, selecting the largest entry (400);
	 * the original comment said i==8, but the loop stops at 7
	 */
	return (u8)i;
}

#define LM93_RAMP_MIN 0
#define LM93_RAMP_MAX 75

static int LM93_RAMP_FROM_REG(u8 reg)
{
	return (reg & 0x0f) * 5;
}

/*
 * RAMP: 1/100 seconds
 * REG: 50mS/bit 4-bits right justified
 */
static u8 LM93_RAMP_TO_REG(int ramp)
{
	ramp = clamp_val(ramp, LM93_RAMP_MIN, LM93_RAMP_MAX);
	return (u8)((ramp + 2) / 5);	/* +2 rounds to the nearest 50ms step */
}

/*
 * PROCHOT: 0-255, 0 => 0%, 255 => > 96.6%
 * REG: (same)
 */
static u8 LM93_PROCHOT_TO_REG(long prochot)
{
	prochot = clamp_val(prochot, 0, 255);
	return (u8)prochot;
}

/*
 * PROCHOT-INTERVAL: 73 - 37200 (1/100 seconds)
 * REG: 0-9 as mapped below
 */
static int lm93_interval_map[10] = {
	73, 146, 290, 580, 1170, 2330, 4660, 9320, 18600, 37200,
};

static int LM93_INTERVAL_FROM_REG(u8 reg)
{
	/*
	 * NOTE(review): the 0x0f mask admits indices 10-15, which are past
	 * the end of the 10-entry table above. Presumably the hardware only
	 * ever reports 0-9 -- confirm against the LM93 datasheet.
	 */
	return lm93_interval_map[reg & 0x0f];
}

/* round up to nearest match */
static u8 LM93_INTERVAL_TO_REG(long interval)
{
	int i;

	for (i = 0; i < 9; i++)
		if (interval <= lm93_interval_map[i])
			break;

	/* can fall through with i==9 */
	return (u8)i;
}

/*
 * GPIO: 0-255, GPIO0 is LSB
 * REG: inverted
 */
static unsigned LM93_GPI_FROM_REG(u8 reg)
{
	return ~reg & 0xff;
}

/*
 * alarm bitmask definitions
 * The LM93 has nearly 64 bits of error status... I've pared that down to
 * what I think is a useful subset in order to fit it into 32 bits.
 *
 * Especially note that the #VRD_HOT alarms are missing because we provide
 * that information as values in another sysfs file.
 *
 * If libsensors is extended to support 64 bit values, this could be
 * revisited.
 */
#define LM93_ALARM_IN1		0x00000001
#define LM93_ALARM_IN2		0x00000002
#define LM93_ALARM_IN3		0x00000004
#define LM93_ALARM_IN4		0x00000008
#define LM93_ALARM_IN5		0x00000010
#define LM93_ALARM_IN6		0x00000020
#define LM93_ALARM_IN7		0x00000040
#define LM93_ALARM_IN8		0x00000080
#define LM93_ALARM_IN9		0x00000100
#define LM93_ALARM_IN10		0x00000200
#define LM93_ALARM_IN11		0x00000400
#define LM93_ALARM_IN12		0x00000800
#define LM93_ALARM_IN13		0x00001000
#define LM93_ALARM_IN14		0x00002000
#define LM93_ALARM_IN15		0x00004000
#define LM93_ALARM_IN16		0x00008000
#define LM93_ALARM_FAN1		0x00010000
#define LM93_ALARM_FAN2		0x00020000
#define LM93_ALARM_FAN3		0x00040000
#define LM93_ALARM_FAN4		0x00080000
#define LM93_ALARM_PH1_ERR	0x00100000
#define LM93_ALARM_PH2_ERR	0x00200000
#define LM93_ALARM_SCSI1_ERR	0x00400000
#define LM93_ALARM_SCSI2_ERR	0x00800000
#define LM93_ALARM_DVDDP1_ERR	0x01000000
#define LM93_ALARM_DVDDP2_ERR	0x02000000
#define LM93_ALARM_D1_ERR	0x04000000
#define LM93_ALARM_D2_ERR	0x08000000
#define LM93_ALARM_TEMP1	0x10000000
#define LM93_ALARM_TEMP2	0x20000000
#define LM93_ALARM_TEMP3	0x40000000

/* pack the per-register hardware status bits into the 32-bit map above */
static unsigned LM93_ALARMS_FROM_REG(struct block1_t b1)
{
	unsigned result;
	result = b1.host_status_2 & 0x3f;

	/*
	 * bits 6 and 7 are sourced from either host_status_2 or
	 * host_status_4 (shifted into place), depending on the
	 * configured vccp limit type for each channel
	 */
	if (vccp_limit_type[0])
		result |= (b1.host_status_4 & 0x10) << 2;
	else
		result |= b1.host_status_2 & 0x40;

	if (vccp_limit_type[1])
		result |= (b1.host_status_4 & 0x20) << 2;
	else
		result |= b1.host_status_2 & 0x80;

	result |= b1.host_status_3 << 8;
	result |= (b1.fan_status & 0x0f) << 16;
	result |= (b1.p1_prochot_status & 0x80) << 13;
	result |= (b1.p2_prochot_status & 0x80) << 14;
	result |= (b1.host_status_4 & 0xfc) << 20;
	result |= (b1.host_status_1 & 0x07) << 28;
	return result;
}

#define MAX_RETRIES 5

/*
 * Read a single register over SMBus, retrying up to MAX_RETRIES times.
 * Returns 0 if every attempt fails (see the <TODO> below).
 */
static u8 lm93_read_byte(struct i2c_client *client, u8 reg)
{
	int value, i;

	/* retry in case of read errors */
	for (i = 1; i <= MAX_RETRIES; i++) {
		value = i2c_smbus_read_byte_data(client, reg);
		if (value >= 0) {
			return value;
		} else {
			dev_warn(&client->dev,
				 "lm93: read byte data failed, address 0x%02x.\n",
				 reg);
			mdelay(i + 3);	/* back off a bit more each retry */
		}
	}

	/* <TODO> what to return in case of error? */
	dev_err(&client->dev, "lm93: All read byte retries failed!!\n");
	return 0;
}

/* Write a single register; warns on failure and returns the SMBus result. */
static int lm93_write_byte(struct i2c_client *client, u8 reg, u8 value)
{
	int result;

	/* <TODO> how to handle write errors? */
	result = i2c_smbus_write_byte_data(client, reg, value);
	if (result < 0)
		dev_warn(&client->dev,
			 "lm93: write byte data failed, 0x%02x at address 0x%02x.\n",
			 value, reg);

	return result;
}

/*
 * Read a 16-bit register over SMBus, retrying up to MAX_RETRIES times.
 * Returns 0 if every attempt fails.
 */
static u16 lm93_read_word(struct i2c_client *client, u8 reg)
{
	int value, i;

	/* retry in case of read errors */
	for (i = 1; i <= MAX_RETRIES; i++) {
		value = i2c_smbus_read_word_data(client, reg);
		if (value >= 0) {
			return value;
		} else {
			dev_warn(&client->dev,
				 "lm93: read word data failed, address 0x%02x.\n",
				 reg);
			mdelay(i + 3);
		}
	}

	/* <TODO> what to return in case of error? */
	dev_err(&client->dev, "lm93: All read word retries failed!!\n");
	return 0;
}

/* Write a 16-bit register; warns on failure and returns the SMBus result. */
static int lm93_write_word(struct i2c_client *client, u8 reg, u16 value)
{
	int result;

	/* <TODO> how to handle write errors?
*/
	result = i2c_smbus_write_word_data(client, reg, value);
	if (result < 0)
		dev_warn(&client->dev,
			 "lm93: write word data failed, 0x%04x at address 0x%02x.\n",
			 value, reg);

	return result;
}

/* shared scratch buffer for SMBus block reads (serialized by update_lock) */
static u8 lm93_block_buffer[I2C_SMBUS_BLOCK_MAX];

/*
 * read block data into values, retry if not expected length
 * fbn => index to lm93_block_read_cmds table
 * (Fixed Block Number - section 14.5.2 of LM93 datasheet)
 */
static void lm93_read_block(struct i2c_client *client, u8 fbn, u8 *values)
{
	int i, result = 0;

	for (i = 1; i <= MAX_RETRIES; i++) {
		result = i2c_smbus_read_block_data(client,
			 lm93_block_read_cmds[fbn].cmd, lm93_block_buffer);
		if (result == lm93_block_read_cmds[fbn].len) {
			break;
		} else {
			dev_warn(&client->dev,
				 "lm93: block read data failed, command 0x%02x.\n",
				 lm93_block_read_cmds[fbn].cmd);
			mdelay(i + 3);
		}
	}

	if (result == lm93_block_read_cmds[fbn].len) {
		memcpy(values, lm93_block_buffer,
		       lm93_block_read_cmds[fbn].len);
	} else {
		/* <TODO> what to do in case of error? */
	}
}

/*
 * Refresh the cached register state if it is older than ~1.5s, then
 * return the (locked-and-unlocked) data snapshot for sysfs callbacks.
 */
static struct lm93_data *lm93_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	const unsigned long interval = HZ + (HZ / 2);	/* 1.5 seconds */

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + interval) ||
	    !data->valid) {
		/*
		 * data->update is a function pointer -- presumably selected
		 * at probe time between the block and byte/word update
		 * routines below; confirm against the probe code
		 */
		data->update(data, client);
		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);
	return data;
}

/* update routine for data that has no corresponding SMBus block command */
static void lm93_update_client_common(struct lm93_data *data,
				      struct i2c_client *client)
{
	int i;
	u8 *ptr;

	/* temp1 - temp4: limits */
	for (i = 0; i < 4; i++) {
		data->temp_lim[i].min =
			lm93_read_byte(client, LM93_REG_TEMP_MIN(i));
		data->temp_lim[i].max =
			lm93_read_byte(client, LM93_REG_TEMP_MAX(i));
	}

	/* config register */
	data->config = lm93_read_byte(client, LM93_REG_CONFIG);

	/* vid1 - vid2: values */
	for (i = 0; i < 2; i++)
		data->vid[i] = lm93_read_byte(client, LM93_REG_VID(i));

	/* prochot1 - prochot2: limits */
	for (i = 0; i < 2; i++)
		data->prochot_max[i] = lm93_read_byte(client,
				LM93_REG_PROCHOT_MAX(i));

	/* vccp1 - vccp2: VID relative limits */
	for (i = 0; i < 2; i++)
		data->vccp_limits[i] = lm93_read_byte(client,
				LM93_REG_VCCP_LIMIT_OFF(i));

	/* GPIO input state */
	data->gpi = lm93_read_byte(client, LM93_REG_GPI);

	/* #PROCHOT override state */
	data->prochot_override = lm93_read_byte(client,
			LM93_REG_PROCHOT_OVERRIDE);

	/* #PROCHOT intervals */
	data->prochot_interval = lm93_read_byte(client,
			LM93_REG_PROCHOT_INTERVAL);

	/* Fan Boost Temperature registers */
	for (i = 0; i < 4; i++)
		data->boost[i] = lm93_read_byte(client, LM93_REG_BOOST(i));

	/* Fan Boost Temperature Hyst. registers */
	data->boost_hyst[0] = lm93_read_byte(client, LM93_REG_BOOST_HYST_12);
	data->boost_hyst[1] = lm93_read_byte(client, LM93_REG_BOOST_HYST_34);

	/* Temperature Zone Min. PWM & Hysteresis registers */
	data->auto_pwm_min_hyst[0] =
		lm93_read_byte(client, LM93_REG_PWM_MIN_HYST_12);
	data->auto_pwm_min_hyst[1] =
		lm93_read_byte(client, LM93_REG_PWM_MIN_HYST_34);

	/* #PROCHOT & #VRDHOT PWM Ramp Control register */
	data->pwm_ramp_ctl = lm93_read_byte(client, LM93_REG_PWM_RAMP_CTL);

	/* misc setup registers */
	data->sfc1 = lm93_read_byte(client, LM93_REG_SFC1);
	data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
	data->sf_tach_to_pwm = lm93_read_byte(client,
			LM93_REG_SF_TACH_TO_PWM);

	/* write back alarm values to clear */
	for (i = 0, ptr = (u8 *)(&data->block1); i < 8; i++)
		lm93_write_byte(client, LM93_REG_HOST_ERROR_1 + i,
				*(ptr + i));
}

/* update routine which uses SMBus block data commands */
static void lm93_update_client_full(struct lm93_data *data,
				    struct i2c_client *client)
{
	dev_dbg(&client->dev, "starting device update (block data enabled)\n");

	/* in1 - in16: values & limits */
	lm93_read_block(client, 3, (u8 *)(data->block3));
	lm93_read_block(client, 7, (u8 *)(data->block7));

	/* temp1 - temp4: values */
	lm93_read_block(client, 2, (u8 *)(data->block2));

	/* prochot1 - prochot2: values */
	lm93_read_block(client, 4, (u8 *)(data->block4));

	/* fan1 - fan4: values & limits */
	lm93_read_block(client, 5, (u8 *)(data->block5));
	lm93_read_block(client, 8, (u8 *)(data->block8));

	/* pwm control registers */
	lm93_read_block(client, 9, (u8 *)(data->block9));

	/* alarm values */
	lm93_read_block(client, 1, (u8 *)(&data->block1));

	/* auto/pwm registers */
	lm93_read_block(client, 10, (u8 *)(&data->block10));

	lm93_update_client_common(data, client);
}

/* update routine which uses SMBus byte/word data commands only */
static void lm93_update_client_min(struct lm93_data *data,
				  struct i2c_client *client)
{
	int i, j;
	u8 *ptr;

	dev_dbg(&client->dev,
		"starting device update (block data disabled)\n");

	/* in1 - in16: values & limits */
	for (i = 0; i < 16; i++) {
		data->block3[i] = lm93_read_byte(client, LM93_REG_IN(i));
		data->block7[i].min =
			lm93_read_byte(client, LM93_REG_IN_MIN(i));
		data->block7[i].max =
			lm93_read_byte(client, LM93_REG_IN_MAX(i));
	}

	/* temp1 - temp4: values */
	for (i = 0; i < 4; i++) {
		data->block2[i] = lm93_read_byte(client, LM93_REG_TEMP(i));
	}

	/* prochot1 - prochot2: values */
	for (i = 0; i < 2; i++) {
		data->block4[i].cur =
			lm93_read_byte(client, LM93_REG_PROCHOT_CUR(i));
		data->block4[i].avg =
			lm93_read_byte(client, LM93_REG_PROCHOT_AVG(i));
	}

	/* fan1 - fan4: values & limits */
	for (i = 0; i < 4; i++) {
		data->block5[i] = lm93_read_word(client, LM93_REG_FAN(i));
		data->block8[i] =
			lm93_read_word(client, LM93_REG_FAN_MIN(i));
	}

	/* pwm control registers */
	for (i = 0; i < 2; i++) {
		for (j = 0; j < 4; j++) {
			data->block9[i][j] =
				lm93_read_byte(client,
					       LM93_REG_PWM_CTL(i, j));
		}
	}

	/* alarm values */
	for (i = 0, ptr = (u8 *)(&data->block1); i < 8; i++) {
		*(ptr + i) =
			lm93_read_byte(client, LM93_REG_HOST_ERROR_1 + i);
	}

	/* auto/pwm (base temp) registers */
	for (i = 0; i < 4; i++) {
		data->block10.base[i] =
			lm93_read_byte(client, LM93_REG_TEMP_BASE(i));
	}

	/* auto/pwm (offset temp) registers */
	for (i = 0; i < 12; i++) {
		data->block10.offset[i] =
lm93_read_byte(client, LM93_REG_TEMP_OFFSET(i));
	}

	lm93_update_client_common(data, client);
}

/* following are the sysfs callback functions */

/* inN_input: raw voltage reading, scaled per-channel */
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	return sprintf(buf, "%d\n",
		       LM93_IN_FROM_REG(nr, data->block3[nr]));
}

static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in, NULL, 0);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in, NULL, 1);
static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in, NULL, 2);
static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in, NULL, 3);
static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_in, NULL, 4);
static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_in, NULL, 5);
static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_in, NULL, 6);
static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_in, NULL, 7);
static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_in, NULL, 8);
static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_in, NULL, 9);
static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, show_in, NULL, 10);
static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, show_in, NULL, 11);
static SENSOR_DEVICE_ATTR(in13_input, S_IRUGO, show_in, NULL, 12);
static SENSOR_DEVICE_ATTR(in14_input, S_IRUGO, show_in, NULL, 13);
static SENSOR_DEVICE_ATTR(in15_input, S_IRUGO, show_in, NULL, 14);
static SENSOR_DEVICE_ATTR(in16_input, S_IRUGO, show_in, NULL, 15);

/*
 * inN_min: for channels 7/8 (nr 6/7) the limit may be VID-relative,
 * depending on the vccp_limit_type module setting
 */
static ssize_t show_in_min(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	int vccp = nr - 6;
	long rc, vid;

	if ((nr == 6 || nr == 7) && vccp_limit_type[vccp]) {
		vid = LM93_VID_FROM_REG(data->vid[vccp]);
		rc = LM93_IN_MIN_FROM_REG(data->vccp_limits[vccp], vid);
	} else {
		rc = LM93_IN_FROM_REG(nr, data->block7[nr].min);
	}
	return sprintf(buf, "%ld\n", rc);
}

static ssize_t store_in_min(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	int vccp = nr - 6;
	long vid;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	if ((nr == 6 || nr == 7) && vccp_limit_type[vccp]) {
		/* VID-relative limit lives in the low nibble */
		vid = LM93_VID_FROM_REG(data->vid[vccp]);
		data->vccp_limits[vccp] =
			(data->vccp_limits[vccp] & 0xf0) |
			LM93_IN_REL_TO_REG(val, 0, vid);
		lm93_write_byte(client, LM93_REG_VCCP_LIMIT_OFF(vccp),
				data->vccp_limits[vccp]);
	} else {
		data->block7[nr].min = LM93_IN_TO_REG(nr, val);
		lm93_write_byte(client, LM93_REG_IN_MIN(nr),
				data->block7[nr].min);
	}
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 0);
static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 1);
static SENSOR_DEVICE_ATTR(in3_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 2);
static SENSOR_DEVICE_ATTR(in4_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 3);
static SENSOR_DEVICE_ATTR(in5_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 4);
static SENSOR_DEVICE_ATTR(in6_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 5);
static SENSOR_DEVICE_ATTR(in7_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 6);
static SENSOR_DEVICE_ATTR(in8_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 7);
static SENSOR_DEVICE_ATTR(in9_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 8);
static SENSOR_DEVICE_ATTR(in10_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 9);
static SENSOR_DEVICE_ATTR(in11_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 10);
static SENSOR_DEVICE_ATTR(in12_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 11);
static SENSOR_DEVICE_ATTR(in13_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 12);
static SENSOR_DEVICE_ATTR(in14_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 13);
static SENSOR_DEVICE_ATTR(in15_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 14);
static SENSOR_DEVICE_ATTR(in16_min, S_IWUSR | S_IRUGO,
			  show_in_min, store_in_min, 15);

/* inN_max: mirror of inN_min, using the high nibble in VID-relative mode */
static ssize_t show_in_max(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	int vccp = nr - 6;
	long rc, vid;

	if ((nr == 6 || nr == 7) && vccp_limit_type[vccp]) {
		vid = LM93_VID_FROM_REG(data->vid[vccp]);
		rc = LM93_IN_MAX_FROM_REG(data->vccp_limits[vccp], vid);
	} else {
		rc = LM93_IN_FROM_REG(nr, data->block7[nr].max);
	}
	return sprintf(buf, "%ld\n", rc);
}

static ssize_t store_in_max(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	int vccp = nr - 6;
	long vid;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	if ((nr == 6 || nr == 7) && vccp_limit_type[vccp]) {
		/* VID-relative limit lives in the high nibble */
		vid = LM93_VID_FROM_REG(data->vid[vccp]);
		data->vccp_limits[vccp] =
			(data->vccp_limits[vccp] & 0x0f) |
			LM93_IN_REL_TO_REG(val, 1, vid);
		lm93_write_byte(client, LM93_REG_VCCP_LIMIT_OFF(vccp),
				data->vccp_limits[vccp]);
	} else {
		data->block7[nr].max = LM93_IN_TO_REG(nr, val);
		lm93_write_byte(client, LM93_REG_IN_MAX(nr),
				data->block7[nr].max);
	}
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 0);
static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 1);
static SENSOR_DEVICE_ATTR(in3_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 2);
static SENSOR_DEVICE_ATTR(in4_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 3);
static SENSOR_DEVICE_ATTR(in5_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 4);
static SENSOR_DEVICE_ATTR(in6_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 5);
static SENSOR_DEVICE_ATTR(in7_max,
S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6);
static SENSOR_DEVICE_ATTR(in8_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 7);
static SENSOR_DEVICE_ATTR(in9_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 8);
static SENSOR_DEVICE_ATTR(in10_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 9);
static SENSOR_DEVICE_ATTR(in11_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 10);
static SENSOR_DEVICE_ATTR(in12_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 11);
static SENSOR_DEVICE_ATTR(in13_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 12);
static SENSOR_DEVICE_ATTR(in14_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 13);
static SENSOR_DEVICE_ATTR(in15_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 14);
static SENSOR_DEVICE_ATTR(in16_max, S_IWUSR | S_IRUGO,
			  show_in_max, store_in_max, 15);

/* tempN_input: current temperature reading */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	return sprintf(buf, "%d\n", LM93_TEMP_FROM_REG(data->block2[nr]));
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);

/* tempN_min: low temperature limit */
static ssize_t show_temp_min(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	return sprintf(buf, "%d\n",
		       LM93_TEMP_FROM_REG(data->temp_lim[nr].min));
}

static ssize_t store_temp_min(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp_lim[nr].min = LM93_TEMP_TO_REG(val);
	lm93_write_byte(client, LM93_REG_TEMP_MIN(nr),
			data->temp_lim[nr].min);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
			  show_temp_min, store_temp_min, 0);
static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO,
			  show_temp_min, store_temp_min, 1);
static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO,
			  show_temp_min, store_temp_min, 2);

/* tempN_max: high temperature limit */
static ssize_t show_temp_max(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	return sprintf(buf, "%d\n",
		       LM93_TEMP_FROM_REG(data->temp_lim[nr].max));
}

static ssize_t store_temp_max(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->temp_lim[nr].max = LM93_TEMP_TO_REG(val);
	lm93_write_byte(client, LM93_REG_TEMP_MAX(nr),
			data->temp_lim[nr].max);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
			  show_temp_max, store_temp_max, 0);
static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO,
			  show_temp_max, store_temp_max, 1);
static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO,
			  show_temp_max, store_temp_max, 2);

/* tempN_auto_base: base temperature for the automatic fan control zone */
static ssize_t show_temp_auto_base(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	return sprintf(buf, "%d\n",
		       LM93_TEMP_FROM_REG(data->block10.base[nr]));
}

static ssize_t store_temp_auto_base(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->block10.base[nr] = LM93_TEMP_TO_REG(val);
	lm93_write_byte(client, LM93_REG_TEMP_BASE(nr),
			data->block10.base[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(temp1_auto_base, S_IWUSR | S_IRUGO,
			  show_temp_auto_base, store_temp_auto_base, 0);
static SENSOR_DEVICE_ATTR(temp2_auto_base, S_IWUSR | S_IRUGO,
			  show_temp_auto_base, store_temp_auto_base, 1);
static SENSOR_DEVICE_ATTR(temp3_auto_base, S_IWUSR | S_IRUGO,
			  show_temp_auto_base, store_temp_auto_base, 2);

/* tempN_auto_boost: fan boost trip temperature */
static ssize_t show_temp_auto_boost(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	return sprintf(buf, "%d\n", LM93_TEMP_FROM_REG(data->boost[nr]));
}

static ssize_t store_temp_auto_boost(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	long val;
	int err;

	err = kstrtol(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->boost[nr] = LM93_TEMP_TO_REG(val);
	lm93_write_byte(client, LM93_REG_BOOST(nr), data->boost[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(temp1_auto_boost, S_IWUSR | S_IRUGO,
			  show_temp_auto_boost, store_temp_auto_boost, 0);
static SENSOR_DEVICE_ATTR(temp2_auto_boost, S_IWUSR | S_IRUGO,
			  show_temp_auto_boost, store_temp_auto_boost, 1);
static SENSOR_DEVICE_ATTR(temp3_auto_boost, S_IWUSR | S_IRUGO,
			  show_temp_auto_boost, store_temp_auto_boost, 2);

/* tempN_auto_boost_hyst: hysteresis below the boost trip point */
static ssize_t show_temp_auto_boost_hyst(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	int mode = LM93_TEMP_OFFSET_MODE_FROM_REG(data->sfc2, nr);
	return sprintf(buf, "%d\n",
LM93_AUTO_BOOST_HYST_FROM_REGS(data, nr, mode));
}

static ssize_t store_temp_auto_boost_hyst(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	/* force 0.5C/bit mode */
	data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
	data->sfc2 |= ((nr < 2) ? 0x10 : 0x20);
	lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
	/* two zones share one hysteresis register, hence nr/2 */
	data->boost_hyst[nr/2] =
		LM93_AUTO_BOOST_HYST_TO_REG(data, val, nr, 1);
	lm93_write_byte(client, LM93_REG_BOOST_HYST(nr),
			data->boost_hyst[nr/2]);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(temp1_auto_boost_hyst, S_IWUSR | S_IRUGO,
			  show_temp_auto_boost_hyst,
			  store_temp_auto_boost_hyst, 0);
static SENSOR_DEVICE_ATTR(temp2_auto_boost_hyst, S_IWUSR | S_IRUGO,
			  show_temp_auto_boost_hyst,
			  store_temp_auto_boost_hyst, 1);
static SENSOR_DEVICE_ATTR(temp3_auto_boost_hyst, S_IWUSR | S_IRUGO,
			  show_temp_auto_boost_hyst,
			  store_temp_auto_boost_hyst, 2);

/*
 * tempN_auto_offsetM: per-zone PWM offset temperatures.
 * Two-index attribute: ->index is the temp channel, ->nr is the offset slot.
 */
static ssize_t show_temp_auto_offset(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr);
	int nr = s_attr->index;
	int ofs = s_attr->nr;
	struct lm93_data *data = lm93_update_device(dev);
	int mode = LM93_TEMP_OFFSET_MODE_FROM_REG(data->sfc2, nr);
	return sprintf(buf, "%d\n",
		       LM93_TEMP_AUTO_OFFSET_FROM_REG(
				data->block10.offset[ofs], nr, mode));
}

static ssize_t store_temp_auto_offset(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct sensor_device_attribute_2 *s_attr = to_sensor_dev_attr_2(attr);
	int nr = s_attr->index;
	int ofs = s_attr->nr;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	/* force 0.5C/bit mode */
	data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
	data->sfc2 |= ((nr < 2) ? 0x10 : 0x20);
	lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
	data->block10.offset[ofs] = LM93_TEMP_AUTO_OFFSET_TO_REG(
			data->block10.offset[ofs], val, nr, 1);
	lm93_write_byte(client, LM93_REG_TEMP_OFFSET(ofs),
			data->block10.offset[ofs]);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR_2(temp1_auto_offset1, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    0, 0);
static SENSOR_DEVICE_ATTR_2(temp1_auto_offset2, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    1, 0);
static SENSOR_DEVICE_ATTR_2(temp1_auto_offset3, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    2, 0);
static SENSOR_DEVICE_ATTR_2(temp1_auto_offset4, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    3, 0);
static SENSOR_DEVICE_ATTR_2(temp1_auto_offset5, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    4, 0);
static SENSOR_DEVICE_ATTR_2(temp1_auto_offset6, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    5, 0);
static SENSOR_DEVICE_ATTR_2(temp1_auto_offset7, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    6, 0);
static SENSOR_DEVICE_ATTR_2(temp1_auto_offset8, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    7, 0);
static SENSOR_DEVICE_ATTR_2(temp1_auto_offset9, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    8, 0);
static SENSOR_DEVICE_ATTR_2(temp1_auto_offset10, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    9, 0);
static SENSOR_DEVICE_ATTR_2(temp1_auto_offset11, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    10, 0);
static SENSOR_DEVICE_ATTR_2(temp1_auto_offset12, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    11, 0);
static SENSOR_DEVICE_ATTR_2(temp2_auto_offset1, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    0, 1);
static SENSOR_DEVICE_ATTR_2(temp2_auto_offset2, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    1, 1);
static SENSOR_DEVICE_ATTR_2(temp2_auto_offset3, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    2, 1);
static SENSOR_DEVICE_ATTR_2(temp2_auto_offset4, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    3, 1);
static SENSOR_DEVICE_ATTR_2(temp2_auto_offset5, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    4, 1);
static SENSOR_DEVICE_ATTR_2(temp2_auto_offset6, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    5, 1);
static SENSOR_DEVICE_ATTR_2(temp2_auto_offset7, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    6, 1);
static SENSOR_DEVICE_ATTR_2(temp2_auto_offset8, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    7, 1);
static SENSOR_DEVICE_ATTR_2(temp2_auto_offset9, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    8, 1);
static SENSOR_DEVICE_ATTR_2(temp2_auto_offset10, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    9, 1);
static SENSOR_DEVICE_ATTR_2(temp2_auto_offset11, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    10, 1);
static SENSOR_DEVICE_ATTR_2(temp2_auto_offset12, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    11, 1);
static SENSOR_DEVICE_ATTR_2(temp3_auto_offset1, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    0, 2);
static SENSOR_DEVICE_ATTR_2(temp3_auto_offset2, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    1, 2);
static SENSOR_DEVICE_ATTR_2(temp3_auto_offset3, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    2, 2);
static SENSOR_DEVICE_ATTR_2(temp3_auto_offset4, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    3, 2);
static SENSOR_DEVICE_ATTR_2(temp3_auto_offset5, S_IWUSR | S_IRUGO,
			    show_temp_auto_offset, store_temp_auto_offset,
			    4, 2);
static SENSOR_DEVICE_ATTR_2(temp3_auto_offset6, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 5, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset7, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 6, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset8, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 7, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset9, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 8, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset10, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 9, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset11, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 10, 2); static SENSOR_DEVICE_ATTR_2(temp3_auto_offset12, S_IWUSR | S_IRUGO, show_temp_auto_offset, store_temp_auto_offset, 11, 2); static ssize_t show_temp_auto_pwm_min(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; u8 reg, ctl4; struct lm93_data *data = lm93_update_device(dev); reg = data->auto_pwm_min_hyst[nr/2] >> 4 & 0x0f; ctl4 = data->block9[nr][LM93_PWM_CTL4]; return sprintf(buf, "%d\n", LM93_PWM_FROM_REG(reg, (ctl4 & 0x07) ? LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ)); } static ssize_t store_temp_auto_pwm_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u8 reg, ctl4; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); reg = lm93_read_byte(client, LM93_REG_PWM_MIN_HYST(nr)); ctl4 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL4)); reg = (reg & 0x0f) | LM93_PWM_TO_REG(val, (ctl4 & 0x07) ? 
			LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ) << 4;
	data->auto_pwm_min_hyst[nr/2] = reg;
	lm93_write_byte(client, LM93_REG_PWM_MIN_HYST(nr), reg);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(temp1_auto_pwm_min, S_IWUSR | S_IRUGO,
			  show_temp_auto_pwm_min, store_temp_auto_pwm_min, 0);
static SENSOR_DEVICE_ATTR(temp2_auto_pwm_min, S_IWUSR | S_IRUGO,
			  show_temp_auto_pwm_min, store_temp_auto_pwm_min, 1);
static SENSOR_DEVICE_ATTR(temp3_auto_pwm_min, S_IWUSR | S_IRUGO,
			  show_temp_auto_pwm_min, store_temp_auto_pwm_min, 2);

/*
 * sysfs show: hysteresis for the auto-fan offset table; stored in the low
 * nibble of the shared min/hyst register, decoded per the channel's
 * offset mode bit in SFC2.
 */
static ssize_t show_temp_auto_offset_hyst(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	int mode = LM93_TEMP_OFFSET_MODE_FROM_REG(data->sfc2, nr);
	return sprintf(buf, "%d\n", LM93_TEMP_OFFSET_FROM_REG(
					data->auto_pwm_min_hyst[nr / 2], mode));
}

/*
 * sysfs store: forces 0.5C/bit offset mode for the channel, then
 * read-modify-writes the low nibble, preserving the pwm-min high nibble.
 */
static ssize_t store_temp_auto_offset_hyst(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	u8 reg;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	/* force 0.5C/bit mode */
	data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
	data->sfc2 |= ((nr < 2) ? 0x10 : 0x20);
	lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
	reg = data->auto_pwm_min_hyst[nr/2];
	reg = (reg & 0xf0) | (LM93_TEMP_OFFSET_TO_REG(val, 1) & 0x0f);
	data->auto_pwm_min_hyst[nr/2] = reg;
	lm93_write_byte(client, LM93_REG_PWM_MIN_HYST(nr), reg);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(temp1_auto_offset_hyst, S_IWUSR | S_IRUGO,
	show_temp_auto_offset_hyst, store_temp_auto_offset_hyst, 0);
static SENSOR_DEVICE_ATTR(temp2_auto_offset_hyst, S_IWUSR | S_IRUGO,
	show_temp_auto_offset_hyst, store_temp_auto_offset_hyst, 1);
static SENSOR_DEVICE_ATTR(temp3_auto_offset_hyst, S_IWUSR | S_IRUGO,
	show_temp_auto_offset_hyst, store_temp_auto_offset_hyst, 2);

/* sysfs show: current fan tach reading (RPM) from cached block5 */
static ssize_t show_fan_input(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct sensor_device_attribute *s_attr = to_sensor_dev_attr(attr);
	int nr = s_attr->index;
	struct lm93_data *data = lm93_update_device(dev);

	return sprintf(buf, "%d\n", LM93_FAN_FROM_REG(data->block5[nr]));
}

static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan_input, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan_input, NULL, 3);

/* sysfs show: fan minimum limit from cached block8 */
static ssize_t show_fan_min(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);

	return sprintf(buf, "%d\n", LM93_FAN_FROM_REG(data->block8[nr]));
}

/* sysfs store: write fan minimum limit (16-bit register) */
static ssize_t store_fan_min(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->block8[nr] = LM93_FAN_TO_REG(val);
	lm93_write_word(client,
			LM93_REG_FAN_MIN(nr), data->block8[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO,
			  show_fan_min, store_fan_min, 0);
static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO,
			  show_fan_min, store_fan_min, 1);
static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO,
			  show_fan_min, store_fan_min, 2);
static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO,
			  show_fan_min, store_fan_min, 3);

/*
 * some tedious bit-twiddling here to deal with the register format:
 *
 *	data->sf_tach_to_pwm: (tach to pwm mapping bits)
 *
 *		bit |  7  |  6  |  5  |  4  |  3  |  2  |  1  |  0
 *		     T4:P2 T4:P1 T3:P2 T3:P1 T2:P2 T2:P1 T1:P2 T1:P1
 *
 *	data->sfc2: (enable bits)
 *
 *		bit |  3  |  2  |  1  |  0
 *		       T4    T3    T2    T1
 */

/* sysfs show: which pwm (1 or 2) this tach is mapped to, 0 if disabled */
static ssize_t show_fan_smart_tach(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	long rc = 0;
	int mapping;

	/* extract the relevant mapping */
	mapping = (data->sf_tach_to_pwm >> (nr * 2)) & 0x03;

	/* if there's a mapping and it's enabled */
	if (mapping && ((data->sfc2 >> nr) & 0x01))
		rc = mapping;
	return sprintf(buf, "%ld\n", rc);
}

/*
 * helper function - must grab data->update_lock before calling
 * fan is 0-3, indicating fan1-fan4
 */
static void lm93_write_fan_smart_tach(struct i2c_client *client,
				      struct lm93_data *data, int fan,
				      long value)
{
	/* insert the new mapping and write it out */
	data->sf_tach_to_pwm = lm93_read_byte(client, LM93_REG_SF_TACH_TO_PWM);
	data->sf_tach_to_pwm &= ~(0x3 << fan * 2);
	data->sf_tach_to_pwm |= value << fan * 2;
	lm93_write_byte(client, LM93_REG_SF_TACH_TO_PWM, data->sf_tach_to_pwm);

	/* insert the enable bit and write it out */
	data->sfc2 = lm93_read_byte(client, LM93_REG_SFC2);
	if (value)
		data->sfc2 |= 1 << fan;
	else
		data->sfc2 &= ~(1 << fan);
	lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
}

/* sysfs store: map this tach to pwm 1 or 2 (0 disables smart tach) */
static ssize_t store_fan_smart_tach(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	/* sanity test, ignore the write otherwise */
	if (val <= 2) {
		/* can't enable if pwm freq is 22.5KHz */
		if (val) {
			u8 ctl4 = lm93_read_byte(client,
				LM93_REG_PWM_CTL(val - 1, LM93_PWM_CTL4));
			if ((ctl4 & 0x07) == 0)
				val = 0;
		}
		lm93_write_fan_smart_tach(client, data, nr, val);
	}
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(fan1_smart_tach, S_IWUSR | S_IRUGO,
			  show_fan_smart_tach, store_fan_smart_tach, 0);
static SENSOR_DEVICE_ATTR(fan2_smart_tach, S_IWUSR | S_IRUGO,
			  show_fan_smart_tach, store_fan_smart_tach, 1);
static SENSOR_DEVICE_ATTR(fan3_smart_tach, S_IWUSR | S_IRUGO,
			  show_fan_smart_tach, store_fan_smart_tach, 2);
static SENSOR_DEVICE_ATTR(fan4_smart_tach, S_IWUSR | S_IRUGO,
			  show_fan_smart_tach, store_fan_smart_tach, 3);

/*
 * sysfs show: current pwm duty cycle; reports the user-commanded value
 * when manual override is enabled, the hardware value otherwise.
 */
static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	u8 ctl2, ctl4;
	long rc;

	ctl2 = data->block9[nr][LM93_PWM_CTL2];
	ctl4 = data->block9[nr][LM93_PWM_CTL4];
	if (ctl2 & 0x01) /* show user commanded value if enabled */
		rc = data->pwm_override[nr];
	else /* show present h/w value if manual pwm disabled */
		rc = LM93_PWM_FROM_REG(ctl2 >> 4, (ctl4 & 0x07) ?
			LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ);
	return sprintf(buf, "%ld\n", rc);
}

/*
 * sysfs store: set the manual-override duty cycle in the high nibble of
 * PWM_CTL2 (only takes effect when manual override is enabled).
 */
static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	u8 ctl2, ctl4;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	ctl2 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL2));
	ctl4 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL4));
	ctl2 = (ctl2 & 0x0f) | LM93_PWM_TO_REG(val, (ctl4 & 0x07) ?
			LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ) << 4;
	/* save user commanded value */
	data->pwm_override[nr] = LM93_PWM_FROM_REG(ctl2 >> 4,
			(ctl4 & 0x07) ?
			LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ);
	lm93_write_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL2), ctl2);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1);

/*
 * sysfs show: pwm enable mode - 0 = full speed (override at max duty),
 * 1 = manual override, 2 = automatic fan control.
 */
static ssize_t show_pwm_enable(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	u8 ctl2;
	long rc;

	ctl2 = data->block9[nr][LM93_PWM_CTL2];
	if (ctl2 & 0x01) /* manual override enabled ? */
		rc = ((ctl2 & 0xF0) == 0xF0) ? 0 : 1;
	else
		rc = 2;
	return sprintf(buf, "%ld\n", rc);
}

/* sysfs store: select pwm enable mode (see show_pwm_enable) */
static ssize_t store_pwm_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	u8 ctl2;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	ctl2 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL2));

	switch (val) {
	case 0:
		ctl2 |= 0xF1; /* enable manual override, set PWM to max */
		break;
	case 1:
		ctl2 |= 0x01; /* enable manual override */
		break;
	case 2:
		ctl2 &= ~0x01; /* disable manual override */
		break;
	default:
		mutex_unlock(&data->update_lock);
		return -EINVAL;
	}

	lm93_write_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL2), ctl2);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
				show_pwm_enable, store_pwm_enable, 0);
static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO,
				show_pwm_enable, store_pwm_enable, 1);

/* sysfs show: pwm base frequency, decoded from the low bits of PWM_CTL4 */
static ssize_t show_pwm_freq(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	u8 ctl4;

	ctl4 = data->block9[nr][LM93_PWM_CTL4];
	return sprintf(buf, "%d\n", LM93_PWM_FREQ_FROM_REG(ctl4));
}

/*
 * helper function - must grab data->update_lock before calling
 * pwm is 0-1, indicating pwm1-pwm2
 * this disables smart tach for all tach channels bound to the given pwm
 */
static void lm93_disable_fan_smart_tach(struct i2c_client *client,
					struct lm93_data *data, int pwm)
{
	int mapping = lm93_read_byte(client, LM93_REG_SF_TACH_TO_PWM);
	int mask;

	/* collapse the mapping into a mask of enable bits */
	mapping = (mapping >> pwm) & 0x55;
	mask = mapping & 0x01;
	mask |= (mapping & 0x04) >> 1;
	mask |= (mapping & 0x10) >> 2;
	mask |= (mapping & 0x40) >> 3;

	/* disable smart tach according to the mask */
	data->sfc2 =
		lm93_read_byte(client, LM93_REG_SFC2);
	data->sfc2 &= ~mask;
	lm93_write_byte(client, LM93_REG_SFC2, data->sfc2);
}

/*
 * sysfs store: set pwm base frequency; selecting 22.5KHz (reg value 0)
 * also forces smart tach off for tachs bound to this pwm.
 */
static ssize_t store_pwm_freq(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	u8 ctl4;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	ctl4 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL4));
	ctl4 = (ctl4 & 0xf8) | LM93_PWM_FREQ_TO_REG(val);
	data->block9[nr][LM93_PWM_CTL4] = ctl4;
	/* ctl4 == 0 -> 22.5KHz -> disable smart tach */
	if (!ctl4)
		lm93_disable_fan_smart_tach(client, data, nr);
	lm93_write_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL4), ctl4);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(pwm1_freq, S_IWUSR | S_IRUGO,
			  show_pwm_freq, store_pwm_freq, 0);
static SENSOR_DEVICE_ATTR(pwm2_freq, S_IWUSR | S_IRUGO,
			  show_pwm_freq, store_pwm_freq, 1);

/* sysfs show: raw PWM_CTL1 byte - the auto-control channel bindings */
static ssize_t show_pwm_auto_channels(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);

	return sprintf(buf, "%d\n", data->block9[nr][LM93_PWM_CTL1]);
}

/* sysfs store: write the channel bindings byte, clamped to 0-255 */
static ssize_t store_pwm_auto_channels(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->block9[nr][LM93_PWM_CTL1] = clamp_val(val, 0, 255);
	lm93_write_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL1),
				data->block9[nr][LM93_PWM_CTL1]);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(pwm1_auto_channels, S_IWUSR | S_IRUGO,
			  show_pwm_auto_channels, store_pwm_auto_channels, 0);
static SENSOR_DEVICE_ATTR(pwm2_auto_channels, S_IWUSR | S_IRUGO,
			  show_pwm_auto_channels, store_pwm_auto_channels, 1);

/* sysfs show: spin-up minimum duty cycle (low nibble of PWM_CTL3) */
static ssize_t show_pwm_auto_spinup_min(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	u8 ctl3, ctl4;

	ctl3 = data->block9[nr][LM93_PWM_CTL3];
	ctl4 = data->block9[nr][LM93_PWM_CTL4];
	return sprintf(buf, "%d\n",
		       LM93_PWM_FROM_REG(ctl3 & 0x0f, (ctl4 & 0x07) ?
			LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ));
}

/* sysfs store: write spin-up minimum into the low nibble of PWM_CTL3 */
static ssize_t store_pwm_auto_spinup_min(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	u8 ctl3, ctl4;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	ctl3 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3));
	ctl4 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL4));
	ctl3 = (ctl3 & 0xf0) | LM93_PWM_TO_REG(val, (ctl4 & 0x07) ?
LM93_PWM_MAP_LO_FREQ : LM93_PWM_MAP_HI_FREQ); data->block9[nr][LM93_PWM_CTL3] = ctl3; lm93_write_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3), ctl3); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1_auto_spinup_min, S_IWUSR | S_IRUGO, show_pwm_auto_spinup_min, store_pwm_auto_spinup_min, 0); static SENSOR_DEVICE_ATTR(pwm2_auto_spinup_min, S_IWUSR | S_IRUGO, show_pwm_auto_spinup_min, store_pwm_auto_spinup_min, 1); static ssize_t show_pwm_auto_spinup_time(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf, "%d\n", LM93_SPINUP_TIME_FROM_REG( data->block9[nr][LM93_PWM_CTL3])); } static ssize_t store_pwm_auto_spinup_time(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int nr = (to_sensor_dev_attr(attr))->index; struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u8 ctl3; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); ctl3 = lm93_read_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3)); ctl3 = (ctl3 & 0x1f) | (LM93_SPINUP_TIME_TO_REG(val) << 5 & 0xe0); data->block9[nr][LM93_PWM_CTL3] = ctl3; lm93_write_byte(client, LM93_REG_PWM_CTL(nr, LM93_PWM_CTL3), ctl3); mutex_unlock(&data->update_lock); return count; } static SENSOR_DEVICE_ATTR(pwm1_auto_spinup_time, S_IWUSR | S_IRUGO, show_pwm_auto_spinup_time, store_pwm_auto_spinup_time, 0); static SENSOR_DEVICE_ATTR(pwm2_auto_spinup_time, S_IWUSR | S_IRUGO, show_pwm_auto_spinup_time, store_pwm_auto_spinup_time, 1); static ssize_t show_pwm_auto_prochot_ramp(struct device *dev, struct device_attribute *attr, char *buf) { struct lm93_data *data = lm93_update_device(dev); return sprintf(buf, "%d\n", LM93_RAMP_FROM_REG(data->pwm_ramp_ctl >> 4 & 0x0f)); } static ssize_t store_pwm_auto_prochot_ramp(struct device *dev, struct 
device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u8 ramp; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); ramp = lm93_read_byte(client, LM93_REG_PWM_RAMP_CTL); ramp = (ramp & 0x0f) | (LM93_RAMP_TO_REG(val) << 4 & 0xf0); lm93_write_byte(client, LM93_REG_PWM_RAMP_CTL, ramp); mutex_unlock(&data->update_lock); return count; } static DEVICE_ATTR(pwm_auto_prochot_ramp, S_IRUGO | S_IWUSR, show_pwm_auto_prochot_ramp, store_pwm_auto_prochot_ramp); static ssize_t show_pwm_auto_vrdhot_ramp(struct device *dev, struct device_attribute *attr, char *buf) { struct lm93_data *data = lm93_update_device(dev); return sprintf(buf, "%d\n", LM93_RAMP_FROM_REG(data->pwm_ramp_ctl & 0x0f)); } static ssize_t store_pwm_auto_vrdhot_ramp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct lm93_data *data = i2c_get_clientdata(client); u8 ramp; unsigned long val; int err; err = kstrtoul(buf, 10, &val); if (err) return err; mutex_lock(&data->update_lock); ramp = lm93_read_byte(client, LM93_REG_PWM_RAMP_CTL); ramp = (ramp & 0xf0) | (LM93_RAMP_TO_REG(val) & 0x0f); lm93_write_byte(client, LM93_REG_PWM_RAMP_CTL, ramp); mutex_unlock(&data->update_lock); return 0; } static DEVICE_ATTR(pwm_auto_vrdhot_ramp, S_IRUGO | S_IWUSR, show_pwm_auto_vrdhot_ramp, store_pwm_auto_vrdhot_ramp); static ssize_t show_vid(struct device *dev, struct device_attribute *attr, char *buf) { int nr = (to_sensor_dev_attr(attr))->index; struct lm93_data *data = lm93_update_device(dev); return sprintf(buf, "%d\n", LM93_VID_FROM_REG(data->vid[nr])); } static SENSOR_DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL, 0); static SENSOR_DEVICE_ATTR(cpu1_vid, S_IRUGO, show_vid, NULL, 1); static ssize_t show_prochot(struct device *dev, struct device_attribute *attr, char *buf) { int nr = 
	    (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);

	return sprintf(buf, "%d\n", data->block4[nr].cur);
}

static SENSOR_DEVICE_ATTR(prochot1, S_IRUGO, show_prochot, NULL, 0);
static SENSOR_DEVICE_ATTR(prochot2, S_IRUGO, show_prochot, NULL, 1);

/* sysfs show: averaged #PROCHOT duty cycle from cached block4 */
static ssize_t show_prochot_avg(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);

	return sprintf(buf, "%d\n", data->block4[nr].avg);
}

static SENSOR_DEVICE_ATTR(prochot1_avg, S_IRUGO, show_prochot_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(prochot2_avg, S_IRUGO, show_prochot_avg, NULL, 1);

/* sysfs show: #PROCHOT maximum limit */
static ssize_t show_prochot_max(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);

	return sprintf(buf, "%d\n", data->prochot_max[nr]);
}

/* sysfs store: write #PROCHOT maximum limit */
static ssize_t store_prochot_max(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->prochot_max[nr] = LM93_PROCHOT_TO_REG(val);
	lm93_write_byte(client, LM93_REG_PROCHOT_MAX(nr),
			data->prochot_max[nr]);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(prochot1_max, S_IWUSR | S_IRUGO,
			  show_prochot_max, store_prochot_max, 0);
static SENSOR_DEVICE_ATTR(prochot2_max, S_IWUSR | S_IRUGO,
			  show_prochot_max, store_prochot_max, 1);

/* per-channel override enable bits: index 0 -> bit 7, index 1 -> bit 6 */
static const u8 prochot_override_mask[] = { 0x80, 0x40 };

/* sysfs show: 1 if the #PROCHOT override bit for this channel is set */
static ssize_t show_prochot_override(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);

	return sprintf(buf, "%d\n",
		(data->prochot_override & prochot_override_mask[nr]) ? 1 : 0);
}

/* sysfs store: set/clear the per-channel #PROCHOT override bit */
static ssize_t store_prochot_override(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	if (val)
		data->prochot_override |= prochot_override_mask[nr];
	else
		data->prochot_override &= (~prochot_override_mask[nr]);
	lm93_write_byte(client, LM93_REG_PROCHOT_OVERRIDE,
			data->prochot_override);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(prochot1_override, S_IWUSR | S_IRUGO,
			  show_prochot_override, store_prochot_override, 0);
static SENSOR_DEVICE_ATTR(prochot2_override, S_IWUSR | S_IRUGO,
			  show_prochot_override, store_prochot_override, 1);

/*
 * sysfs show: #PROCHOT interval; channel 1 is kept in the high nibble,
 * channel 0 in the low nibble of the shared interval register.
 */
static ssize_t show_prochot_interval(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);
	u8 tmp;

	if (nr == 1)
		tmp = (data->prochot_interval & 0xf0) >> 4;
	else
		tmp = data->prochot_interval & 0x0f;
	return sprintf(buf, "%d\n", LM93_INTERVAL_FROM_REG(tmp));
}

/* sysfs store: read-modify-write this channel's nibble of the interval */
static ssize_t store_prochot_interval(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	u8 tmp;
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	tmp = lm93_read_byte(client, LM93_REG_PROCHOT_INTERVAL);
	if (nr == 1)
		tmp = (tmp & 0x0f) | (LM93_INTERVAL_TO_REG(val) << 4);
	else
		tmp = (tmp & 0xf0) | LM93_INTERVAL_TO_REG(val);
	data->prochot_interval = tmp;
	lm93_write_byte(client, LM93_REG_PROCHOT_INTERVAL, tmp);
	mutex_unlock(&data->update_lock);
	return count;
}

static SENSOR_DEVICE_ATTR(prochot1_interval, S_IWUSR | S_IRUGO,
			  show_prochot_interval, store_prochot_interval, 0);
static SENSOR_DEVICE_ATTR(prochot2_interval, S_IWUSR | S_IRUGO,
			  show_prochot_interval, store_prochot_interval, 1);

/* sysfs show: override duty cycle (low nibble of the override register) */
static ssize_t show_prochot_override_duty_cycle(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct lm93_data *data = lm93_update_device(dev);

	return sprintf(buf, "%d\n", data->prochot_override & 0x0f);
}

/* sysfs store: write duty cycle (clamped to 0-15), preserving enable bits */
static ssize_t store_prochot_override_duty_cycle(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	data->prochot_override = (data->prochot_override & 0xf0) |
			clamp_val(val, 0, 15);
	lm93_write_byte(client, LM93_REG_PROCHOT_OVERRIDE,
			data->prochot_override);
	mutex_unlock(&data->update_lock);
	return count;
}

static DEVICE_ATTR(prochot_override_duty_cycle, S_IRUGO | S_IWUSR,
			show_prochot_override_duty_cycle,
			store_prochot_override_duty_cycle);

/* sysfs show: state of the "prochot short" config bit (0x10) */
static ssize_t show_prochot_short(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct lm93_data *data = lm93_update_device(dev);

	return sprintf(buf, "%d\n", (data->config & 0x10) ? 1 : 0);
}

/* sysfs store: set/clear bit 0x10 of the config register */
static ssize_t store_prochot_short(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct lm93_data *data = i2c_get_clientdata(client);
	unsigned long val;
	int err;

	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;

	mutex_lock(&data->update_lock);
	if (val)
		data->config |= 0x10;
	else
		data->config &= ~0x10;
	lm93_write_byte(client, LM93_REG_CONFIG, data->config);
	mutex_unlock(&data->update_lock);
	return count;
}

static DEVICE_ATTR(prochot_short, S_IRUGO | S_IWUSR,
		   show_prochot_short, store_prochot_short);

/* sysfs show: #VRDHOT status - bits 4/5 of host_status_1 */
static ssize_t show_vrdhot(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	int nr = (to_sensor_dev_attr(attr))->index;
	struct lm93_data *data = lm93_update_device(dev);

	return sprintf(buf, "%d\n",
		       data->block1.host_status_1 & (1 << (nr + 4)) ? 1 : 0);
}

static SENSOR_DEVICE_ATTR(vrdhot1, S_IRUGO, show_vrdhot, NULL, 0);
static SENSOR_DEVICE_ATTR(vrdhot2, S_IRUGO, show_vrdhot, NULL, 1);

/* sysfs show: decoded GPI pin state */
static ssize_t show_gpio(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct lm93_data *data = lm93_update_device(dev);

	return sprintf(buf, "%d\n", LM93_GPI_FROM_REG(data->gpi));
}

static DEVICE_ATTR(gpio, S_IRUGO, show_gpio, NULL);

/* sysfs show: alarm bitmask assembled from the block1 status registers */
static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct lm93_data *data = lm93_update_device(dev);

	return sprintf(buf, "%d\n", LM93_ALARMS_FROM_REG(data->block1));
}

static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);

/* all sysfs attributes, registered as a single group at probe time */
static struct attribute *lm93_attrs[] = {
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in2_input.dev_attr.attr,
	&sensor_dev_attr_in3_input.dev_attr.attr,
	&sensor_dev_attr_in4_input.dev_attr.attr,
	&sensor_dev_attr_in5_input.dev_attr.attr,
	&sensor_dev_attr_in6_input.dev_attr.attr,
	&sensor_dev_attr_in7_input.dev_attr.attr,
	&sensor_dev_attr_in8_input.dev_attr.attr,
	&sensor_dev_attr_in9_input.dev_attr.attr,
	&sensor_dev_attr_in10_input.dev_attr.attr,
&sensor_dev_attr_in11_input.dev_attr.attr, &sensor_dev_attr_in12_input.dev_attr.attr, &sensor_dev_attr_in13_input.dev_attr.attr, &sensor_dev_attr_in14_input.dev_attr.attr, &sensor_dev_attr_in15_input.dev_attr.attr, &sensor_dev_attr_in16_input.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in5_min.dev_attr.attr, &sensor_dev_attr_in6_min.dev_attr.attr, &sensor_dev_attr_in7_min.dev_attr.attr, &sensor_dev_attr_in8_min.dev_attr.attr, &sensor_dev_attr_in9_min.dev_attr.attr, &sensor_dev_attr_in10_min.dev_attr.attr, &sensor_dev_attr_in11_min.dev_attr.attr, &sensor_dev_attr_in12_min.dev_attr.attr, &sensor_dev_attr_in13_min.dev_attr.attr, &sensor_dev_attr_in14_min.dev_attr.attr, &sensor_dev_attr_in15_min.dev_attr.attr, &sensor_dev_attr_in16_min.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in5_max.dev_attr.attr, &sensor_dev_attr_in6_max.dev_attr.attr, &sensor_dev_attr_in7_max.dev_attr.attr, &sensor_dev_attr_in8_max.dev_attr.attr, &sensor_dev_attr_in9_max.dev_attr.attr, &sensor_dev_attr_in10_max.dev_attr.attr, &sensor_dev_attr_in11_max.dev_attr.attr, &sensor_dev_attr_in12_max.dev_attr.attr, &sensor_dev_attr_in13_max.dev_attr.attr, &sensor_dev_attr_in14_max.dev_attr.attr, &sensor_dev_attr_in15_max.dev_attr.attr, &sensor_dev_attr_in16_max.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp2_input.dev_attr.attr, &sensor_dev_attr_temp3_input.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp2_min.dev_attr.attr, &sensor_dev_attr_temp3_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp2_max.dev_attr.attr, &sensor_dev_attr_temp3_max.dev_attr.attr, &sensor_dev_attr_temp1_auto_base.dev_attr.attr, 
&sensor_dev_attr_temp2_auto_base.dev_attr.attr, &sensor_dev_attr_temp3_auto_base.dev_attr.attr, &sensor_dev_attr_temp1_auto_boost.dev_attr.attr, &sensor_dev_attr_temp2_auto_boost.dev_attr.attr, &sensor_dev_attr_temp3_auto_boost.dev_attr.attr, &sensor_dev_attr_temp1_auto_boost_hyst.dev_attr.attr, &sensor_dev_attr_temp2_auto_boost_hyst.dev_attr.attr, &sensor_dev_attr_temp3_auto_boost_hyst.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset1.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset2.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset3.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset4.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset5.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset6.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset7.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset8.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset9.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset10.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset11.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset12.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset1.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset2.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset3.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset4.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset5.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset6.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset7.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset8.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset9.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset10.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset11.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset12.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset1.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset2.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset3.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset4.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset5.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset6.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset7.dev_attr.attr, 
&sensor_dev_attr_temp3_auto_offset8.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset9.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset10.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset11.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset12.dev_attr.attr, &sensor_dev_attr_temp1_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_temp2_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_temp3_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_temp1_auto_offset_hyst.dev_attr.attr, &sensor_dev_attr_temp2_auto_offset_hyst.dev_attr.attr, &sensor_dev_attr_temp3_auto_offset_hyst.dev_attr.attr, &sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan2_input.dev_attr.attr, &sensor_dev_attr_fan3_input.dev_attr.attr, &sensor_dev_attr_fan4_input.dev_attr.attr, &sensor_dev_attr_fan1_min.dev_attr.attr, &sensor_dev_attr_fan2_min.dev_attr.attr, &sensor_dev_attr_fan3_min.dev_attr.attr, &sensor_dev_attr_fan4_min.dev_attr.attr, &sensor_dev_attr_fan1_smart_tach.dev_attr.attr, &sensor_dev_attr_fan2_smart_tach.dev_attr.attr, &sensor_dev_attr_fan3_smart_tach.dev_attr.attr, &sensor_dev_attr_fan4_smart_tach.dev_attr.attr, &sensor_dev_attr_pwm1.dev_attr.attr, &sensor_dev_attr_pwm2.dev_attr.attr, &sensor_dev_attr_pwm1_enable.dev_attr.attr, &sensor_dev_attr_pwm2_enable.dev_attr.attr, &sensor_dev_attr_pwm1_freq.dev_attr.attr, &sensor_dev_attr_pwm2_freq.dev_attr.attr, &sensor_dev_attr_pwm1_auto_channels.dev_attr.attr, &sensor_dev_attr_pwm2_auto_channels.dev_attr.attr, &sensor_dev_attr_pwm1_auto_spinup_min.dev_attr.attr, &sensor_dev_attr_pwm2_auto_spinup_min.dev_attr.attr, &sensor_dev_attr_pwm1_auto_spinup_time.dev_attr.attr, &sensor_dev_attr_pwm2_auto_spinup_time.dev_attr.attr, &dev_attr_pwm_auto_prochot_ramp.attr, &dev_attr_pwm_auto_vrdhot_ramp.attr, &sensor_dev_attr_cpu0_vid.dev_attr.attr, &sensor_dev_attr_cpu1_vid.dev_attr.attr, &sensor_dev_attr_prochot1.dev_attr.attr, &sensor_dev_attr_prochot2.dev_attr.attr, &sensor_dev_attr_prochot1_avg.dev_attr.attr, &sensor_dev_attr_prochot2_avg.dev_attr.attr, 
&sensor_dev_attr_prochot1_max.dev_attr.attr,
&sensor_dev_attr_prochot2_max.dev_attr.attr,
&sensor_dev_attr_prochot1_override.dev_attr.attr,
&sensor_dev_attr_prochot2_override.dev_attr.attr,
&sensor_dev_attr_prochot1_interval.dev_attr.attr,
&sensor_dev_attr_prochot2_interval.dev_attr.attr,
&dev_attr_prochot_override_duty_cycle.attr,
&dev_attr_prochot_short.attr,
&sensor_dev_attr_vrdhot1.dev_attr.attr,
&sensor_dev_attr_vrdhot2.dev_attr.attr,
&dev_attr_gpio.attr,
&dev_attr_alarms.attr,
NULL
};

/* All LM93 sysfs attributes are created/removed as a single group. */
static struct attribute_group lm93_attr_grp = {
	.attrs = lm93_attrs,
};

/*
 * One-time chip initialization, called from probe.
 *
 * Configures the VID input thresholds, optionally (module parameter
 * "init") enables the #ALERT pin, ASF mode and error-event unmasking,
 * then starts monitoring and polls (up to 20 x 10 ms) for the chip's
 * ready bit (bit 7 of LM93_REG_CONFIG).  Only warns on timeout; the
 * caller proceeds regardless.
 */
static void lm93_init_client(struct i2c_client *client)
{
	int i;
	u8 reg;

	/* configure VID pin input thresholds */
	reg = lm93_read_byte(client, LM93_REG_GPI_VID_CTL);
	lm93_write_byte(client, LM93_REG_GPI_VID_CTL,
			reg | (vid_agtl ? 0x03 : 0x00));

	if (init) {
		/* enable #ALERT pin */
		reg = lm93_read_byte(client, LM93_REG_CONFIG);
		lm93_write_byte(client, LM93_REG_CONFIG, reg | 0x08);

		/* enable ASF mode for BMC status registers */
		reg = lm93_read_byte(client, LM93_REG_STATUS_CONTROL);
		lm93_write_byte(client, LM93_REG_STATUS_CONTROL, reg | 0x02);

		/* set sleep state to S0 */
		lm93_write_byte(client, LM93_REG_SLEEP_CONTROL, 0);

		/* unmask #VRDHOT and dynamic VCCP (if nec) error events */
		reg = lm93_read_byte(client, LM93_REG_MISC_ERR_MASK);
		reg &= ~0x03;
		reg &= ~(vccp_limit_type[0] ? 0x10 : 0);
		reg &= ~(vccp_limit_type[1] ? 0x20 : 0);
		lm93_write_byte(client, LM93_REG_MISC_ERR_MASK, reg);
	}

	/* start monitoring */
	reg = lm93_read_byte(client, LM93_REG_CONFIG);
	lm93_write_byte(client, LM93_REG_CONFIG, reg | 0x01);

	/* spin until ready */
	for (i = 0; i < 20; i++) {
		msleep(10);
		if ((lm93_read_byte(client, LM93_REG_CONFIG) & 0x80) == 0x80)
			return;
	}

	dev_warn(&client->dev, "timed out waiting for sensor chip to signal ready!\n");
}

/*
 * Return 0 if detection is successful, -ENODEV otherwise.
 *
 * Checks the manufacturer-ID register for 0x01, then maps the version
 * register onto the supported chip names ("lm93"/"lm94"), which is
 * copied into info->type for the i2c core.
 */
static int lm93_detect(struct i2c_client *client, struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;
	int mfr, ver;
	const char *name;

	if (!i2c_check_functionality(adapter, LM93_SMBUS_FUNC_MIN))
		return -ENODEV;

	/* detection */
	mfr = lm93_read_byte(client, LM93_REG_MFR_ID);
	if (mfr != 0x01) {
		dev_dbg(&adapter->dev,
			"detect failed, bad manufacturer id 0x%02x!\n", mfr);
		return -ENODEV;
	}

	ver = lm93_read_byte(client, LM93_REG_VER);
	switch (ver) {
	case LM93_MFR_ID:
	case LM93_MFR_ID_PROTOTYPE:
		name = "lm93";
		break;
	case LM94_MFR_ID_2:
	case LM94_MFR_ID:
	case LM94_MFR_ID_PROTOTYPE:
		name = "lm94";
		break;
	default:
		dev_dbg(&adapter->dev,
			"detect failed, bad version id 0x%02x!\n", ver);
		return -ENODEV;
	}

	strlcpy(info->type, name, I2C_NAME_SIZE);
	dev_dbg(&adapter->dev, "loading %s at %d, 0x%02x\n",
		client->name, i2c_adapter_id(client->adapter), client->addr);

	return 0;
}

/*
 * Bind the driver to a detected chip: pick the fastest update routine
 * the host adapter supports (SMBus block transfers when available and
 * not disabled via the "disable_block" parameter), allocate per-device
 * state, initialize the chip, create the sysfs group and register with
 * the hwmon class.  On hwmon registration failure the sysfs group is
 * removed again; "data" is devm-allocated so it needs no explicit free.
 */
static int lm93_probe(struct i2c_client *client,
		      const struct i2c_device_id *id)
{
	struct lm93_data *data;
	int err, func;
	void (*update)(struct lm93_data *, struct i2c_client *);

	/* choose update routine based on bus capabilities */
	func = i2c_get_functionality(client->adapter);
	if (((LM93_SMBUS_FUNC_FULL & func) == LM93_SMBUS_FUNC_FULL) &&
	    (!disable_block)) {
		dev_dbg(&client->dev, "using SMBus block data transactions\n");
		update = lm93_update_client_full;
	} else if ((LM93_SMBUS_FUNC_MIN & func) == LM93_SMBUS_FUNC_MIN) {
		dev_dbg(&client->dev,
			"disabled SMBus block data transactions\n");
		update = lm93_update_client_min;
	} else {
		dev_dbg(&client->dev,
			"detect failed, smbus byte and/or word data not supported!\n");
		return -ENODEV;
	}

	data = devm_kzalloc(&client->dev, sizeof(struct lm93_data), GFP_KERNEL);
	if (!data) {
		dev_dbg(&client->dev, "out of memory!\n");
		return -ENOMEM;
	}
	i2c_set_clientdata(client, data);

	/* housekeeping */
	data->update = update;
	mutex_init(&data->update_lock);

	/* initialize the chip */
	lm93_init_client(client);

	err = sysfs_create_group(&client->dev.kobj, &lm93_attr_grp);
	if (err)
		return err;

	/* Register hwmon driver class */
	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (!IS_ERR(data->hwmon_dev))
		return 0;

	/* hwmon registration failed: undo the sysfs group and report */
	err = PTR_ERR(data->hwmon_dev);
	dev_err(&client->dev, "error registering hwmon device.\n");
	sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp);
	return err;
}

/* Unwind probe: unregister from hwmon and remove the sysfs group. */
static int lm93_remove(struct i2c_client *client)
{
	struct lm93_data *data = i2c_get_clientdata(client);

	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp);
	return 0;
}

/* Devices this driver binds to; both chips share one implementation. */
static const struct i2c_device_id lm93_id[] = {
	{ "lm93", 0 },
	{ "lm94", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, lm93_id);

static struct i2c_driver lm93_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver = {
		.name	= "lm93",
	},
	.probe		= lm93_probe,
	.remove		= lm93_remove,
	.id_table	= lm93_id,
	.detect		= lm93_detect,
	.address_list	= normal_i2c,
};

module_i2c_driver(lm93_driver);

MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>, "
		"Hans J. Koch <hjk@hansjkoch.de>");
MODULE_DESCRIPTION("LM93 driver");
MODULE_LICENSE("GPL");
gpl-2.0
psyke83/codeaurora-kernel_samsung_europa
drivers/net/defxx.c
557
116388
/* * File Name: * defxx.c * * Copyright Information: * Copyright Digital Equipment Corporation 1996. * * This software may be used and distributed according to the terms of * the GNU General Public License, incorporated herein by reference. * * Abstract: * A Linux device driver supporting the Digital Equipment Corporation * FDDI TURBOchannel, EISA and PCI controller families. Supported * adapters include: * * DEC FDDIcontroller/TURBOchannel (DEFTA) * DEC FDDIcontroller/EISA (DEFEA) * DEC FDDIcontroller/PCI (DEFPA) * * The original author: * LVS Lawrence V. Stefani <lstefani@yahoo.com> * * Maintainers: * macro Maciej W. Rozycki <macro@linux-mips.org> * * Credits: * I'd like to thank Patricia Cross for helping me get started with * Linux, David Davies for a lot of help upgrading and configuring * my development system and for answering many OS and driver * development questions, and Alan Cox for recommendations and * integration help on getting FDDI support into Linux. LVS * * Driver Architecture: * The driver architecture is largely based on previous driver work * for other operating systems. The upper edge interface and * functions were largely taken from existing Linux device drivers * such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C * driver. * * Adapter Probe - * The driver scans for supported EISA adapters by reading the * SLOT ID register for each EISA slot and making a match * against the expected value. * * Bus-Specific Initialization - * This driver currently supports both EISA and PCI controller * families. While the custom DMA chip and FDDI logic is similar * or identical, the bus logic is very different. After * initialization, the only bus-specific differences is in how the * driver enables and disables interrupts. Other than that, the * run-time critical code behaves the same on both families. * It's important to note that both adapter families are configured * to I/O map, rather than memory map, the adapter registers. 
* * Driver Open/Close - * In the driver open routine, the driver ISR (interrupt service * routine) is registered and the adapter is brought to an * operational state. In the driver close routine, the opposite * occurs; the driver ISR is deregistered and the adapter is * brought to a safe, but closed state. Users may use consecutive * commands to bring the adapter up and down as in the following * example: * ifconfig fddi0 up * ifconfig fddi0 down * ifconfig fddi0 up * * Driver Shutdown - * Apparently, there is no shutdown or halt routine support under * Linux. This routine would be called during "reboot" or * "shutdown" to allow the driver to place the adapter in a safe * state before a warm reboot occurs. To be really safe, the user * should close the adapter before shutdown (eg. ifconfig fddi0 down) * to ensure that the adapter DMA engine is taken off-line. However, * the current driver code anticipates this problem and always issues * a soft reset of the adapter at the beginning of driver initialization. * A future driver enhancement in this area may occur in 2.1.X where * Alan indicated that a shutdown handler may be implemented. * * Interrupt Service Routine - * The driver supports shared interrupts, so the ISR is registered for * each board with the appropriate flag and the pointer to that board's * device structure. This provides the context during interrupt * processing to support shared interrupts and multiple boards. * * Interrupt enabling/disabling can occur at many levels. At the host * end, you can disable system interrupts, or disable interrupts at the * PIC (on Intel systems). Across the bus, both EISA and PCI adapters * have a bus-logic chip interrupt enable/disable as well as a DMA * controller interrupt enable/disable. * * The driver currently enables and disables adapter interrupts at the * bus-logic chip and assumes that Linux will take care of clearing or * acknowledging any host-based interrupt chips. 
* * Control Functions - * Control functions are those used to support functions such as adding * or deleting multicast addresses, enabling or disabling packet * reception filters, or other custom/proprietary commands. Presently, * the driver supports the "get statistics", "set multicast list", and * "set mac address" functions defined by Linux. A list of possible * enhancements include: * * - Custom ioctl interface for executing port interface commands * - Custom ioctl interface for adding unicast addresses to * adapter CAM (to support bridge functions). * - Custom ioctl interface for supporting firmware upgrades. * * Hardware (port interface) Support Routines - * The driver function names that start with "dfx_hw_" represent * low-level port interface routines that are called frequently. They * include issuing a DMA or port control command to the adapter, * resetting the adapter, or reading the adapter state. Since the * driver initialization and run-time code must make calls into the * port interface, these routines were written to be as generic and * usable as possible. * * Receive Path - * The adapter DMA engine supports a 256 entry receive descriptor block * of which up to 255 entries can be used at any given time. The * architecture is a standard producer, consumer, completion model in * which the driver "produces" receive buffers to the adapter, the * adapter "consumes" the receive buffers by DMAing incoming packet data, * and the driver "completes" the receive buffers by servicing the * incoming packet, then "produces" a new buffer and starts the cycle * again. Receive buffers can be fragmented in up to 16 fragments * (descriptor entries). For simplicity, this driver posts * single-fragment receive buffers of 4608 bytes, then allocates a * sk_buff, copies the data, then reposts the buffer. To reduce CPU * utilization, a better approach would be to pass up the receive * buffer (no extra copy) then allocate and post a replacement buffer. 
* This is a performance enhancement that should be looked into at * some point. * * Transmit Path - * Like the receive path, the adapter DMA engine supports a 256 entry * transmit descriptor block of which up to 255 entries can be used at * any given time. Transmit buffers can be fragmented in up to 255 * fragments (descriptor entries). This driver always posts one * fragment per transmit packet request. * * The fragment contains the entire packet from FC to end of data. * Before posting the buffer to the adapter, the driver sets a three-byte * packet request header (PRH) which is required by the Motorola MAC chip * used on the adapters. The PRH tells the MAC the type of token to * receive/send, whether or not to generate and append the CRC, whether * synchronous or asynchronous framing is used, etc. Since the PRH * definition is not necessarily consistent across all FDDI chipsets, * the driver, rather than the common FDDI packet handler routines, * sets these bytes. * * To reduce the amount of descriptor fetches needed per transmit request, * the driver takes advantage of the fact that there are at least three * bytes available before the skb->data field on the outgoing transmit * request. This is guaranteed by having fddi_setup() in net_init.c set * dev->hard_header_len to 24 bytes. 21 bytes accounts for the largest * header in an 802.2 SNAP frame. The other 3 bytes are the extra "pad" * bytes which we'll use to store the PRH. * * There's a subtle advantage to adding these pad bytes to the * hard_header_len, it ensures that the data portion of the packet for * an 802.2 SNAP frame is longword aligned. Other FDDI driver * implementations may not need the extra padding and can start copying * or DMAing directly from the FC byte which starts at skb->data. Should * another driver implementation need ADDITIONAL padding, the net_init.c * module should be updated and dev->hard_header_len should be increased. 
* NOTE: To maintain the alignment on the data portion of the packet, * dev->hard_header_len should always be evenly divisible by 4 and at * least 24 bytes in size. * * Modification History: * Date Name Description * 16-Aug-96 LVS Created. * 20-Aug-96 LVS Updated dfx_probe so that version information * string is only displayed if 1 or more cards are * found. Changed dfx_rcv_queue_process to copy * 3 NULL bytes before FC to ensure that data is * longword aligned in receive buffer. * 09-Sep-96 LVS Updated dfx_ctl_set_multicast_list to enable * LLC group promiscuous mode if multicast list * is too large. LLC individual/group promiscuous * mode is now disabled if IFF_PROMISC flag not set. * dfx_xmt_queue_pkt no longer checks for NULL skb * on Alan Cox recommendation. Added node address * override support. * 12-Sep-96 LVS Reset current address to factory address during * device open. Updated transmit path to post a * single fragment which includes PRH->end of data. * Mar 2000 AC Did various cleanups for 2.3.x * Jun 2000 jgarzik PCI and resource alloc cleanups * Jul 2000 tjeerd Much cleanup and some bug fixes * Sep 2000 tjeerd Fix leak on unload, cosmetic code cleanup * Feb 2001 Skb allocation fixes * Feb 2001 davej PCI enable cleanups. * 04 Aug 2003 macro Converted to the DMA API. * 14 Aug 2004 macro Fix device names reported. * 14 Jun 2005 macro Use irqreturn_t. * 23 Oct 2006 macro Big-endian host support. * 14 Dec 2006 macro TURBOchannel support. 
*/ /* Include files */ #include <linux/bitops.h> #include <linux/compiler.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/eisa.h> #include <linux/errno.h> #include <linux/fddidevice.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/tc.h> #include <asm/byteorder.h> #include <asm/io.h> #include "defxx.h" /* Version information string should be updated prior to each new release! */ #define DRV_NAME "defxx" #define DRV_VERSION "v1.10" #define DRV_RELDATE "2006/12/14" static char version[] __devinitdata = DRV_NAME ": " DRV_VERSION " " DRV_RELDATE " Lawrence V. Stefani and others\n"; #define DYNAMIC_BUFFERS 1 #define SKBUFF_RX_COPYBREAK 200 /* * NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte * alignment for compatibility with old EISA boards. 
*/ #define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128) #ifdef CONFIG_PCI #define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type) #else #define DFX_BUS_PCI(dev) 0 #endif #ifdef CONFIG_EISA #define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type) #else #define DFX_BUS_EISA(dev) 0 #endif #ifdef CONFIG_TC #define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type) #else #define DFX_BUS_TC(dev) 0 #endif #ifdef CONFIG_DEFXX_MMIO #define DFX_MMIO 1 #else #define DFX_MMIO 0 #endif /* Define module-wide (static) routines */ static void dfx_bus_init(struct net_device *dev); static void dfx_bus_uninit(struct net_device *dev); static void dfx_bus_config_check(DFX_board_t *bp); static int dfx_driver_init(struct net_device *dev, const char *print_name, resource_size_t bar_start); static int dfx_adap_init(DFX_board_t *bp, int get_buffers); static int dfx_open(struct net_device *dev); static int dfx_close(struct net_device *dev); static void dfx_int_pr_halt_id(DFX_board_t *bp); static void dfx_int_type_0_process(DFX_board_t *bp); static void dfx_int_common(struct net_device *dev); static irqreturn_t dfx_interrupt(int irq, void *dev_id); static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev); static void dfx_ctl_set_multicast_list(struct net_device *dev); static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr); static int dfx_ctl_update_cam(DFX_board_t *bp); static int dfx_ctl_update_filters(DFX_board_t *bp); static int dfx_hw_dma_cmd_req(DFX_board_t *bp); static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command, PI_UINT32 data_a, PI_UINT32 data_b, PI_UINT32 *host_data); static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type); static int dfx_hw_adap_state_rd(DFX_board_t *bp); static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type); static int dfx_rcv_init(DFX_board_t *bp, int get_buffers); static void dfx_rcv_queue_process(DFX_board_t *bp); static void dfx_rcv_flush(DFX_board_t *bp); static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, 
struct net_device *dev); static int dfx_xmt_done(DFX_board_t *bp); static void dfx_xmt_flush(DFX_board_t *bp); /* Define module-wide (static) variables */ static struct pci_driver dfx_pci_driver; static struct eisa_driver dfx_eisa_driver; static struct tc_driver dfx_tc_driver; /* * ======================= * = dfx_port_write_long = * = dfx_port_read_long = * ======================= * * Overview: * Routines for reading and writing values from/to adapter * * Returns: * None * * Arguments: * bp - pointer to board information * offset - register offset from base I/O address * data - for dfx_port_write_long, this is a value to write; * for dfx_port_read_long, this is a pointer to store * the read value * * Functional Description: * These routines perform the correct operation to read or write * the adapter register. * * EISA port block base addresses are based on the slot number in which the * controller is installed. For example, if the EISA controller is installed * in slot 4, the port block base address is 0x4000. If the controller is * installed in slot 2, the port block base address is 0x2000, and so on. * This port block can be used to access PDQ, ESIC, and DEFEA on-board * registers using the register offsets defined in DEFXX.H. * * PCI port block base addresses are assigned by the PCI BIOS or system * firmware. There is one 128 byte port block which can be accessed. It * allows for I/O mapping of both PDQ and PFI registers using the register * offsets defined in DEFXX.H. * * Return Codes: * None * * Assumptions: * bp->base is a valid base I/O address for this adapter. * offset is a valid register offset for this adapter. * * Side Effects: * Rather than produce macros for these functions, these routines * are defined using "inline" to ensure that the compiler will * generate inline code and not waste a procedure call and return. * This provides all the benefits of macros, but with the * advantage of strict data type checking. 
*/ static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data) { writel(data, bp->base.mem + offset); mb(); } static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data) { outl(data, bp->base.port + offset); } static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data) { struct device __maybe_unused *bdev = bp->bus_dev; int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; if (dfx_use_mmio) dfx_writel(bp, offset, data); else dfx_outl(bp, offset, data); } static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data) { mb(); *data = readl(bp->base.mem + offset); } static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data) { *data = inl(bp->base.port + offset); } static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data) { struct device __maybe_unused *bdev = bp->bus_dev; int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; if (dfx_use_mmio) dfx_readl(bp, offset, data); else dfx_inl(bp, offset, data); } /* * ================ * = dfx_get_bars = * ================ * * Overview: * Retrieves the address range used to access control and status * registers. * * Returns: * None * * Arguments: * bdev - pointer to device information * bar_start - pointer to store the start address * bar_len - pointer to store the length of the area * * Assumptions: * I am sure there are some. * * Side Effects: * None */ static void dfx_get_bars(struct device *bdev, resource_size_t *bar_start, resource_size_t *bar_len) { int dfx_bus_pci = DFX_BUS_PCI(bdev); int dfx_bus_eisa = DFX_BUS_EISA(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; if (dfx_bus_pci) { int num = dfx_use_mmio ? 
0 : 1; *bar_start = pci_resource_start(to_pci_dev(bdev), num); *bar_len = pci_resource_len(to_pci_dev(bdev), num); } if (dfx_bus_eisa) { unsigned long base_addr = to_eisa_device(bdev)->base_addr; resource_size_t bar; if (dfx_use_mmio) { bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2); bar <<= 8; bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1); bar <<= 8; bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0); bar <<= 16; *bar_start = bar; bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2); bar <<= 8; bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1); bar <<= 8; bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0); bar <<= 16; *bar_len = (bar | PI_MEM_ADD_MASK_M) + 1; } else { *bar_start = base_addr; *bar_len = PI_ESIC_K_CSR_IO_LEN; } } if (dfx_bus_tc) { *bar_start = to_tc_dev(bdev)->resource.start + PI_TC_K_CSR_OFFSET; *bar_len = PI_TC_K_CSR_LEN; } } static const struct net_device_ops dfx_netdev_ops = { .ndo_open = dfx_open, .ndo_stop = dfx_close, .ndo_start_xmit = dfx_xmt_queue_pkt, .ndo_get_stats = dfx_ctl_get_stats, .ndo_set_multicast_list = dfx_ctl_set_multicast_list, .ndo_set_mac_address = dfx_ctl_set_mac_address, }; /* * ================ * = dfx_register = * ================ * * Overview: * Initializes a supported FDDI controller * * Returns: * Condition code * * Arguments: * bdev - pointer to device information * * Functional Description: * * Return Codes: * 0 - This device (fddi0, fddi1, etc) configured successfully * -EBUSY - Failed to get resources, or dfx_driver_init failed. * * Assumptions: * It compiles so it should work :-( (PCI cards do :-) * * Side Effects: * Device structures for FDDI adapters (fddi0, fddi1, etc) are * initialized and the board resources are read and stored in * the device structure. 
*/ static int __devinit dfx_register(struct device *bdev) { static int version_disp; int dfx_bus_pci = DFX_BUS_PCI(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; const char *print_name = dev_name(bdev); struct net_device *dev; DFX_board_t *bp; /* board pointer */ resource_size_t bar_start = 0; /* pointer to port */ resource_size_t bar_len = 0; /* resource length */ int alloc_size; /* total buffer size used */ struct resource *region; int err = 0; if (!version_disp) { /* display version info if adapter is found */ version_disp = 1; /* set display flag to TRUE so that */ printk(version); /* we only display this string ONCE */ } dev = alloc_fddidev(sizeof(*bp)); if (!dev) { printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n", print_name); return -ENOMEM; } /* Enable PCI device. */ if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) { printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n", print_name); goto err_out; } SET_NETDEV_DEV(dev, bdev); bp = netdev_priv(dev); bp->bus_dev = bdev; dev_set_drvdata(bdev, dev); dfx_get_bars(bdev, &bar_start, &bar_len); if (dfx_use_mmio) region = request_mem_region(bar_start, bar_len, print_name); else region = request_region(bar_start, bar_len, print_name); if (!region) { printk(KERN_ERR "%s: Cannot reserve I/O resource " "0x%lx @ 0x%lx, aborting\n", print_name, (long)bar_len, (long)bar_start); err = -EBUSY; goto err_out_disable; } /* Set up I/O base address. 
*/ if (dfx_use_mmio) { bp->base.mem = ioremap_nocache(bar_start, bar_len); if (!bp->base.mem) { printk(KERN_ERR "%s: Cannot map MMIO\n", print_name); err = -ENOMEM; goto err_out_region; } } else { bp->base.port = bar_start; dev->base_addr = bar_start; } /* Initialize new device structure */ dev->netdev_ops = &dfx_netdev_ops; if (dfx_bus_pci) pci_set_master(to_pci_dev(bdev)); if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) { err = -ENODEV; goto err_out_unmap; } err = register_netdev(dev); if (err) goto err_out_kfree; printk("%s: registered as %s\n", print_name, dev->name); return 0; err_out_kfree: alloc_size = sizeof(PI_DESCR_BLOCK) + PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX + #ifndef DYNAMIC_BUFFERS (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) + #endif sizeof(PI_CONSUMER_BLOCK) + (PI_ALIGN_K_DESC_BLK - 1); if (bp->kmalloced) dma_free_coherent(bdev, alloc_size, bp->kmalloced, bp->kmalloced_dma); err_out_unmap: if (dfx_use_mmio) iounmap(bp->base.mem); err_out_region: if (dfx_use_mmio) release_mem_region(bar_start, bar_len); else release_region(bar_start, bar_len); err_out_disable: if (dfx_bus_pci) pci_disable_device(to_pci_dev(bdev)); err_out: free_netdev(dev); return err; } /* * ================ * = dfx_bus_init = * ================ * * Overview: * Initializes the bus-specific controller logic. * * Returns: * None * * Arguments: * dev - pointer to device information * * Functional Description: * Determine and save adapter IRQ in device table, * then perform bus-specific logic initialization. * * Return Codes: * None * * Assumptions: * bp->base has already been set with the proper * base I/O address for this device. * * Side Effects: * Interrupts are enabled at the adapter bus-specific logic. * Note: Interrupts at the DMA engine (PDQ chip) are not * enabled yet. 
*/
static void __devinit dfx_bus_init(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	struct device *bdev = bp->bus_dev;
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
	u8 val;

	DBG_printk("In dfx_bus_init...\n");

	/* Initialize a pointer back to the net_device struct */
	bp->dev = dev;

	/* Initialize adapter based on bus type */

	if (dfx_bus_tc)
		dev->irq = to_tc_dev(bdev)->interrupt;

	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;

		/* Get the interrupt level from the ESIC chip. */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val &= PI_CONFIG_STAT_0_M_IRQ;
		val >>= PI_CONFIG_STAT_0_V_IRQ;

		switch (val) {
		case PI_CONFIG_STAT_0_IRQ_K_9:
			dev->irq = 9;
			break;
		case PI_CONFIG_STAT_0_IRQ_K_10:
			dev->irq = 10;
			break;
		case PI_CONFIG_STAT_0_IRQ_K_11:
			dev->irq = 11;
			break;
		case PI_CONFIG_STAT_0_IRQ_K_15:
			dev->irq = 15;
			break;
		}

		/*
		 * Enable memory decoding (MEMCS0) and/or port decoding
		 * (IOCS1/IOCS0) as appropriate in the Function Control
		 * Register.  One of the port chip selects seems to be
		 * used for the Burst Holdoff register, but this bit of
		 * documentation is missing and as yet it has not been
		 * determined which of the two.  This is also the reason
		 * the size of the decoded port range is twice as large
		 * as one required by the PDQ.
		 *
		 * Fix applied throughout this block: the Linux port-I/O
		 * API is outb(value, port); the original calls passed
		 * (port, value), truncating the register address to a
		 * byte and writing it to the wrong port.
		 */

		/* Set the decode range of the board. */
		val = ((bp->base.port >> 12) << PI_IO_CMP_V_SLOT);
		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1);
		outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0);
		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1);
		outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0);
		val = PI_ESIC_K_CSR_IO_LEN - 1;
		outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1);
		outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0);
		outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1);
		outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);

		/* Enable the decoders. */
		val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
		if (dfx_use_mmio)
			val |= PI_FUNCTION_CNTRL_M_MEMCS0;
		outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);

		/*
		 * Enable access to the rest of the module
		 * (including PDQ and packet memory).
		 */
		val = PI_SLOT_CNTRL_M_ENB;
		outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL);

		/*
		 * Map PDQ registers into memory or port space.  This is
		 * done with a bit in the Burst Holdoff register.
		 */
		val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
		if (dfx_use_mmio)
			val |= PI_BURST_HOLDOFF_V_MEM_MAP;
		else
			val &= ~PI_BURST_HOLDOFF_V_MEM_MAP;
		outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF);

		/* Enable interrupts at EISA bus interface chip (ESIC) */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val |= PI_CONFIG_STAT_0_M_INT_ENB;
		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
	}
	if (dfx_bus_pci) {
		struct pci_dev *pdev = to_pci_dev(bdev);

		/* Get the interrupt level from the PCI Configuration Table */
		dev->irq = pdev->irq;

		/* Check Latency Timer and set if less than minimal */
		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
		if (val < PFI_K_LAT_TIMER_MIN) {
			val = PFI_K_LAT_TIMER_DEF;
			pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
		}

		/* Enable interrupts at PCI bus interface chip (PFI) */
		val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
	}
}

/*
 * ==================
 * = dfx_bus_uninit =
 * ==================
 *
 * Uninitializes the bus-specific controller logic by disabling interrupts
 * at the adapter bus-interface chip (ESIC on EISA, PFI on PCI).  Assumes
 * bp->base already holds the proper base address for this device.
 */
static void __devexit dfx_bus_uninit(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	struct device *bdev = bp->bus_dev;
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	u8 val;

	DBG_printk("In dfx_bus_uninit...\n");

	/* Uninitialize adapter based on bus type */

	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;

		/* Disable interrupts at EISA bus interface chip (ESIC) */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
		/* Fix: outb() takes (value, port), not (port, value). */
		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
	}
	if (dfx_bus_pci) {
		/* Disable interrupts at PCI bus interface chip (PFI) */
		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
	}
}

/*
 * ========================
 * = dfx_bus_config_check =
 * ========================
 *
 * Checks the configuration (burst size, full-duplex, etc.).  If any
 * parameters are illegal, this routine sets new defaults.  For Revision 1
 * FDDI EISA, Revision 2 or later FDDI EISA with rev E or later PDQ, and
 * all FDDI PCI controllers, all values are legal.  Must run before
 * dfx_adap_init() so burst size and other items have not been set yet.
 */
static void __devinit dfx_bus_config_check(DFX_board_t *bp)
{
	struct device __maybe_unused *bdev = bp->bus_dev;
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int	status;		/* return code from adapter port control call */
	u32	host_data;	/* LW data returned from port control call */

	DBG_printk("In dfx_bus_config_check...\n");

	/* Configuration check only valid for EISA adapter */

	if (dfx_bus_eisa) {
		/*
		 * First check if revision 2 EISA controller.  Rev. 1 cards
		 * used PDQ revision B, so no workaround needed in this case.
		 * Rev. 3 cards used PDQ revision E, so no workaround needed
		 * in this case, either.  Only Rev. 2 cards used either Rev.
		 * D or E chips, so we must verify the chip revision on
		 * Rev. 2 cards.
		 */
		if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
			/*
			 * Revision 2 FDDI EISA controller found,
			 * so let's check PDQ revision of adapter.
			 */
			status = dfx_hw_port_ctrl_req(bp,
							PI_PCTRL_M_SUB_CMD,
							PI_SUB_CMD_K_PDQ_REV_GET,
							0,
							&host_data);
			if ((status != DFX_K_SUCCESS) || (host_data == 2)) {
				/*
				 * Either we couldn't determine the PDQ
				 * revision, or we determined that it is at
				 * revision D.  In either case, we need to
				 * implement the workaround.
				 */

				/* Ensure the burst size is 8 longwords or less */

				switch (bp->burst_size) {
				case PI_PDATA_B_DMA_BURST_SIZE_32:
				case PI_PDATA_B_DMA_BURST_SIZE_16:
					bp->burst_size =
						PI_PDATA_B_DMA_BURST_SIZE_8;
					break;

				default:
					break;
				}

				/* Ensure that full-duplex mode is not enabled */

				bp->full_duplex_enb = PI_SNMP_K_FALSE;
			}
		}
	}
}

/*
 * ===================
 * = dfx_driver_init =
 * ===================
 *
 * Initializes remaining adapter board structure information and makes sure
 * the adapter is in a safe state prior to dfx_open().  Allocates the host
 * memory blocks needed by the adapter (descriptor and consumer blocks),
 * completes remaining bus initialization, and resets the adapter so that
 * it is in the DMA_UNAVAILABLE state.  Returns DFX_K_SUCCESS, or
 * DFX_K_FAILURE if memory could not be allocated or the adapter MAC
 * address could not be read.
 */
static int __devinit dfx_driver_init(struct net_device *dev,
				     const char *print_name,
				     resource_size_t bar_start)
{
	DFX_board_t *bp = netdev_priv(dev);
	struct device *bdev = bp->bus_dev;
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
	int alloc_size;			/* total buffer size needed */
	char *top_v, *curr_v;		/* virtual addrs into memory block */
	dma_addr_t top_p, curr_p;	/* physical addrs into memory block */
	u32 data;			/* host data register value */
	__le32 le32;
	char *board_name = NULL;

	DBG_printk("In dfx_driver_init...\n");

	/* Initialize bus-specific hardware registers */

	dfx_bus_init(dev);

	/*
	 * Initialize default values for configurable parameters
	 *
	 * Note: All of these parameters are ones that a user may
	 *       want to customize.  It'd be nice to break these
	 *       out into Space.c or someplace else that's more
	 *       accessible/understandable than this file.
	 */

	bp->full_duplex_enb = PI_SNMP_K_FALSE;
	bp->req_ttrt = 8 * 12500;		/* 8ms in 80 nanosec units */
	bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
	bp->rcv_bufs_to_post = RCV_BUFS_DEF;

	/*
	 * Ensure that HW configuration is OK
	 *
	 * Note: Depending on the hardware revision, we may need to modify
	 *       some of the configurable parameters to workaround hardware
	 *       limitations.  We'll perform this configuration check AFTER
	 *       setting the parameters to their default values.
	 */

	dfx_bus_config_check(bp);

	/* Disable PDQ interrupts first */

	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);

	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */

	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);

	/*
	 * Read the factory MAC address from the adapter then save it.
	 * The address comes back as two 32-bit port-control reads: the
	 * low longword carries MAC bytes 0-3, the high longword carries
	 * MAC bytes 4-5 in its low half (hence the sizeof(u16) copy
	 * below); cpu_to_le32() fixes the byte order on big-endian hosts.
	 */

	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
				 &data) != DFX_K_SUCCESS) {
		printk("%s: Could not read adapter factory MAC address!\n",
		       print_name);
		return(DFX_K_FAILURE);
	}
	le32 = cpu_to_le32(data);
	memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));

	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
				 &data) != DFX_K_SUCCESS) {
		printk("%s: Could not read adapter factory MAC address!\n",
		       print_name);
		return(DFX_K_FAILURE);
	}
	le32 = cpu_to_le32(data);
	/* Only the low 16 bits of the high longword carry MAC bytes 4-5 */
	memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));

	/*
	 * Set current address to factory address
	 *
	 * Note: Node address override support is handled through
	 *       dfx_ctl_set_mac_address.
	 */

	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
	if (dfx_bus_tc)
		board_name = "DEFTA";
	if (dfx_bus_eisa)
		board_name = "DEFEA";
	if (dfx_bus_pci)
		board_name = "DEFPA";
	pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, "
		"Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
		print_name, board_name, dfx_use_mmio ? "" : "I/O ",
		(long long)bar_start, dev->irq,
		dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	/*
	 * Get memory for descriptor block, consumer block, and other buffers
	 * that need to be DMA read or written to by the adapter.
	 */

	alloc_size = sizeof(PI_DESCR_BLOCK) +
		     PI_CMD_REQ_K_SIZE_MAX +
		     PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
		     sizeof(PI_CONSUMER_BLOCK) +
		     (PI_ALIGN_K_DESC_BLK - 1);
	/* NOTE(review): GFP_ATOMIC at probe time looks stronger than
	 * needed — presumably kept from an older codepath; confirm
	 * before relaxing to GFP_KERNEL. */
	bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
						   &bp->kmalloced_dma,
						   GFP_ATOMIC);
	if (top_v == NULL) {
		printk("%s: Could not allocate memory for host buffers "
		       "and structures!\n", print_name);
		return(DFX_K_FAILURE);
	}
	memset(top_v, 0, alloc_size);	/* zero out memory before continuing */
	top_p = bp->kmalloced_dma;	/* get physical address of buffer */

	/*
	 * To guarantee the 8K alignment required for the descriptor block, 8K - 1
	 * plus the amount of memory needed was allocated.  The physical address
	 * is now 8K aligned.  By carving up the memory in a specific order,
	 * we'll guarantee the alignment requirements for all other structures.
	 *
	 * Note: If the assumptions change regarding the non-paged, non-cached,
	 *       physically contiguous nature of the memory block or the address
	 *       alignments, then we'll need to implement a different algorithm
	 *       for allocating the needed memory.
	 */

	curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
	curr_v = top_v + (curr_p - top_p);

	/* Reserve space for descriptor block */

	bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
	bp->descr_block_phys = curr_p;
	curr_v += sizeof(PI_DESCR_BLOCK);
	curr_p += sizeof(PI_DESCR_BLOCK);

	/* Reserve space for command request buffer */

	bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
	bp->cmd_req_phys = curr_p;
	curr_v += PI_CMD_REQ_K_SIZE_MAX;
	curr_p += PI_CMD_REQ_K_SIZE_MAX;

	/* Reserve space for command response buffer */

	bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
	bp->cmd_rsp_phys = curr_p;
	curr_v += PI_CMD_RSP_K_SIZE_MAX;
	curr_p += PI_CMD_RSP_K_SIZE_MAX;

	/* Reserve space for the LLC host receive queue buffers */

	bp->rcv_block_virt = curr_v;
	bp->rcv_block_phys = curr_p;

#ifndef DYNAMIC_BUFFERS
	/* Static buffers live inside this one coherent allocation;
	 * with DYNAMIC_BUFFERS they are mapped per-skb elsewhere. */
	curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
	curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
#endif

	/* Reserve space for the consumer block */

	bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
	bp->cons_block_phys = curr_p;

	/* Display virtual and physical addresses if debug driver */

	DBG_printk("%s: Descriptor block virt = %0lX, phys = %0X\n",
		   print_name,
		   (long)bp->descr_block_virt, bp->descr_block_phys);
	DBG_printk("%s: Command Request buffer virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->cmd_req_virt, bp->cmd_req_phys);
	DBG_printk("%s: Command Response buffer virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->cmd_rsp_virt, bp->cmd_rsp_phys);
	DBG_printk("%s: Receive buffer block virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->rcv_block_virt, bp->rcv_block_phys);
	DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->cons_block_virt, bp->cons_block_phys);

	return(DFX_K_SUCCESS);
}

/*
 * =================
 * = dfx_adap_init =
 * =================
 *
 * Overview:
 *   Brings the adapter to the link avail/link unavailable state.
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   bp - pointer to board information
 *   get_buffers - non-zero if buffers to be allocated
 *
 * Functional Description:
 *   Issues the low-level firmware/hardware calls necessary to bring
 *   the adapter up, or to properly reset and restore adapter during
 *   run-time.
 *
 * Return Codes:
 *   DFX_K_SUCCESS - Adapter brought up successfully
 *   DFX_K_FAILURE - Adapter initialization failed
 *
 * Assumptions:
 *   bp->reset_type should be set to a valid reset type value before
 *   calling this routine.
 *
 * Side Effects:
 *   Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
 *   upon a successful return of this routine.
 */
static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
{
	DBG_printk("In dfx_adap_init...\n");

	/* Disable PDQ interrupts first */

	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);

	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */

	if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
	{
		printk("%s: Could not uninitialize/reset adapter!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/*
	 * When the PDQ is reset, some false Type 0 interrupts may be pending,
	 * so we'll acknowledge all Type 0 interrupts now before continuing.
	 */

	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, PI_HOST_INT_K_ACK_ALL_TYPE_0);

	/*
	 * Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state
	 *
	 * Note: We only need to clear host copies of these registers.  The PDQ reset
	 *       takes care of the on-board register values.
	 */

	bp->cmd_req_reg.lword = 0;
	bp->cmd_rsp_reg.lword = 0;
	bp->rcv_xmt_reg.lword = 0;

	/* Clear consumer block before going to DMA_AVAILABLE state */

	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));

	/* Initialize the DMA Burst Size */

	if (dfx_hw_port_ctrl_req(bp,
				 PI_PCTRL_M_SUB_CMD,
				 PI_SUB_CMD_K_BURST_SIZE_SET,
				 bp->burst_size,
				 NULL) != DFX_K_SUCCESS)
	{
		printk("%s: Could not set adapter burst size!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/*
	 * Set base address of Consumer Block
	 *
	 * Assumption: 32-bit physical address of consumer block is 64 byte
	 *             aligned.  That is, bits 0-5 of the address must be zero.
	 */

	if (dfx_hw_port_ctrl_req(bp,
				 PI_PCTRL_M_CONS_BLOCK,
				 bp->cons_block_phys,
				 0,
				 NULL) != DFX_K_SUCCESS)
	{
		printk("%s: Could not set consumer block address!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/*
	 * Set the base address of Descriptor Block and bring adapter
	 * to DMA_AVAILABLE state.
	 *
	 * Note: We also set the literal and data swapping requirements
	 *       in this command.
	 *
	 * Assumption: 32-bit physical address of descriptor block
	 *             is 8Kbyte aligned.
	 */

	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
				 (u32)(bp->descr_block_phys |
				       PI_PDATA_A_INIT_M_BSWAP_INIT),
				 0, NULL) != DFX_K_SUCCESS) {
		printk("%s: Could not set descriptor block address!\n",
		       bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* Set transmit flush timeout value */

	bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
	bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
	bp->cmd_req_virt->char_set.item[0].value = 3;	/* 3 seconds */
	bp->cmd_req_virt->char_set.item[0].item_index = 0;
	bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
	{
		printk("%s: DMA command request failed!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/* Set the initial values for eFDXEnable and MACTReq MIB objects */

	bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
	bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
	bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
	bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
	bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
	bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
	bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
	bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
	{
		printk("%s: DMA command request failed!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/* Initialize adapter CAM */

	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
	{
		printk("%s: Adapter CAM update failed!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/* Initialize adapter filters */

	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
	{
		printk("%s: Adapter filters update failed!\n", bp->dev->name);
		return(DFX_K_FAILURE);
	}

	/*
	 * Remove any existing dynamic buffers (i.e. if the adapter is being
	 * reinitialized)
	 */

	if (get_buffers)
		dfx_rcv_flush(bp);

	/* Initialize receive descriptor block and produce buffers */

	if (dfx_rcv_init(bp, get_buffers))
	{
		printk("%s: Receive buffer allocation failed\n", bp->dev->name);
		if (get_buffers)
			dfx_rcv_flush(bp);
		return(DFX_K_FAILURE);
	}

	/* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */

	bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
	{
		printk("%s: Start command failed\n", bp->dev->name);
		if (get_buffers)
			dfx_rcv_flush(bp);
		return(DFX_K_FAILURE);
	}

	/* Initialization succeeded, reenable PDQ interrupts */

	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_ENABLE_DEF_INTS);
	return(DFX_K_SUCCESS);
}

/*
 * ============
 * = dfx_open =
 * ============
 *
 * Overview:
 *   Opens the adapter
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   dev - pointer to device information
 *
 * Functional Description:
 *   This function brings the adapter to an operational state.
 *
 * Return Codes:
 *   0           - Adapter was successfully opened
 *   -EAGAIN     - Could not register IRQ or adapter initialization failed
 *
 * Assumptions:
 *   This routine should only be called for a device that was
 *   initialized successfully.
 *
 * Side Effects:
 *   Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
 *   if the open is successful.
 */
static int dfx_open(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	int ret;

	DBG_printk("In dfx_open...\n");

	/* Register IRQ - support shared interrupts by passing device ptr */

	ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
			  dev);
	if (ret) {
		printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
		return ret;
	}

	/*
	 * Set current address to factory MAC address
	 *
	 * Note: We've already done this step in dfx_driver_init.
	 *       However, it's possible that a user has set a node
	 *       address override, then closed and reopened the
	 *       adapter.  Unless we reset the device address field
	 *       now, we'll continue to use the existing modified
	 *       address.
	 */

	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);

	/* Clear local unicast/multicast address tables and counts */

	memset(bp->uc_table, 0, sizeof(bp->uc_table));
	memset(bp->mc_table, 0, sizeof(bp->mc_table));
	bp->uc_count = 0;
	bp->mc_count = 0;

	/* Disable promiscuous filter settings */

	bp->ind_group_prom = PI_FSTATE_K_BLOCK;
	bp->group_prom = PI_FSTATE_K_BLOCK;

	spin_lock_init(&bp->lock);

	/* Reset and initialize adapter */

	bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST;	/* skip self-test */
	if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
	{
		printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
		/* Release the IRQ taken above so a later open can retry */
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Set device structure info */

	netif_start_queue(dev);
	return(0);
}

/*
 * =============
 * = dfx_close =
 * =============
 *
 * Overview:
 *   Closes the device/module.
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   dev - pointer to device information
 *
 * Functional Description:
 *   This routine closes the adapter and brings it to a safe state.
 *   The interrupt service routine is deregistered with the OS.
 *   The adapter can be opened again with another call to dfx_open().
 *
 * Return Codes:
 *   Always return 0.
 *
 * Assumptions:
 *   No further requests for this adapter are made after this routine is
 *   called.  dfx_open() can be called to reset and reinitialize the
 *   adapter.
 *
 * Side Effects:
 *   Adapter should be in DMA_UNAVAILABLE state upon completion of this
 *   routine.
 */
static int dfx_close(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);

	DBG_printk("In dfx_close...\n");

	/* Disable PDQ interrupts first */

	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);

	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */

	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);

	/*
	 * Flush any pending transmit buffers
	 *
	 * Note: It's important that we flush the transmit buffers
	 *       BEFORE we clear our copy of the Type 2 register.
	 *       Otherwise, we'll have no idea how many buffers
	 *       we need to free.
	 */

	dfx_xmt_flush(bp);

	/*
	 * Clear Type 1 and Type 2 registers after adapter reset
	 *
	 * Note: Even though we're closing the adapter, it's
	 *       possible that an interrupt will occur after
	 *       dfx_close is called.  Without some assurance to
	 *       the contrary we want to make sure that we don't
	 *       process receive and transmit LLC frames and update
	 *       the Type 2 register with bad information.
	 */

	bp->cmd_req_reg.lword = 0;
	bp->cmd_rsp_reg.lword = 0;
	bp->rcv_xmt_reg.lword = 0;

	/* Clear consumer block for the same reason given above */

	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));

	/* Release all dynamically allocate skb in the receive ring. */

	dfx_rcv_flush(bp);

	/* Clear device structure flags */

	netif_stop_queue(dev);

	/* Deregister (free) IRQ */

	free_irq(dev->irq, dev);

	return(0);
}

/*
 * ======================
 * = dfx_int_pr_halt_id =
 * ======================
 *
 * Overview:
 *   Displays halt id's in string form.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   Determine current halt id and display appropriate string.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   None
 *
 * Side Effects:
 *   None
 */
static void dfx_int_pr_halt_id(DFX_board_t	*bp)
{
	PI_UINT32	port_status;			/* PDQ port status register value */
	PI_UINT32	halt_id;			/* PDQ port status halt ID */

	/* Read the latest port status */

	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);

	/* Display halt state transition information */

	/* Halt ID is a bit field within the port status word */
	halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
	switch (halt_id)
	{
	case PI_HALT_ID_K_SELFTEST_TIMEOUT:
		printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_PARITY_ERROR:
		printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_HOST_DIR_HALT:
		printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_SW_FAULT:
		printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_HW_FAULT:
		printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_PC_TRACE:
		printk("%s: Halt ID: FDDI Network PC Trace Path Test\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_DMA_ERROR:
		printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_IMAGE_CRC_ERROR:
		printk("%s: Halt ID: Firmware Image CRC Error\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_BUS_EXCEPTION:
		printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
		break;

	default:
		printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name, halt_id);
		break;
	}
}

/*
 * ==========================
 * = dfx_int_type_0_process =
 * ==========================
 *
 * Overview:
 *   Processes Type 0 interrupts.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   Processes all enabled Type 0 interrupts.  If the reason for the interrupt
 *   is a serious fault on the adapter, then an error message is displayed
 *   and the adapter is reset.
 *
 *   One tricky potential timing window is the rapid succession of "link avail"
 *   "link unavail" state change interrupts.  The acknowledgement of the Type 0
 *   interrupt must be done before reading the state from the Port Status
 *   register.  This is true because a state change could occur after reading
 *   the data, but before acknowledging the interrupt.  If this state change
 *   does happen, it would be lost because the driver is using the old state,
 *   and it will never know about the new state because it subsequently
 *   acknowledges the state change interrupt.
 *
 *          INCORRECT                                      CORRECT
 *      read type 0 int reasons                   read type 0 int reasons
 *      read adapter state                        ack type 0 interrupts
 *      ack type 0 interrupts                     read adapter state
 *      ... process interrupt ...                 ... process interrupt ...
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   None
 *
 * Side Effects:
 *   An adapter reset may occur if the adapter has any Type 0 error interrupts
 *   or if the port status indicates that the adapter is halted.  The driver
 *   is responsible for reinitializing the adapter with the current CAM
 *   contents and adapter filter settings.
 */
static void dfx_int_type_0_process(DFX_board_t	*bp)
{
	PI_UINT32	type_0_status;		/* Host Interrupt Type 0 register */
	PI_UINT32	state;			/* current adap state (from port status) */

	/*
	 * Read host interrupt Type 0 register to determine which Type 0
	 * interrupts are pending.  Immediately write it back out to clear
	 * those interrupts.
	 */

	dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);

	/* Check for Type 0 error interrupts */

	if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
				PI_TYPE_0_STAT_M_PM_PAR_ERR |
				PI_TYPE_0_STAT_M_BUS_PAR_ERR))
	{
		/* Check for Non-Existent Memory error */

		if (type_0_status & PI_TYPE_0_STAT_M_NXM)
			printk("%s: Non-Existent Memory Access Error\n", bp->dev->name);

		/* Check for Packet Memory Parity error */

		if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
			printk("%s: Packet Memory Parity Error\n", bp->dev->name);

		/* Check for Host Bus Parity error */

		if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
			printk("%s: Host Bus Parity Error\n", bp->dev->name);

		/* Reset adapter and bring it back on-line */

		bp->link_available = PI_K_FALSE;	/* link is no longer available */
		bp->reset_type = 0;			/* rerun on-board diagnostics */
		printk("%s: Resetting adapter...\n", bp->dev->name);
		if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
		{
			printk("%s: Adapter reset failed!  Disabling adapter interrupts.\n", bp->dev->name);
			dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
			return;
		}
		printk("%s: Adapter reset successful!\n", bp->dev->name);
		return;
	}

	/* Check for transmit flush interrupt */

	if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
	{
		/* Flush any pending xmt's and acknowledge the flush interrupt */

		bp->link_available = PI_K_FALSE;	/* link is no longer available */
		dfx_xmt_flush(bp);			/* flush any outstanding packets */
		(void) dfx_hw_port_ctrl_req(bp,
					    PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
					    0,
					    0,
					    NULL);
	}

	/* Check for adapter state change */

	if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
	{
		/* Get latest adapter state */

		state = dfx_hw_adap_state_rd(bp);	/* get adapter state */
		if (state == PI_STATE_K_HALTED)
		{
			/*
			 * Adapter has transitioned to HALTED state, try to reset
			 * adapter to bring it back on-line.  If reset fails,
			 * leave the adapter in the broken state.
			 */

			printk("%s: Controller has transitioned to HALTED state!\n", bp->dev->name);
			dfx_int_pr_halt_id(bp);			/* display halt id as string */

			/* Reset adapter and bring it back on-line */

			bp->link_available = PI_K_FALSE;	/* link is no longer available */
			bp->reset_type = 0;			/* rerun on-board diagnostics */
			printk("%s: Resetting adapter...\n", bp->dev->name);
			if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
			{
				printk("%s: Adapter reset failed!  Disabling adapter interrupts.\n", bp->dev->name);
				dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB, PI_HOST_INT_K_DISABLE_ALL_INTS);
				return;
			}
			printk("%s: Adapter reset successful!\n", bp->dev->name);
		}
		else if (state == PI_STATE_K_LINK_AVAIL)
		{
			bp->link_available = PI_K_TRUE;	/* set link available flag */
		}
	}
}

/*
 * ==================
 * = dfx_int_common =
 * ==================
 *
 * Overview:
 *   Interrupt service routine (ISR)
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   This is the ISR which processes incoming adapter interrupts.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   This routine assumes PDQ interrupts have not been disabled.
 *   When interrupts are disabled at the PDQ, the Port Status register
 *   is automatically cleared.  This routine uses the Port Status
 *   register value to determine whether a Type 0 interrupt occurred,
 *   so it's important that adapter interrupts are not normally
 *   enabled/disabled at the PDQ.
 *
 *   It's vital that this routine is NOT reentered for the
 *   same board and that the OS is not in another section of
 *   code (eg. dfx_xmt_queue_pkt) for the same board on a
 *   different thread.
 *
 * Side Effects:
 *   Pending interrupts are serviced.  Depending on the type of
 *   interrupt, acknowledging and clearing the interrupt at the
 *   PDQ involves writing a register to clear the interrupt bit
 *   or updating completion indices.
 */
static void dfx_int_common(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	PI_UINT32	port_status;		/* Port Status register */

	/* Process xmt interrupts - frequent case, so always call this routine */

	if(dfx_xmt_done(bp))				/* free consumed xmt packets */
		netif_wake_queue(dev);

	/* Process rcv interrupts - frequent case, so always call this routine */

	dfx_rcv_queue_process(bp);		/* service received LLC frames */

	/*
	 * Transmit and receive producer and completion indices are updated on the
	 * adapter by writing to the Type 2 Producer register.  Since the frequent
	 * case is that we'll be processing either LLC transmit or receive buffers,
	 * we'll optimize I/O writes by doing a single register write here.
	 */

	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);

	/* Read PDQ Port Status register to find out which interrupts need processing */

	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);

	/* Process Type 0 interrupts (if any) - infrequent, so only call when needed */

	if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
		dfx_int_type_0_process(bp);	/* process Type 0 interrupts */
}

/*
 * =================
 * = dfx_interrupt =
 * =================
 *
 * Overview:
 *   Interrupt processing routine
 *
 * Returns:
 *   Whether a valid interrupt was seen.
 *
 * Arguments:
 *   irq    - interrupt vector
 *   dev_id - pointer to device information
 *
 * Functional Description:
 *   This routine calls the interrupt processing routine for this adapter.  It
 *   disables and reenables adapter interrupts, as appropriate.  We can support
 *   shared interrupts since the incoming dev_id pointer provides our device
 *   structure context.
 *
 * Return Codes:
 *   IRQ_HANDLED - an IRQ was handled.
 *   IRQ_NONE    - no IRQ was handled.
 *
 * Assumptions:
 *   The interrupt acknowledgement at the hardware level (eg. ACKing the PIC
 *   on Intel-based systems) is done by the operating system outside this
 *   routine.
 *
 *   System interrupts are enabled through this call.
* * Side Effects: * Interrupts are disabled, then reenabled at the adapter. */ static irqreturn_t dfx_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; DFX_board_t *bp = netdev_priv(dev); struct device *bdev = bp->bus_dev; int dfx_bus_pci = DFX_BUS_PCI(bdev); int dfx_bus_eisa = DFX_BUS_EISA(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); /* Service adapter interrupts */ if (dfx_bus_pci) { u32 status; dfx_port_read_long(bp, PFI_K_REG_STATUS, &status); if (!(status & PFI_STATUS_M_PDQ_INT)) return IRQ_NONE; spin_lock(&bp->lock); /* Disable PDQ-PFI interrupts at PFI */ dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, PFI_MODE_M_DMA_ENB); /* Call interrupt service routine for this adapter */ dfx_int_common(dev); /* Clear PDQ interrupt status bit and reenable interrupts */ dfx_port_write_long(bp, PFI_K_REG_STATUS, PFI_STATUS_M_PDQ_INT); dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, (PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB)); spin_unlock(&bp->lock); } if (dfx_bus_eisa) { unsigned long base_addr = to_eisa_device(bdev)->base_addr; u8 status; status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); if (!(status & PI_CONFIG_STAT_0_M_PEND)) return IRQ_NONE; spin_lock(&bp->lock); /* Disable interrupts at the ESIC */ status &= ~PI_CONFIG_STAT_0_M_INT_ENB; outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status); /* Call interrupt service routine for this adapter */ dfx_int_common(dev); /* Reenable interrupts at the ESIC */ status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); status |= PI_CONFIG_STAT_0_M_INT_ENB; outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status); spin_unlock(&bp->lock); } if (dfx_bus_tc) { u32 status; dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status); if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING | PI_PSTATUS_M_XMT_DATA_PENDING | PI_PSTATUS_M_SMT_HOST_PENDING | PI_PSTATUS_M_UNSOL_PENDING | PI_PSTATUS_M_CMD_RSP_PENDING | PI_PSTATUS_M_CMD_REQ_PENDING | PI_PSTATUS_M_TYPE_0_PENDING))) return IRQ_NONE; spin_lock(&bp->lock); /* Call interrupt service 
routine for this adapter */ dfx_int_common(dev); spin_unlock(&bp->lock); } return IRQ_HANDLED; } /* * ===================== * = dfx_ctl_get_stats = * ===================== * * Overview: * Get statistics for FDDI adapter * * Returns: * Pointer to FDDI statistics structure * * Arguments: * dev - pointer to device information * * Functional Description: * Gets current MIB objects from adapter, then * returns FDDI statistics structure as defined * in if_fddi.h. * * Note: Since the FDDI statistics structure is * still new and the device structure doesn't * have an FDDI-specific get statistics handler, * we'll return the FDDI statistics structure as * a pointer to an Ethernet statistics structure. * That way, at least the first part of the statistics * structure can be decoded properly, and it allows * "smart" applications to perform a second cast to * decode the FDDI-specific statistics. * * We'll have to pay attention to this routine as the * device structure becomes more mature and LAN media * independent. 
* * Return Codes: * None * * Assumptions: * None * * Side Effects: * None */ static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev) { DFX_board_t *bp = netdev_priv(dev); /* Fill the bp->stats structure with driver-maintained counters */ bp->stats.gen.rx_packets = bp->rcv_total_frames; bp->stats.gen.tx_packets = bp->xmt_total_frames; bp->stats.gen.rx_bytes = bp->rcv_total_bytes; bp->stats.gen.tx_bytes = bp->xmt_total_bytes; bp->stats.gen.rx_errors = bp->rcv_crc_errors + bp->rcv_frame_status_errors + bp->rcv_length_errors; bp->stats.gen.tx_errors = bp->xmt_length_errors; bp->stats.gen.rx_dropped = bp->rcv_discards; bp->stats.gen.tx_dropped = bp->xmt_discards; bp->stats.gen.multicast = bp->rcv_multicast_frames; bp->stats.gen.collisions = 0; /* always zero (0) for FDDI */ /* Get FDDI SMT MIB objects */ bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET; if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) return((struct net_device_stats *) &bp->stats); /* Fill the bp->stats structure with the SMT MIB object values */ memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id)); bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id; bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id; bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id; memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data)); bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id; bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct; bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct; bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct; bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths; bp->stats.smt_config_capabilities = 
bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities; bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy; bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy; bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify; bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy; bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration; bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present; bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state; bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state; bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag; bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status; bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag; bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls; bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls; bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions; bp->stats.mac_t_max_capability = bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability; bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability; bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths; bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path; memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN); memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN); memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN); memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN); bp->stats.mac_dup_address_test = 
bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test; bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths; bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type; memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN); bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req; bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg; bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max; bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value; bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold; bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio; bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state; bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag; bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag; bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag; bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available; bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present; bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable; bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound; bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound; bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req; memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration)); bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0]; bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1]; bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0]; bp->stats.port_neighbor_type[1] = 
bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1]; bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0]; bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1]; bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0]; bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1]; bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0]; bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1]; memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3); memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3); bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0]; bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1]; bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0]; bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1]; bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0]; bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1]; bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0]; bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1]; bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0]; bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1]; bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0]; bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1]; bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0]; bp->stats.port_ler_cutoff[1] = 
bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1]; bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0]; bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1]; bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0]; bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1]; bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0]; bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1]; bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0]; bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1]; bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0]; bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1]; bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0]; bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1]; /* Get FDDI counters */ bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET; if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) return((struct net_device_stats *) &bp->stats); /* Fill the bp->stats structure with the FDDI counter values */ bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls; bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls; bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls; bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls; bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls; bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls; bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls; bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls; bp->stats.port_lem_reject_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls; 
bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls; bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls; return((struct net_device_stats *) &bp->stats); } /* * ============================== * = dfx_ctl_set_multicast_list = * ============================== * * Overview: * Enable/Disable LLC frame promiscuous mode reception * on the adapter and/or update multicast address table. * * Returns: * None * * Arguments: * dev - pointer to device information * * Functional Description: * This routine follows a fairly simple algorithm for setting the * adapter filters and CAM: * * if IFF_PROMISC flag is set * enable LLC individual/group promiscuous mode * else * disable LLC individual/group promiscuous mode * if number of incoming multicast addresses > * (CAM max size - number of unicast addresses in CAM) * enable LLC group promiscuous mode * set driver-maintained multicast address count to zero * else * disable LLC group promiscuous mode * set driver-maintained multicast address count to incoming count * update adapter CAM * update adapter filters * * Return Codes: * None * * Assumptions: * Multicast addresses are presented in canonical (LSB) format. * * Side Effects: * On-board adapter CAM and filters are updated. 
 */

/*
 * Reconcile the adapter's LLC receive filters and multicast CAM with the
 * current net_device state.  IFF_PROMISC maps to the LLC individual/group
 * promiscuous filter; otherwise dev->mc_list is mirrored into the on-board
 * CAM, falling back to the all-multicast filter when the list is too large.
 */
static void dfx_ctl_set_multicast_list(struct net_device *dev)
	{
	DFX_board_t *bp = netdev_priv(dev);
	int					i;			/* used as index in for loop */
	struct dev_mc_list	*dmi;		/* ptr to multicast addr entry */

	/* Enable LLC frame promiscuous mode, if necessary */

	if (dev->flags & IFF_PROMISC)
		bp->ind_group_prom = PI_FSTATE_K_PASS;		/* Enable LLC ind/group prom mode */

	/* Else, update multicast address table */

	else
		{
		bp->ind_group_prom = PI_FSTATE_K_BLOCK;	/* Disable LLC ind/group prom mode */
		/*
		 * Check whether incoming multicast address count exceeds table size
		 *
		 * Note: The adapters utilize an on-board 64 entry CAM for
		 *       supporting perfect filtering of multicast packets
		 *       and bridge functions when adding unicast addresses.
		 *       There is no hash function available.  To support
		 *       additional multicast addresses, the all multicast
		 *       filter (LLC group promiscuous mode) must be enabled.
		 *
		 *       The firmware reserves two CAM entries for SMT-related
		 *       multicast addresses, which leaves 62 entries available.
		 *       The following code ensures that we're not being asked
		 *       to add more than 62 addresses to the CAM.  If we are,
		 *       the driver will enable the all multicast filter.
		 *       Should the number of multicast addresses drop below
		 *       the high water mark, the filter will be disabled and
		 *       perfect filtering will be used.
		 */

		if (dev->mc_count > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
			{
			bp->group_prom	= PI_FSTATE_K_PASS;		/* Enable LLC group prom mode */
			bp->mc_count	= 0;					/* Don't add mc addrs to CAM */
			}
		else
			{
			bp->group_prom	= PI_FSTATE_K_BLOCK;	/* Disable LLC group prom mode */
			bp->mc_count	= dev->mc_count;		/* Add mc addrs to CAM */
			}

		/* Copy addresses to multicast address table, then update adapter CAM */

		dmi = dev->mc_list;				/* point to first multicast addr */
		for (i=0; i < bp->mc_count; i++)
			{
			/* mc_table is a flat byte array of 6-byte (FDDI_K_ALEN) entries */
			memcpy(&bp->mc_table[i*FDDI_K_ALEN], dmi->dmi_addr, FDDI_K_ALEN);
			dmi = dmi->next;			/* point to next multicast addr */
			}
		if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
			{
			DBG_printk("%s: Could not update multicast address table!\n", dev->name);
			}
		else
			{
			DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count);
			}
		}

	/* Update adapter filters */

	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
		{
		DBG_printk("%s: Could not update adapter filters!\n", dev->name);
		}
	else
		{
		DBG_printk("%s: Adapter filters updated!\n", dev->name);
		}
	}


/*
 * ===========================
 * = dfx_ctl_set_mac_address =
 * ===========================
 *
 * Overview:
 *   Add node address override (unicast address) to adapter
 *   CAM and update dev_addr field in device table.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   dev  - pointer to device information
 *   addr - pointer to sockaddr structure containing unicast address to add
 *
 * Functional Description:
 *   The adapter supports node address overrides by adding one or more
 *   unicast addresses to the adapter CAM.  This is similar to adding
 *   multicast addresses.  In this routine we'll update the driver and
 *   device structures with the new address, then update the adapter CAM
 *   to ensure that the adapter will copy and strip frames destined and
 *   sourced by that address.
 *
 * Return Codes:
 *   Always returns zero.
 *
 * Assumptions:
 *   The address pointed to by addr->sa_data is a valid unicast
 *   address and is presented in canonical (LSB) format.
 *
 * Side Effects:
 *   On-board adapter CAM is updated.  On-board adapter filters
 *   may be updated.
 */

static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
	{
	struct sockaddr	*p_sockaddr = (struct sockaddr *)addr;
	DFX_board_t *bp = netdev_priv(dev);

	/* Copy unicast address to driver-maintained structs and update count */

	memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);	/* update device struct */
	memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN);	/* update driver struct */
	bp->uc_count = 1;	/* driver supports exactly one node address override */

	/*
	 * Verify we're not exceeding the CAM size by adding unicast address
	 *
	 * Note: It's possible that before entering this routine we've
	 *       already filled the CAM with 62 multicast addresses.
	 *       Since we need to place the node address override into
	 *       the CAM, we have to check to see that we're not
	 *       exceeding the CAM size.  If we are, we have to enable
	 *       the LLC group (multicast) promiscuous mode filter as
	 *       in dfx_ctl_set_multicast_list.
	 */

	if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE)
		{
		bp->group_prom	= PI_FSTATE_K_PASS;		/* Enable LLC group prom mode */
		bp->mc_count	= 0;					/* Don't add mc addrs to CAM */

		/* Update adapter filters */

		if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
			{
			DBG_printk("%s: Could not update adapter filters!\n", dev->name);
			}
		else
			{
			DBG_printk("%s: Adapter filters updated!\n", dev->name);
			}
		}

	/* Update adapter CAM with new unicast address */

	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
		{
		DBG_printk("%s: Could not set new MAC address!\n", dev->name);
		}
	else
		{
		DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name);
		}
	return(0);			/* always return zero */
	}


/*
 * ======================
 * = dfx_ctl_update_cam =
 * ======================
 *
 * Overview:
 *   Procedure to update adapter CAM (Content Addressable Memory)
 *   with desired unicast and multicast address entries.
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   Updates adapter CAM with current contents of board structure
 *   unicast and multicast address tables.  Since there are only 62
 *   free entries in CAM, this routine ensures that the command
 *   request buffer is not overrun.
 *
 * Return Codes:
 *   DFX_K_SUCCESS - Request succeeded
 *   DFX_K_FAILURE - Request failed
 *
 * Assumptions:
 *   All addresses being added (unicast and multicast) are in canonical
 *   order.
 *
 * Side Effects:
 *   On-board adapter CAM is updated.
 */

static int dfx_ctl_update_cam(DFX_board_t *bp)
	{
	int			i;				/* used as index */
	PI_LAN_ADDR	*p_addr;		/* pointer to CAM entry */

	/*
	 * Fill in command request information
	 *
	 * Note: Even though both the unicast and multicast address
	 *       table entries are stored as contiguous 6 byte entries,
	 *       the firmware address filter set command expects each
	 *       entry to be two longwords (8 bytes total).  We must be
	 *       careful to only copy the six bytes of each unicast and
	 *       multicast table entry into each command entry.  This
	 *       is also why we must first clear the entire command
	 *       request buffer.
	 */

	memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX);	/* first clear buffer */
	bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET;
	p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0];

	/* Now add unicast addresses to command request buffer, if any */

	for (i=0; i < (int)bp->uc_count; i++)
		{
		/* bounds check keeps the firmware command buffer from overrunning */
		if (i < PI_CMD_ADDR_FILTER_K_SIZE)
			{
			memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
			p_addr++;			/* point to next command entry */
			}
		}

	/* Now add multicast addresses to command request buffer, if any */

	for (i=0; i < (int)bp->mc_count; i++)
		{
		/* multicast entries follow the unicast entries, so bound their sum */
		if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE)
			{
			memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN);
			p_addr++;			/* point to next command entry */
			}
		}

	/* Issue command to update adapter CAM, then return */

	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
		return(DFX_K_FAILURE);
	return(DFX_K_SUCCESS);
	}


/*
 * ==========================
 * = dfx_ctl_update_filters =
 * ==========================
 *
 * Overview:
 *   Procedure to update adapter filters with desired
 *   filter settings.
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   Enables or disables filter using current filter settings.
 *
 * Return Codes:
 *   DFX_K_SUCCESS - Request succeeded.
 *   DFX_K_FAILURE - Request failed.
 *
 * Assumptions:
 *   We must always pass up packets destined to the broadcast
 *   address (FF-FF-FF-FF-FF-FF), so we'll always keep the
 *   broadcast filter enabled.
 *
 * Side Effects:
 *   On-board adapter filters are updated.
 */

static int dfx_ctl_update_filters(DFX_board_t *bp)
	{
	int	i = 0;					/* used as index */

	/* Fill in command request information */

	bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET;

	/* Initialize Broadcast filter - * ALWAYS ENABLED * */

	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_BROADCAST;
	bp->cmd_req_virt->filter_set.item[i++].value	= PI_FSTATE_K_PASS;

	/* Initialize LLC Individual/Group Promiscuous filter */

	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_IND_GROUP_PROM;
	bp->cmd_req_virt->filter_set.item[i++].value	= bp->ind_group_prom;

	/* Initialize LLC Group Promiscuous filter */

	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_GROUP_PROM;
	bp->cmd_req_virt->filter_set.item[i++].value	= bp->group_prom;

	/* Terminate the item code list (firmware stops parsing at EOL) */

	bp->cmd_req_virt->filter_set.item[i].item_code	= PI_ITEM_K_EOL;

	/* Issue command to update adapter filters, then return */

	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
		return(DFX_K_FAILURE);
	return(DFX_K_SUCCESS);
	}


/*
 * ======================
 * = dfx_hw_dma_cmd_req =
 * ======================
 *
 * Overview:
 *   Sends PDQ DMA command to adapter firmware
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   The command request and response buffers are posted to the adapter in the manner
 *   described in the PDQ Port Specification:
 *
 *		1. Command Response Buffer is posted to adapter.
 *		2. Command Request Buffer is posted to adapter.
 *		3. Command Request consumer index is polled until it indicates that request
 *         buffer has been DMA'd to adapter.
 *		4. Command Response consumer index is polled until it indicates that response
 *         buffer has been DMA'd from adapter.
 *
 *   This ordering ensures that a response buffer is already available for the firmware
 *   to use once it's done processing the request buffer.
 *
 * Return Codes:
 *   DFX_K_SUCCESS	  - DMA command succeeded
 * 	 DFX_K_OUTSTATE   - Adapter is NOT in proper state
 *   DFX_K_HW_TIMEOUT - DMA command timed out
 *
 * Assumptions:
 *   Command request buffer has already been filled with desired DMA command.
 *
 * Side Effects:
 *   None
 */

static int dfx_hw_dma_cmd_req(DFX_board_t *bp)
	{
	int status;			/* adapter status */
	int timeout_cnt;	/* used in for loops */

	/* Make sure the adapter is in a state that we can issue the DMA command in */

	status = dfx_hw_adap_state_rd(bp);
	if ((status == PI_STATE_K_RESET)		||
		(status == PI_STATE_K_HALTED)		||
		(status == PI_STATE_K_DMA_UNAVAIL)	||
		(status == PI_STATE_K_UPGRADE))
		return(DFX_K_OUTSTATE);

	/* Put response buffer on the command response queue */

	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
			((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
	bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys;

	/* Bump (and wrap) the producer index and write out to register */

	bp->cmd_rsp_reg.index.prod += 1;
	bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1;	/* ring sizes are powers of two */
	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);

	/* Put request buffer on the command request queue */

	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP |
			PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN));
	bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys;

	/* Bump (and wrap) the producer index and write out to register */

	bp->cmd_req_reg.index.prod += 1;
	bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1;
	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);

	/*
	 * Here we wait for the command request consumer index to be equal
	 * to the producer, indicating that the adapter has DMAed the request.
	 */

	/* busy-polls up to ~2 seconds (20000 * 100us) for the request DMA */
	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
		{
		if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req))
			break;
		udelay(100);			/* wait for 100 microseconds */
		}
	if (timeout_cnt == 0)
		return(DFX_K_HW_TIMEOUT);

	/* Bump (and wrap) the completion index and write out to register */
	/* (the register write pushes the whole lword, which holds both indices) */

	bp->cmd_req_reg.index.comp += 1;
	bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1;
	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword);

	/*
	 * Here we wait for the command response consumer index to be equal
	 * to the producer, indicating that the adapter has DMAed the response.
	 */

	for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--)
		{
		if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp))
			break;
		udelay(100);			/* wait for 100 microseconds */
		}
	if (timeout_cnt == 0)
		return(DFX_K_HW_TIMEOUT);

	/* Bump (and wrap) the completion index and write out to register */

	bp->cmd_rsp_reg.index.comp += 1;
	bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1;
	dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword);
	return(DFX_K_SUCCESS);
	}


/*
 * ========================
 * = dfx_hw_port_ctrl_req =
 * ========================
 *
 * Overview:
 *   Sends PDQ port control command to adapter firmware
 *
 * Returns:
 *   Host data register value in host_data if ptr is not NULL
 *
 * Arguments:
 *   bp			- pointer to board information
 *   command	- port control command
 *   data_a		- port data A register value
 *   data_b		- port data B register value
 *   host_data	- ptr to host data register value
 *
 * Functional Description:
 *   Send generic port control command to adapter by writing
 *   to various PDQ port registers, then polling for completion.
 *
 * Return Codes:
 *   DFX_K_SUCCESS	  - port control command succeeded
 *   DFX_K_HW_TIMEOUT - port control command timed out
 *
 * Assumptions:
 *   None
 *
 * Side Effects:
 *   None
 */

static int dfx_hw_port_ctrl_req(
	DFX_board_t	*bp,
	PI_UINT32	command,
	PI_UINT32	data_a,
	PI_UINT32	data_b,
	PI_UINT32	*host_data
	)

	{
	PI_UINT32	port_cmd;		/* Port Control command register value */
	int			timeout_cnt;	/* used in for loops */

	/* Set Command Error bit in command longword */
	/* (the adapter clears this bit when the command completes) */

	port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR);

	/* Issue port command to the adapter */

	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a);
	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b);
	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd);

	/* Now wait for command to complete */

	if (command == PI_PCTRL_M_BLAST_FLASH)
		timeout_cnt = 600000;	/* set command timeout count to 60 seconds */
	else
		timeout_cnt = 20000;	/* set command timeout count to 2 seconds */

	for (; timeout_cnt > 0; timeout_cnt--)
		{
		dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd);
		if (!(port_cmd & PI_PCTRL_M_CMD_ERROR))
			break;
		udelay(100);			/* wait for 100 microseconds */
		}
	if (timeout_cnt == 0)
		return(DFX_K_HW_TIMEOUT);

	/*
	 * If the address of host_data is non-zero, assume caller has supplied a
	 * non NULL pointer, and return the contents of the HOST_DATA register in
	 * it.
	 */

	if (host_data != NULL)
		dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data);
	return(DFX_K_SUCCESS);
	}


/*
 * =====================
 * = dfx_hw_adap_reset =
 * =====================
 *
 * Overview:
 *   Resets adapter
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp   - pointer to board information
 *   type - type of reset to perform
 *
 * Functional Description:
 *   Issue soft reset to adapter by writing to PDQ Port Reset
 *   register.  Use incoming reset type to tell adapter what
 *   kind of reset operation to perform.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   This routine merely issues a soft reset to the adapter.
 *   It is expected that after this routine returns, the caller
 *   will appropriately poll the Port Status register for the
 *   adapter to enter the proper state.
 *
 * Side Effects:
 *   Internal adapter registers are cleared.
 */

static void dfx_hw_adap_reset(
	DFX_board_t	*bp,
	PI_UINT32	type
	)

	{
	/* Set Reset type and assert reset */

	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type);	/* tell adapter type of reset */
	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET);

	/* Wait for at least 1 Microsecond according to the spec. We wait 20 just to be safe */

	udelay(20);

	/* Deassert reset */

	dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0);
	}


/*
 * ========================
 * = dfx_hw_adap_state_rd =
 * ========================
 *
 * Overview:
 *   Returns current adapter state
 *
 * Returns:
 *   Adapter state per PDQ Port Specification
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   Reads PDQ Port Status register and returns adapter state.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   None
 *
 * Side Effects:
 *   None
 */

static int dfx_hw_adap_state_rd(DFX_board_t *bp)
	{
	PI_UINT32 port_status;		/* Port Status register value */

	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
	/* extract the STATE field from the status longword */
	return((port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE);
	}


/*
 * =====================
 * = dfx_hw_dma_uninit =
 * =====================
 *
 * Overview:
 *   Brings adapter to DMA_UNAVAILABLE state
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   bp   - pointer to board information
 *   type - type of reset to perform
 *
 * Functional Description:
 *   Bring adapter to DMA_UNAVAILABLE state by performing the following:
 *		1. Set reset type bit in Port Data A Register then reset adapter.
 *		2. Check that adapter is in DMA_UNAVAILABLE state.
 *
 * Return Codes:
 *   DFX_K_SUCCESS	  - adapter is in DMA_UNAVAILABLE state
 *   DFX_K_HW_TIMEOUT - adapter did not reset properly
 *
 * Assumptions:
 *   None
 *
 * Side Effects:
 *   Internal adapter registers are cleared.
 */

static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
	{
	int timeout_cnt;	/* used in for loops */

	/* Set reset type bit and reset adapter */

	dfx_hw_adap_reset(bp, type);

	/* Now wait for adapter to enter DMA_UNAVAILABLE state */

	/* busy-polls up to ~10 seconds (100000 * 100us) */
	for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--)
		{
		if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL)
			break;
		udelay(100);					/* wait for 100 microseconds */
		}
	if (timeout_cnt == 0)
		return(DFX_K_HW_TIMEOUT);
	return(DFX_K_SUCCESS);
	}

/*
 *	Align an sk_buff to a boundary power of 2
 *
 *	Advances skb->data (via skb_reserve) to the next n-byte boundary;
 *	n must be a power of two.
 */

static void my_skb_align(struct sk_buff *skb, int n)
{
	unsigned long x = (unsigned long)skb->data;
	unsigned long v;

	v = ALIGN(x, n);	/* Where we want to be */

	skb_reserve(skb, v - x);
}


/*
 * ================
 * = dfx_rcv_init =
 * ================
 *
 * Overview:
 *   Produces buffers to adapter LLC Host receive descriptor block
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp          - pointer to board information
 *   get_buffers - non-zero if buffers to be allocated
 *
 * Functional Description:
 *   This routine can be called during dfx_adap_init() or during an adapter
 *   reset.  It initializes the descriptor block and produces all allocated
 *   LLC Host queue receive buffers.
 *
 * Return Codes:
 *   Return 0 on success or -ENOMEM if buffer allocation failed (when using
 *   dynamic buffer allocation). If the buffer allocation failed, the
 *   already allocated buffers will not be released and the caller should do
 *   this.
 *
 * Assumptions:
 *   The PDQ has been reset and the adapter and driver maintained Type 2
 *   register indices are cleared.
 *
 * Side Effects:
 *   Receive buffers are posted to the adapter LLC queue and the adapter
 *   is notified.
 */

static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
	{
	int	i, j;					/* used in for loop */

	/*
	 * Since each receive buffer is a single fragment of same length, initialize
	 * first longword in each receive descriptor for entire LLC Host descriptor
	 * block.  Also initialize second longword in each receive descriptor with
	 * physical address of receive buffer.  We'll always allocate receive
	 * buffers in powers of 2 so that we can easily fill the 256 entry descriptor
	 * block and produce new receive buffers by simply updating the receive
	 * producer index.
	 *
	 * 	Assumptions:
	 *		To support all shipping versions of PDQ, the receive buffer size
	 *		must be mod 128 in length and the physical address must be 128 byte
	 *		aligned.  In other words, bits 0-6 of the length and address must
	 *		be zero for the following descriptor field entries to be correct on
	 *		all PDQ-based boards.  We guaranteed both requirements during
	 *		driver initialization when we allocated memory for the receive buffers.
	 */

	if (get_buffers) {
#ifdef DYNAMIC_BUFFERS
	/* interleaved fill: entry i+j for each of the rcv_bufs_to_post groups */
	for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
		for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
		{
			struct sk_buff *newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, GFP_NOIO);
			if (!newskb)
				return -ENOMEM;
			bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
				((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
			/*
			 * align to 128 bytes for compatibility with
			 * the old EISA boards.
			 */

			my_skb_align(newskb, 128);
			bp->descr_block_virt->rcv_data[i + j].long_1 =
				(u32)dma_map_single(bp->bus_dev, newskb->data,
						    NEW_SKB_SIZE,
						    DMA_FROM_DEVICE);

			/*
			 * p_rcv_buff_va is only used inside the
			 * kernel so we put the skb pointer here.
			 */
			bp->p_rcv_buff_va[i+j] = (char *) newskb;
		}
#else
	for (i=0; i < (int)(bp->rcv_bufs_to_post); i++)
		for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
			{
			bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
				((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
			/* static buffers: physical and virtual addresses come from the
			 * pre-allocated receive block; note index i (not i+j) selects the
			 * buffer, so buffers are shared across the 256 descriptor entries */
			bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
			bp->p_rcv_buff_va[i+j] = (char *) (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
			}
#endif
	}

	/* Update receive producer and Type 2 register */

	bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post;
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
	return 0;
	}


/*
 * =========================
 * = dfx_rcv_queue_process =
 * =========================
 *
 * Overview:
 *   Process received LLC frames.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   Received LLC frames are processed until there are no more consumed frames.
 *   Once all frames are processed, the receive buffers are returned to the
 *   adapter.  Note that this algorithm fixes the length of time that can be spent
 *   in this routine, because there are a fixed number of receive buffers to
 *   process and buffers are not produced until this routine exits and returns
 *   to the ISR.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   None
 *
 * Side Effects:
 *   None
 */

static void dfx_rcv_queue_process(
	DFX_board_t *bp
	)

	{
	PI_TYPE_2_CONSUMER	*p_type_2_cons;		/* ptr to rcv/xmt consumer block register */
	char				*p_buff;			/* ptr to start of packet receive buffer (FMC descriptor) */
	u32					descr, pkt_len;		/* FMC descriptor field and packet length */
	struct sk_buff		*skb;				/* pointer to a sk_buff to hold incoming packet data */

	/* Service all consumed LLC receive frames */

	p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
	while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
		{
		/* Process any errors */

		int entry;

		entry = bp->rcv_xmt_reg.index.rcv_comp;
#ifdef DYNAMIC_BUFFERS
		p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
#else
		p_buff = (char *) bp->p_rcv_buff_va[entry];
#endif
		/* read the FMC descriptor the adapter prepended to the buffer */
		memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));

		if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
			{
			if (descr & PI_FMC_DESCR_M_RCC_CRC)
				bp->rcv_crc_errors++;
			else
				bp->rcv_frame_status_errors++;
			}
		else
		{
			int rx_in_place = 0;	/* set when we hand the DMA buffer itself upstream */

			/* The frame was received without errors - verify packet length */

			pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN);
			pkt_len -= 4;				/* subtract 4 byte CRC */
			if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN))
				bp->rcv_length_errors++;
			else{
#ifdef DYNAMIC_BUFFERS
				/* large frame: swap in a fresh skb and pass the full one up
				 * without copying; small frames are copied below */
				if (pkt_len > SKBUFF_RX_COPYBREAK) {
					struct sk_buff *newskb;

					newskb = dev_alloc_skb(NEW_SKB_SIZE);
					if (newskb){
						rx_in_place = 1;
						my_skb_align(newskb, 128);
						skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
						dma_unmap_single(bp->bus_dev,
							bp->descr_block_virt->rcv_data[entry].long_1,
							NEW_SKB_SIZE,
							DMA_FROM_DEVICE);
						skb_reserve(skb, RCV_BUFF_K_PADDING);
						bp->p_rcv_buff_va[entry] = (char *)newskb;
						bp->descr_block_virt->rcv_data[entry].long_1 =
							(u32)dma_map_single(bp->bus_dev,
								newskb->data,
								NEW_SKB_SIZE,
								DMA_FROM_DEVICE);
					} else
						skb = NULL;
				} else
#endif
					skb = dev_alloc_skb(pkt_len+3);	/* alloc new buffer to pass up, add room for PRH */
				if (skb == NULL)
					{
					printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name);
					bp->rcv_discards++;
					break;
					}
				else {
					/* NOTE(review): with DYNAMIC_BUFFERS defined this copy runs
					 * even when rx_in_place is set (src == dst then) — looks
					 * redundant in that path; confirm before changing. */
#ifndef DYNAMIC_BUFFERS
					if (! rx_in_place)
#endif
					{
						/* Receive buffer allocated, pass receive packet up */

						skb_copy_to_linear_data(skb,
							       p_buff + RCV_BUFF_K_PADDING,
							       pkt_len + 3);
					}

					skb_reserve(skb,3);		/* adjust data field so that it points to FC byte */
					skb_put(skb, pkt_len);	/* pass up packet length, NOT including CRC */
					skb->protocol = fddi_type_trans(skb, bp->dev);
					bp->rcv_total_bytes += skb->len;
					netif_rx(skb);

					/* Update the rcv counters */
					bp->rcv_total_frames++;
					if (*(p_buff + RCV_BUFF_K_DA) & 0x01)
						bp->rcv_multicast_frames++;
				}
			}
			}

		/*
		 * Advance the producer (for recycling) and advance the completion
		 * (for servicing received frames).  Note that it is okay to
		 * advance the producer without checking that it passes the
		 * completion index because they are both advanced at the same
		 * rate.
		 */

		bp->rcv_xmt_reg.index.rcv_prod += 1;
		bp->rcv_xmt_reg.index.rcv_comp += 1;
		}
	}


/*
 * =====================
 * = dfx_xmt_queue_pkt =
 * =====================
 *
 * Overview:
 *   Queues packets for transmission
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   skb - pointer to sk_buff to queue for transmission
 *   dev - pointer to device information
 *
 * Functional Description:
 *   Here we assume that an incoming skb transmit request
 *   is contained in a single physically contiguous buffer
 *   in which the virtual address of the start of packet
 *   (skb->data) can be converted to a physical address
 *   by using pci_map_single().
 *
 *   Since the adapter architecture requires a three byte
 *   packet request header to prepend the start of packet,
 *   we'll write the three byte field immediately prior to
 *   the FC byte.  This assumption is valid because we've
 *   ensured that dev->hard_header_len includes three pad
 *   bytes.  By posting a single fragment to the adapter,
 *   we'll reduce the number of descriptor fetches and
 *   bus traffic needed to send the request.
* * Also, we can't free the skb until after it's been DMA'd * out by the adapter, so we'll queue it in the driver and * return it in dfx_xmt_done. * * Return Codes: * 0 - driver queued packet, link is unavailable, or skbuff was bad * 1 - caller should requeue the sk_buff for later transmission * * Assumptions: * First and foremost, we assume the incoming skb pointer * is NOT NULL and is pointing to a valid sk_buff structure. * * The outgoing packet is complete, starting with the * frame control byte including the last byte of data, * but NOT including the 4 byte CRC. We'll let the * adapter hardware generate and append the CRC. * * The entire packet is stored in one physically * contiguous buffer which is not cached and whose * 32-bit physical address can be determined. * * It's vital that this routine is NOT reentered for the * same board and that the OS is not in another section of * code (eg. dfx_int_common) for the same board on a * different thread. * * Side Effects: * None */ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, struct net_device *dev) { DFX_board_t *bp = netdev_priv(dev); u8 prod; /* local transmit producer index */ PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */ XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */ unsigned long flags; netif_stop_queue(dev); /* * Verify that incoming transmit request is OK * * Note: The packet size check is consistent with other * Linux device drivers, although the correct packet * size should be verified before calling the * transmit routine. 
*/ if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN)) { printk("%s: Invalid packet length - %u bytes\n", dev->name, skb->len); bp->xmt_length_errors++; /* bump error counter */ netif_wake_queue(dev); dev_kfree_skb(skb); return NETDEV_TX_OK; /* return "success" */ } /* * See if adapter link is available, if not, free buffer * * Note: If the link isn't available, free buffer and return 0 * rather than tell the upper layer to requeue the packet. * The methodology here is that by the time the link * becomes available, the packet to be sent will be * fairly stale. By simply dropping the packet, the * higher layer protocols will eventually time out * waiting for response packets which it won't receive. */ if (bp->link_available == PI_K_FALSE) { if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL) /* is link really available? */ bp->link_available = PI_K_TRUE; /* if so, set flag and continue */ else { bp->xmt_discards++; /* bump error counter */ dev_kfree_skb(skb); /* free sk_buff now */ netif_wake_queue(dev); return NETDEV_TX_OK; /* return "success" */ } } spin_lock_irqsave(&bp->lock, flags); /* Get the current producer and the next free xmt data descriptor */ prod = bp->rcv_xmt_reg.index.xmt_prod; p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]); /* * Get pointer to auxiliary queue entry to contain information * for this packet. * * Note: The current xmt producer index will become the * current xmt completion index when we complete this * packet later on. So, we'll get the pointer to the * next auxiliary queue entry now before we bump the * producer index. 
*/ p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]); /* also bump producer index */ /* Write the three PRH bytes immediately before the FC byte */ skb_push(skb,3); skb->data[0] = DFX_PRH0_BYTE; /* these byte values are defined */ skb->data[1] = DFX_PRH1_BYTE; /* in the Motorola FDDI MAC chip */ skb->data[2] = DFX_PRH2_BYTE; /* specification */ /* * Write the descriptor with buffer info and bump producer * * Note: Since we need to start DMA from the packet request * header, we'll add 3 bytes to the DMA buffer length, * and we'll determine the physical address of the * buffer from the PRH, not skb->data. * * Assumptions: * 1. Packet starts with the frame control (FC) byte * at skb->data. * 2. The 4-byte CRC is not appended to the buffer or * included in the length. * 3. Packet length (skb->len) is from FC to end of * data, inclusive. * 4. The packet length does not exceed the maximum * FDDI LLC frame length of 4491 bytes. * 5. The entire packet is contained in a physically * contiguous, non-cached, locked memory space * comprised of a single buffer pointed to by * skb->data. * 6. The physical address of the start of packet * can be determined from the virtual address * by using pci_map_single() and is only 32-bits * wide. */ p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN)); p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data, skb->len, DMA_TO_DEVICE); /* * Verify that descriptor is actually available * * Note: If descriptor isn't available, return 1 which tells * the upper layer to requeue the packet for later * transmission. * * We need to ensure that the producer never reaches the * completion, except to indicate that the queue is empty. 
*/ if (prod == bp->rcv_xmt_reg.index.xmt_comp) { skb_pull(skb,3); spin_unlock_irqrestore(&bp->lock, flags); return NETDEV_TX_BUSY; /* requeue packet for later */ } /* * Save info for this packet for xmt done indication routine * * Normally, we'd save the producer index in the p_xmt_drv_descr * structure so that we'd have it handy when we complete this * packet later (in dfx_xmt_done). However, since the current * transmit architecture guarantees a single fragment for the * entire packet, we can simply bump the completion index by * one (1) for each completed packet. * * Note: If this assumption changes and we're presented with * an inconsistent number of transmit fragments for packet * data, we'll need to modify this code to save the current * transmit producer index. */ p_xmt_drv_descr->p_skb = skb; /* Update Type 2 register */ bp->rcv_xmt_reg.index.xmt_prod = prod; dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword); spin_unlock_irqrestore(&bp->lock, flags); netif_wake_queue(dev); return NETDEV_TX_OK; /* packet queued to adapter */ } /* * ================ * = dfx_xmt_done = * ================ * * Overview: * Processes all frames that have been transmitted. * * Returns: * None * * Arguments: * bp - pointer to board information * * Functional Description: * For all consumed transmit descriptors that have not * yet been completed, we'll free the skb we were holding * onto using dev_kfree_skb and bump the appropriate * counters. * * Return Codes: * None * * Assumptions: * The Type 2 register is not updated in this routine. It is * assumed that it will be updated in the ISR when dfx_xmt_done * returns. 
 *
 * Side Effects:
 *   None
 */

static int dfx_xmt_done(DFX_board_t *bp)
{
	XMT_DRIVER_DESCR *p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
	PI_TYPE_2_CONSUMER *p_type_2_cons;	/* ptr to rcv/xmt consumer block register */
	u8 comp;				/* local transmit completion index */
	int freed = 0;				/* buffers freed */

	/*
	 * Service all consumed transmit frames: walk the completion index
	 * forward until it catches up with the adapter's consumer index.
	 */

	p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data);
	while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons)
	{
		/* Get pointer to the transmit driver descriptor block information */

		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);

		/* Increment transmit counters */

		bp->xmt_total_frames++;
		bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len;

		/*
		 * Return skb to operating system.  The DMA mapping that was
		 * set up in dfx_xmt_queue_pkt (descriptor long_1) is undone
		 * before the skb is freed; dev_kfree_skb_irq is used because
		 * this runs in interrupt context.
		 */
		comp = bp->rcv_xmt_reg.index.xmt_comp;
		dma_unmap_single(bp->bus_dev,
				 bp->descr_block_virt->xmt_data[comp].long_1,
				 p_xmt_drv_descr->p_skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);

		/*
		 * Move to start of next packet by updating completion index
		 *
		 * Here we assume that a transmit packet request is always
		 * serviced by posting one fragment.  We can therefore
		 * simplify the completion code by incrementing the
		 * completion index by one.  This code will need to be
		 * modified if this assumption changes.  See comments
		 * in dfx_xmt_queue_pkt for more details.
		 */

		bp->rcv_xmt_reg.index.xmt_comp += 1;
		freed++;
	}
	return freed;
}


/*
 * =================
 * = dfx_rcv_flush =
 * =================
 *
 * Overview:
 *   Remove all skb's in the receive ring.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   Free's all the dynamically allocated skb's that are
 *   currently attached to the device receive ring.  This
 *   function is typically only used when the device is
 *   initialized or reinitialized.
 *
 * Return Codes:
 *   None
 *
 * Side Effects:
 *   None
 */

#ifdef DYNAMIC_BUFFERS
static void dfx_rcv_flush( DFX_board_t *bp )
{
	int i, j;

	/*
	 * Visit every slot of the receive ring exactly once: the outer
	 * loop selects a starting offset within one posting group, and the
	 * inner loop strides through the ring in steps of rcv_bufs_to_post,
	 * so together (i + j) covers [0, PI_RCV_DATA_K_NUM_ENTRIES).
	 */
	for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
		for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES;
		     j += bp->rcv_bufs_to_post)
		{
			struct sk_buff *skb;
			skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
			if (skb)
				dev_kfree_skb(skb);
			/* clear the slot so a later flush won't double-free */
			bp->p_rcv_buff_va[i+j] = NULL;
		}
}
#else
/* Without DYNAMIC_BUFFERS the receive ring holds no skb's - nothing to do. */
static inline void dfx_rcv_flush( DFX_board_t *bp )
{
}
#endif /* DYNAMIC_BUFFERS */


/*
 * =================
 * = dfx_xmt_flush =
 * =================
 *
 * Overview:
 *   Processes all frames whether they've been transmitted
 *   or not.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   For all produced transmit descriptors that have not
 *   yet been completed, we'll free the skb we were holding
 *   onto using dev_kfree_skb and bump the appropriate
 *   counters.  Of course, it's possible that some of
 *   these transmit requests actually did go out, but we
 *   won't make that distinction here.  Finally, we'll
 *   update the consumer index to match the producer.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   This routine does NOT update the Type 2 register.  It
 *   is assumed that this routine is being called during a
 *   transmit flush interrupt, or a shutdown or close routine.
 *
 * Side Effects:
 *   None
 */

static void dfx_xmt_flush( DFX_board_t *bp )
{
	u32 prod_cons;				/* rcv/xmt consumer block longword */
	XMT_DRIVER_DESCR *p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
	u8 comp;				/* local transmit completion index */

	/*
	 * Flush all outstanding transmit frames: advance the completion
	 * index until it meets the producer, freeing each queued skb
	 * (and its DMA mapping) on the way.
	 */

	while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
	{
		/* Get pointer to the transmit driver descriptor block information */

		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);

		/* Return skb to operating system */
		comp = bp->rcv_xmt_reg.index.xmt_comp;
		dma_unmap_single(bp->bus_dev,
				 bp->descr_block_virt->xmt_data[comp].long_1,
				 p_xmt_drv_descr->p_skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb(p_xmt_drv_descr->p_skb);

		/* Increment transmit error counter */

		bp->xmt_discards++;

		/*
		 * Move to start of next packet by updating completion index
		 *
		 * Here we assume that a transmit packet request is always
		 * serviced by posting one fragment.  We can therefore
		 * simplify the completion code by incrementing the
		 * completion index by one.  This code will need to be
		 * modified if this assumption changes.  See comments
		 * in dfx_xmt_queue_pkt for more details.
		 */

		bp->rcv_xmt_reg.index.xmt_comp += 1;
	}

	/*
	 * Update the transmit consumer index in the consumer block:
	 * preserve the non-xmt bits and splice in the producer index so
	 * the ring reads as empty.
	 */

	prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
	prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
	bp->cons_block_virt->xmt_rcv_data = prod_cons;
}


/*
 * ==================
 * = dfx_unregister =
 * ==================
 *
 * Overview:
 *   Shuts down an FDDI controller
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   bdev - pointer to device information
 *
 * Functional Description:
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   It compiles so it should work :-( (PCI cards do :-)
 *
 * Side Effects:
 *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
 *   freed.
*/ static void __devexit dfx_unregister(struct device *bdev) { struct net_device *dev = dev_get_drvdata(bdev); DFX_board_t *bp = netdev_priv(dev); int dfx_bus_pci = DFX_BUS_PCI(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; resource_size_t bar_start = 0; /* pointer to port */ resource_size_t bar_len = 0; /* resource length */ int alloc_size; /* total buffer size used */ unregister_netdev(dev); alloc_size = sizeof(PI_DESCR_BLOCK) + PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX + #ifndef DYNAMIC_BUFFERS (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) + #endif sizeof(PI_CONSUMER_BLOCK) + (PI_ALIGN_K_DESC_BLK - 1); if (bp->kmalloced) dma_free_coherent(bdev, alloc_size, bp->kmalloced, bp->kmalloced_dma); dfx_bus_uninit(dev); dfx_get_bars(bdev, &bar_start, &bar_len); if (dfx_use_mmio) { iounmap(bp->base.mem); release_mem_region(bar_start, bar_len); } else release_region(bar_start, bar_len); if (dfx_bus_pci) pci_disable_device(to_pci_dev(bdev)); free_netdev(dev); } static int __devinit __maybe_unused dfx_dev_register(struct device *); static int __devexit __maybe_unused dfx_dev_unregister(struct device *); #ifdef CONFIG_PCI static int __devinit dfx_pci_register(struct pci_dev *, const struct pci_device_id *); static void __devexit dfx_pci_unregister(struct pci_dev *); static struct pci_device_id dfx_pci_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) }, { } }; MODULE_DEVICE_TABLE(pci, dfx_pci_table); static struct pci_driver dfx_pci_driver = { .name = "defxx", .id_table = dfx_pci_table, .probe = dfx_pci_register, .remove = __devexit_p(dfx_pci_unregister), }; static __devinit int dfx_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent) { return dfx_register(&pdev->dev); } static void __devexit dfx_pci_unregister(struct pci_dev *pdev) { dfx_unregister(&pdev->dev); } #endif /* CONFIG_PCI */ #ifdef CONFIG_EISA static struct eisa_device_id dfx_eisa_table[] = { { "DEC3001", DEFEA_PROD_ID_1 }, { "DEC3002", 
DEFEA_PROD_ID_2 }, { "DEC3003", DEFEA_PROD_ID_3 }, { "DEC3004", DEFEA_PROD_ID_4 }, { } }; MODULE_DEVICE_TABLE(eisa, dfx_eisa_table); static struct eisa_driver dfx_eisa_driver = { .id_table = dfx_eisa_table, .driver = { .name = "defxx", .bus = &eisa_bus_type, .probe = dfx_dev_register, .remove = __devexit_p(dfx_dev_unregister), }, }; #endif /* CONFIG_EISA */ #ifdef CONFIG_TC static struct tc_device_id const dfx_tc_table[] = { { "DEC ", "PMAF-FA " }, { "DEC ", "PMAF-FD " }, { "DEC ", "PMAF-FS " }, { "DEC ", "PMAF-FU " }, { } }; MODULE_DEVICE_TABLE(tc, dfx_tc_table); static struct tc_driver dfx_tc_driver = { .id_table = dfx_tc_table, .driver = { .name = "defxx", .bus = &tc_bus_type, .probe = dfx_dev_register, .remove = __devexit_p(dfx_dev_unregister), }, }; #endif /* CONFIG_TC */ static int __devinit __maybe_unused dfx_dev_register(struct device *dev) { int status; status = dfx_register(dev); if (!status) get_device(dev); return status; } static int __devexit __maybe_unused dfx_dev_unregister(struct device *dev) { put_device(dev); dfx_unregister(dev); return 0; } static int __devinit dfx_init(void) { int status; status = pci_register_driver(&dfx_pci_driver); if (!status) status = eisa_driver_register(&dfx_eisa_driver); if (!status) status = tc_register_driver(&dfx_tc_driver); return status; } static void __devexit dfx_cleanup(void) { tc_unregister_driver(&dfx_tc_driver); eisa_driver_unregister(&dfx_eisa_driver); pci_unregister_driver(&dfx_pci_driver); } module_init(dfx_init); module_exit(dfx_cleanup); MODULE_AUTHOR("Lawrence V. Stefani"); MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver " DRV_VERSION " " DRV_RELDATE); MODULE_LICENSE("GPL");
gpl-2.0
jznomoney/htc-sense-froyo-kernel
drivers/parport/parport_cs.c
557
8491
/*====================================================================== A driver for PCMCIA parallel port adapters (specifically, for the Quatech SPP-100 EPP card: other cards will probably require driver tweaks) parport_cs.c 1.29 2002/10/11 06:57:41 The contents of this file are subject to the Mozilla Public License Version 1.1 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.mozilla.org/MPL/ Software distributed under the License is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for the specific language governing rights and limitations under the License. The initial developer of the original code is David A. Hinds <dahinds@users.sourceforge.net>. Portions created by David A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights Reserved. Alternatively, the contents of this file may be used under the terms of the GNU General Public License version 2 (the "GPL"), in which case the provisions of the GPL are applicable instead of the above. If you wish to allow the use of your version of this file only under the terms of the GPL and not to allow others to use your version of this file under the MPL, indicate your decision by deleting the provisions above and replace them with the notice and other provisions required by the GPL. If you do not delete the provisions above, a recipient may use your version of this file under either the MPL or the GPL. 
======================================================================*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>

#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>

/*====================================================================*/

/* Module parameters */

MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>");
MODULE_DESCRIPTION("PCMCIA parallel port card driver");
MODULE_LICENSE("Dual MPL/GPL");

/* helper: declares a module parameter with a default value */
#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)

/* epp_mode != 0 requests EPP mode on the card (default on) */
INT_MODULE_PARM(epp_mode, 1);

#ifdef PCMCIA_DEBUG
INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
static char *version =
"parport_cs.c 1.29 2002/10/11 06:57:41 (David Hinds)";
#else
#define DEBUG(n, args...)
#endif

/*====================================================================*/

/* OR'ed into the config index to force EPP mode on the card */
#define FORCE_EPP_MODE	0x08

/* Per-socket driver state. */
typedef struct parport_info_t {
	struct pcmcia_device	*p_dev;	/* owning PCMCIA device */
	int			ndev;	/* number of ports registered (0 or 1) */
	dev_node_t		node;	/* device node reported to Card Services */
	struct parport		*port;	/* the registered parallel port */
} parport_info_t;

static void parport_detach(struct pcmcia_device *p_dev);
static int parport_config(struct pcmcia_device *link);
static void parport_cs_release(struct pcmcia_device *);

/*======================================================================

    parport_attach() creates an "instance" of the driver, allocating
    local data structures for one device.  The device is registered
    with Card Services.

======================================================================*/

static int parport_probe(struct pcmcia_device *link)
{
    parport_info_t *info;

    DEBUG(0, "parport_attach()\n");

    /* Create new parport device */
    info = kzalloc(sizeof(*info), GFP_KERNEL);
    if (!info) return -ENOMEM;
    link->priv = info;
    info->p_dev = link;

    /* 8-bit I/O windows, shared level-triggered IRQ, IRQ enabled config */
    link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
    link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
    link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
    link->irq.IRQInfo1 = IRQ_LEVEL_ID;
    link->conf.Attributes = CONF_ENABLE_IRQ;
    link->conf.IntType = INT_MEMORY_AND_IO;

    return parport_config(link);
} /* parport_attach */

/*======================================================================

    This deletes a driver "instance".  The device is de-registered
    with Card Services.  If it has been released, all local data
    structures are freed.  Otherwise, the structures will be freed
    when the device is released.

======================================================================*/

static void parport_detach(struct pcmcia_device *link)
{
    DEBUG(0, "parport_detach(0x%p)\n", link);

    parport_cs_release(link);

    kfree(link->priv);
} /* parport_detach */

/*======================================================================

    parport_config() is scheduled to run after a CARD_INSERTION event
    is received, to configure the PCMCIA socket, and to make the
    parport device available to the system.

======================================================================*/

/* records the failing call and jumps to the shared error path */
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)

/*
 * Callback for pcmcia_loop_config(): accept the first CIS config table
 * entry that provides at least one I/O window, copying its window(s)
 * into the device and requesting the I/O region.
 */
static int parport_config_check(struct pcmcia_device *p_dev,
				cistpl_cftable_entry_t *cfg,
				cistpl_cftable_entry_t *dflt,
				unsigned int vcc,
				void *priv_data)
{
	if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
		cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
		if (epp_mode)
			p_dev->conf.ConfigIndex |= FORCE_EPP_MODE;
		p_dev->io.BasePort1 = io->win[0].base;
		p_dev->io.NumPorts1 = io->win[0].len;
		p_dev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
		if (io->nwin == 2) {
			p_dev->io.BasePort2 = io->win[1].base;
			p_dev->io.NumPorts2 = io->win[1].len;
		}
		if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
			return -ENODEV;
		return 0;
	}
	return -ENODEV;
}

static int parport_config(struct pcmcia_device *link)
{
    parport_info_t *info = link->priv;
    struct parport *p;
    int last_ret, last_fn;

    DEBUG(0, "parport_config(0x%p)\n", link);

    /* pick an I/O configuration from the card's CIS */
    last_ret = pcmcia_loop_config(link, parport_config_check, NULL);
    if (last_ret) {
	    cs_error(link, RequestIO, last_ret);
	    goto failed;
    }

    CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
    CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));

    /* hand the configured I/O range and IRQ to the generic PC driver */
    p = parport_pc_probe_port(link->io.BasePort1, link->io.BasePort2,
			      link->irq.AssignedIRQ, PARPORT_DMA_NONE,
			      &link->dev, IRQF_SHARED);
    if (p == NULL) {
	printk(KERN_NOTICE "parport_cs: parport_pc_probe_port() at "
	       "0x%3x, irq %u failed\n", link->io.BasePort1,
	       link->irq.AssignedIRQ);
	goto failed;
    }

    p->modes |= PARPORT_MODE_PCSPP;
    if (epp_mode)
	p->modes |= PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP;
    info->ndev = 1;
    info->node.major = LP_MAJOR;
    info->node.minor = p->number;
    info->port = p;
    strcpy(info->node.dev_name, p->name);
    link->dev_node = &info->node;

    return 0;

cs_failed:
    cs_error(link, last_fn, last_ret);
failed:
    parport_cs_release(link);
    return -ENODEV;
} /* parport_config */

/*======================================================================

    After a card is removed, parport_cs_release() will unregister the
    device, and release the PCMCIA configuration.  If the device is
    still open, this will be postponed until it is closed.

======================================================================*/

static void parport_cs_release(struct pcmcia_device *link)
{
	parport_info_t *info = link->priv;

	DEBUG(0, "parport_release(0x%p)\n", link);

	if (info->ndev) {
		struct parport *p = info->port;
		parport_pc_unregister_port(p);
	}
	info->ndev = 0;

	pcmcia_disable_device(link);
} /* parport_cs_release */


static struct pcmcia_device_id parport_ids[] = {
	PCMCIA_DEVICE_FUNC_ID(3),
	PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
	PCMCIA_DEVICE_MANF_CARD(0x0137, 0x0003),
	PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, parport_ids);

static struct pcmcia_driver parport_cs_driver = {
	.owner		= THIS_MODULE,
	.drv		= {
		.name	= "parport_cs",
	},
	.probe		= parport_probe,
	.remove		= parport_detach,
	.id_table	= parport_ids,
};

static int __init init_parport_cs(void)
{
	return pcmcia_register_driver(&parport_cs_driver);
}

static void __exit exit_parport_cs(void)
{
	pcmcia_unregister_driver(&parport_cs_driver);
}

module_init(init_parport_cs);
module_exit(exit_parport_cs);
gpl-2.0
vitek999/android_kernel_oukitel_orange
drivers/misc/lattice-ecp3-config.c
2093
5888
/*
 * Copyright (C) 2012 Stefan Roese <sr@denx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/platform_device.h>
#include <linux/delay.h>

#define FIRMWARE_NAME	"lattice-ecp3.bit"

/*
 * The JTAG ID's of the supported FPGA's. The ID is 32bit wide
 * reversed as noted in the manual.
 */
#define ID_ECP3_17	0xc2088080
#define ID_ECP3_35	0xc2048080

/* FPGA commands */
#define FPGA_CMD_READ_ID	0x07	/* plus 24 bits */
#define FPGA_CMD_READ_STATUS	0x09	/* plus 24 bits */
#define FPGA_CMD_CLEAR		0x70
#define FPGA_CMD_REFRESH	0x71
#define FPGA_CMD_WRITE_EN	0x4a	/* plus 2 bits */
#define FPGA_CMD_WRITE_DIS	0x4f	/* plus 8 bits */
#define FPGA_CMD_WRITE_INC	0x41	/* plus 0 bits */

/*
 * The status register is 32bit revered, DONE is bit 17 from the TN1222.pdf
 * (LatticeECP3 Slave SPI Port User's Guide)
 */
#define FPGA_STATUS_DONE	0x00004000
#define FPGA_STATUS_CLEARED	0x00010000

#define FPGA_CLEAR_TIMEOUT	5000	/* max. 5000ms for FPGA clear */
#define FPGA_CLEAR_MSLEEP	10
#define FPGA_CLEAR_LOOP_COUNT	(FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP)

struct fpga_data {
	/* signalled when firmware_load() has finished (success OR failure) */
	struct completion fw_loaded;
};

struct ecp3_dev {
	u32 jedec_id;
	char *name;
};

static const struct ecp3_dev ecp3_dev[] = {
	{
		.jedec_id = ID_ECP3_17,
		.name = "Lattice ECP3-17",
	},
	{
		.jedec_id = ID_ECP3_35,
		.name = "Lattice ECP3-35",
	},
};

/*
 * Asynchronous completion callback for request_firmware_nowait().
 * Detects the FPGA via its JTAG ID, clears it and streams the bitstream
 * over SPI.
 *
 * Every exit path releases the firmware and completes fw_loaded; the
 * original code returned early on errors without doing either, which
 * leaked the firmware image and left lattice_ecp3_remove() blocked
 * forever in wait_for_completion().
 */
static void firmware_load(const struct firmware *fw, void *context)
{
	struct spi_device *spi = (struct spi_device *)context;
	struct fpga_data *data = spi_get_drvdata(spi);
	u8 *buffer;
	int ret;
	u8 txbuf[8];
	u8 rxbuf[8];
	int rx_len = 8;
	int i;
	u32 jedec_id;
	u32 status;

	/* request_firmware_nowait() hands us NULL when the load failed */
	if (fw == NULL) {
		dev_err(&spi->dev, "Cannot load firmware, aborting\n");
		goto out;
	}

	if (fw->size == 0) {
		dev_err(&spi->dev, "Error: Firmware size is 0!\n");
		goto out;
	}

	/* Fill dummy data (24 stuffing bits for commands) */
	txbuf[1] = 0x00;
	txbuf[2] = 0x00;
	txbuf[3] = 0x00;

	/* Trying to speak with the FPGA via SPI... */
	txbuf[0] = FPGA_CMD_READ_ID;
	ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
	dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", *(u32 *)&rxbuf[4]);
	jedec_id = *(u32 *)&rxbuf[4];

	for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) {
		if (jedec_id == ecp3_dev[i].jedec_id)
			break;
	}
	if (i == ARRAY_SIZE(ecp3_dev)) {
		dev_err(&spi->dev,
			"Error: No supported FPGA detected (JEDEC_ID=%08x)!\n",
			jedec_id);
		goto out;
	}

	dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);

	txbuf[0] = FPGA_CMD_READ_STATUS;
	ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
	dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);

	buffer = kzalloc(fw->size + 8, GFP_KERNEL);
	if (!buffer) {
		dev_err(&spi->dev, "Error: Can't allocate memory!\n");
		goto out;
	}

	/*
	 * Insert WRITE_INC command into stream (one SPI frame)
	 */
	buffer[0] = FPGA_CMD_WRITE_INC;
	buffer[1] = 0xff;
	buffer[2] = 0xff;
	buffer[3] = 0xff;
	memcpy(buffer + 4, fw->data, fw->size);

	txbuf[0] = FPGA_CMD_REFRESH;
	ret = spi_write(spi, txbuf, 4);

	txbuf[0] = FPGA_CMD_WRITE_EN;
	ret = spi_write(spi, txbuf, 4);

	txbuf[0] = FPGA_CMD_CLEAR;
	ret = spi_write(spi, txbuf, 4);

	/*
	 * Wait for FPGA memory to become cleared
	 */
	for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
		txbuf[0] = FPGA_CMD_READ_STATUS;
		ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
		status = *(u32 *)&rxbuf[4];
		if (status == FPGA_STATUS_CLEARED)
			break;

		msleep(FPGA_CLEAR_MSLEEP);
	}

	if (i == FPGA_CLEAR_LOOP_COUNT) {
		dev_err(&spi->dev,
			"Error: Timeout waiting for FPGA to clear (status=%08x)!\n",
			status);
		goto out_free;
	}

	dev_info(&spi->dev, "Configuring the FPGA...\n");
	ret = spi_write(spi, buffer, fw->size + 8);

	txbuf[0] = FPGA_CMD_WRITE_DIS;
	ret = spi_write(spi, txbuf, 4);

	txbuf[0] = FPGA_CMD_READ_STATUS;
	ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
	dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
	status = *(u32 *)&rxbuf[4];

	/* Check result */
	if (status & FPGA_STATUS_DONE)
		dev_info(&spi->dev, "FPGA successfully configured!\n");
	else
		dev_info(&spi->dev, "FPGA not configured (DONE not set)\n");

out_free:
	kfree(buffer);
out:
	/* release_firmware(NULL) is a no-op, so this is safe on all paths */
	release_firmware(fw);
	/* always unblock lattice_ecp3_remove(), even on failure */
	complete(&data->fw_loaded);
}

static int lattice_ecp3_probe(struct spi_device *spi)
{
	struct fpga_data *data;
	int err;

	data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
	if (!data) {
		dev_err(&spi->dev, "Memory allocation for fpga_data failed\n");
		return -ENOMEM;
	}
	spi_set_drvdata(spi, data);

	init_completion(&data->fw_loaded);
	err = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
				      FIRMWARE_NAME, &spi->dev,
				      GFP_KERNEL, spi, firmware_load);
	if (err) {
		dev_err(&spi->dev, "Firmware loading failed with %d!\n", err);
		return err;
	}

	dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n");

	return 0;
}

static int lattice_ecp3_remove(struct spi_device *spi)
{
	struct fpga_data *data = spi_get_drvdata(spi);

	/* wait for the async firmware callback before the device goes away */
	wait_for_completion(&data->fw_loaded);

	return 0;
}

static const struct spi_device_id lattice_ecp3_id[] = {
	{ "ecp3-17", 0 },
	{ "ecp3-35", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, lattice_ecp3_id);

static struct spi_driver lattice_ecp3_driver = {
	.driver = {
		.name = "lattice-ecp3",
		.owner = THIS_MODULE,
	},
	.probe = lattice_ecp3_probe,
	.remove = lattice_ecp3_remove,
	.id_table = lattice_ecp3_id,
};

module_spi_driver(lattice_ecp3_driver);

MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI");
MODULE_LICENSE("GPL");
gpl-2.0
redglasses/linux-yocto-3.10
arch/m68k/platform/coldfire/m523x.c
2605
2291
/***************************************************************************/

/*
 *	linux/arch/m68knommu/platform/523x/config.c
 *
 *	Sub-architecture dependent initialization code for the Freescale
 *	523x CPUs.
 *
 *	Copyright (C) 1999-2005, Greg Ungerer (gerg@snapgear.com)
 *	Copyright (C) 2001-2003, SnapGear Inc. (www.snapgear.com)
 */

/***************************************************************************/

#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfclk.h>

/***************************************************************************/

/* Clock tree exposed for this SoC: PLL, bus clock, PIT timers, UARTs, FEC */
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);

/* NULL-terminated list consumed by the ColdFire clk framework */
struct clk *mcf_clks[] = {
	&clk_pll,
	&clk_sys,
	&clk_mcfpit0,
	&clk_mcfpit1,
	&clk_mcfpit2,
	&clk_mcfpit3,
	&clk_mcfuart0,
	&clk_mcfuart1,
	&clk_mcfuart2,
	&clk_fec0,
	NULL
};

/***************************************************************************/

#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)

/* Route the multi-function pins needed by the QSPI controller. */
static void __init m523x_qspi_init(void)
{
	u16 par;

	/* setup QSPS pins for QSPI with gpio CS control */
	writeb(0x1f, MCFGPIO_PAR_QSPI);
	/* and CS2 & CS3 as gpio */
	par = readw(MCFGPIO_PAR_TIMER);
	par &= 0x3f3f;
	writew(par, MCFGPIO_PAR_TIMER);
}

#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */

/***************************************************************************/

/* Route the multi-function pins needed by the on-chip Ethernet (FEC). */
static void __init m523x_fec_init(void)
{
	/* Set multi-function pins to ethernet use */
	writeb(readb(MCFGPIO_PAR_FECI2C) | 0xf0, MCFGPIO_PAR_FECI2C);
}

/***************************************************************************/

/*
 * Board support entry point: install the scheduler timer hook and set up
 * the pin routing for the peripherals compiled into this kernel.
 * commandp/size (kernel command line buffer) are accepted but unused here.
 */
void __init config_BSP(char *commandp, int size)
{
	mach_sched_init = hw_timer_init;
	m523x_fec_init();
#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
	m523x_qspi_init();
#endif
}

/***************************************************************************/
gpl-2.0
Zzomborg/Laearning
arch/mips/lantiq/xway/prom-xway.c
2605
1148
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Copyright (C) 2010 John Crispin <blogic@openwrt.org> */ #include <linux/module.h> #include <linux/clk.h> #include <asm/bootinfo.h> #include <asm/time.h> #include <lantiq_soc.h> #include "../prom.h" #define SOC_DANUBE "Danube" #define SOC_TWINPASS "Twinpass" #define SOC_AR9 "AR9" #define PART_SHIFT 12 #define PART_MASK 0x0FFFFFFF #define REV_SHIFT 28 #define REV_MASK 0xF0000000 void __init ltq_soc_detect(struct ltq_soc_info *i) { i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT; i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT; switch (i->partnum) { case SOC_ID_DANUBE1: case SOC_ID_DANUBE2: i->name = SOC_DANUBE; i->type = SOC_TYPE_DANUBE; break; case SOC_ID_TWINPASS: i->name = SOC_TWINPASS; i->type = SOC_TYPE_DANUBE; break; case SOC_ID_ARX188: case SOC_ID_ARX168: case SOC_ID_ARX182: i->name = SOC_AR9; i->type = SOC_TYPE_AR9; break; default: unreachable(); break; } }
gpl-2.0
Flyhalf205/android_kernel_htc_t6
drivers/mtd/maps/l440gx.c
9773
4077
/* * BIOS Flash chip on Intel 440GX board. * * Bugs this currently does not work under linuxBIOS. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #define PIIXE_IOBASE_RESOURCE 11 #define WINDOW_ADDR 0xfff00000 #define WINDOW_SIZE 0x00100000 #define BUSWIDTH 1 static u32 iobase; #define IOBASE iobase #define TRIBUF_PORT (IOBASE+0x37) #define VPP_PORT (IOBASE+0x28) static struct mtd_info *mymtd; /* Is this really the vpp port? */ static DEFINE_SPINLOCK(l440gx_vpp_lock); static int l440gx_vpp_refcnt; static void l440gx_set_vpp(struct map_info *map, int vpp) { unsigned long flags; spin_lock_irqsave(&l440gx_vpp_lock, flags); if (vpp) { if (++l440gx_vpp_refcnt == 1) /* first nested 'on' */ outl(inl(VPP_PORT) | 1, VPP_PORT); } else { if (--l440gx_vpp_refcnt == 0) /* last nested 'off' */ outl(inl(VPP_PORT) & ~1, VPP_PORT); } spin_unlock_irqrestore(&l440gx_vpp_lock, flags); } static struct map_info l440gx_map = { .name = "L440GX BIOS", .size = WINDOW_SIZE, .bankwidth = BUSWIDTH, .phys = WINDOW_ADDR, #if 0 /* FIXME verify that this is the * appripriate code for vpp enable/disable */ .set_vpp = l440gx_set_vpp #endif }; static int __init init_l440gx(void) { struct pci_dev *dev, *pm_dev; struct resource *pm_iobase; __u16 word; dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, NULL); pm_dev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, NULL); pci_dev_put(dev); if (!dev || !pm_dev) { printk(KERN_NOTICE "L440GX flash mapping: failed to find PIIX4 ISA bridge, cannot continue\n"); pci_dev_put(pm_dev); return -ENODEV; } l440gx_map.virt = ioremap_nocache(WINDOW_ADDR, WINDOW_SIZE); if (!l440gx_map.virt) { printk(KERN_WARNING "Failed to ioremap L440GX flash region\n"); pci_dev_put(pm_dev); return -ENOMEM; } simple_map_init(&l440gx_map); printk(KERN_NOTICE "window_addr = 0x%08lx\n", (unsigned long)l440gx_map.virt); 
/* Setup the pm iobase resource * This code should move into some kind of generic bridge * driver but for the moment I'm content with getting the * allocation correct. */ pm_iobase = &pm_dev->resource[PIIXE_IOBASE_RESOURCE]; if (!(pm_iobase->flags & IORESOURCE_IO)) { pm_iobase->name = "pm iobase"; pm_iobase->start = 0; pm_iobase->end = 63; pm_iobase->flags = IORESOURCE_IO; /* Put the current value in the resource */ pci_read_config_dword(pm_dev, 0x40, &iobase); iobase &= ~1; pm_iobase->start += iobase & ~1; pm_iobase->end += iobase & ~1; pci_dev_put(pm_dev); /* Allocate the resource region */ if (pci_assign_resource(pm_dev, PIIXE_IOBASE_RESOURCE) != 0) { pci_dev_put(dev); pci_dev_put(pm_dev); printk(KERN_WARNING "Could not allocate pm iobase resource\n"); iounmap(l440gx_map.virt); return -ENXIO; } } /* Set the iobase */ iobase = pm_iobase->start; pci_write_config_dword(pm_dev, 0x40, iobase | 1); /* Set XBCS# */ pci_read_config_word(dev, 0x4e, &word); word |= 0x4; pci_write_config_word(dev, 0x4e, word); /* Supply write voltage to the chip */ l440gx_set_vpp(&l440gx_map, 1); /* Enable the gate on the WE line */ outb(inb(TRIBUF_PORT) & ~1, TRIBUF_PORT); printk(KERN_NOTICE "Enabled WE line to L440GX BIOS flash chip.\n"); mymtd = do_map_probe("jedec_probe", &l440gx_map); if (!mymtd) { printk(KERN_NOTICE "JEDEC probe on BIOS chip failed. Using ROM\n"); mymtd = do_map_probe("map_rom", &l440gx_map); } if (mymtd) { mymtd->owner = THIS_MODULE; mtd_device_register(mymtd, NULL, 0); return 0; } iounmap(l440gx_map.virt); return -ENXIO; } static void __exit cleanup_l440gx(void) { mtd_device_unregister(mymtd); map_destroy(mymtd); iounmap(l440gx_map.virt); } module_init(init_l440gx); module_exit(cleanup_l440gx); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("MTD map driver for BIOS chips on Intel L440GX motherboards");
gpl-2.0
GlitchKernel/Glitch
drivers/media/video/sn9c102/sn9c102_hv7131r.c
12845
10697
/*************************************************************************** * Plug-in for HV7131R image sensor connected to the SN9C1xx PC Camera * * Controllers * * * * Copyright (C) 2007 by Luca Risolia <luca.risolia@studio.unibo.it> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ***************************************************************************/ #include "sn9c102_sensor.h" #include "sn9c102_devtable.h" static int hv7131r_init(struct sn9c102_device* cam) { int err = 0; switch (sn9c102_get_bridge(cam)) { case BRIDGE_SN9C103: err = sn9c102_write_const_regs(cam, {0x00, 0x03}, {0x1a, 0x04}, {0x20, 0x05}, {0x20, 0x06}, {0x03, 0x10}, {0x00, 0x14}, {0x60, 0x17}, {0x0a, 0x18}, {0xf0, 0x19}, {0x1d, 0x1a}, {0x10, 0x1b}, {0x02, 0x1c}, {0x03, 0x1d}, {0x0f, 0x1e}, {0x0c, 0x1f}, {0x00, 0x20}, {0x10, 0x21}, {0x20, 0x22}, {0x30, 0x23}, {0x40, 0x24}, {0x50, 0x25}, {0x60, 0x26}, {0x70, 0x27}, {0x80, 0x28}, {0x90, 0x29}, {0xa0, 0x2a}, {0xb0, 0x2b}, {0xc0, 0x2c}, {0xd0, 0x2d}, {0xe0, 0x2e}, {0xf0, 0x2f}, {0xff, 0x30}); break; case BRIDGE_SN9C105: case BRIDGE_SN9C120: err = sn9c102_write_const_regs(cam, {0x44, 0x01}, {0x40, 0x02}, {0x00, 0x03}, {0x1a, 0x04}, {0x44, 0x05}, {0x3e, 0x06}, {0x1a, 0x07}, {0x03, 0x10}, {0x08, 0x14}, {0xa3, 0x17}, {0x4b, 0x18}, {0x00, 0x19}, {0x1d, 0x1a}, {0x10, 0x1b}, {0x02, 0x1c}, {0x03, 
0x1d}, {0x0f, 0x1e}, {0x0c, 0x1f}, {0x00, 0x20}, {0x29, 0x21}, {0x40, 0x22}, {0x54, 0x23}, {0x66, 0x24}, {0x76, 0x25}, {0x85, 0x26}, {0x94, 0x27}, {0xa1, 0x28}, {0xae, 0x29}, {0xbb, 0x2a}, {0xc7, 0x2b}, {0xd3, 0x2c}, {0xde, 0x2d}, {0xea, 0x2e}, {0xf4, 0x2f}, {0xff, 0x30}, {0x00, 0x3F}, {0xC7, 0x40}, {0x01, 0x41}, {0x44, 0x42}, {0x00, 0x43}, {0x44, 0x44}, {0x00, 0x45}, {0x44, 0x46}, {0x00, 0x47}, {0xC7, 0x48}, {0x01, 0x49}, {0xC7, 0x4A}, {0x01, 0x4B}, {0xC7, 0x4C}, {0x01, 0x4D}, {0x44, 0x4E}, {0x00, 0x4F}, {0x44, 0x50}, {0x00, 0x51}, {0x44, 0x52}, {0x00, 0x53}, {0xC7, 0x54}, {0x01, 0x55}, {0xC7, 0x56}, {0x01, 0x57}, {0xC7, 0x58}, {0x01, 0x59}, {0x44, 0x5A}, {0x00, 0x5B}, {0x44, 0x5C}, {0x00, 0x5D}, {0x44, 0x5E}, {0x00, 0x5F}, {0xC7, 0x60}, {0x01, 0x61}, {0xC7, 0x62}, {0x01, 0x63}, {0xC7, 0x64}, {0x01, 0x65}, {0x44, 0x66}, {0x00, 0x67}, {0x44, 0x68}, {0x00, 0x69}, {0x44, 0x6A}, {0x00, 0x6B}, {0xC7, 0x6C}, {0x01, 0x6D}, {0xC7, 0x6E}, {0x01, 0x6F}, {0xC7, 0x70}, {0x01, 0x71}, {0x44, 0x72}, {0x00, 0x73}, {0x44, 0x74}, {0x00, 0x75}, {0x44, 0x76}, {0x00, 0x77}, {0xC7, 0x78}, {0x01, 0x79}, {0xC7, 0x7A}, {0x01, 0x7B}, {0xC7, 0x7C}, {0x01, 0x7D}, {0x44, 0x7E}, {0x00, 0x7F}, {0x14, 0x84}, {0x00, 0x85}, {0x27, 0x86}, {0x00, 0x87}, {0x07, 0x88}, {0x00, 0x89}, {0xEC, 0x8A}, {0x0f, 0x8B}, {0xD8, 0x8C}, {0x0f, 0x8D}, {0x3D, 0x8E}, {0x00, 0x8F}, {0x3D, 0x90}, {0x00, 0x91}, {0xCD, 0x92}, {0x0f, 0x93}, {0xf7, 0x94}, {0x0f, 0x95}, {0x0C, 0x96}, {0x00, 0x97}, {0x00, 0x98}, {0x66, 0x99}, {0x05, 0x9A}, {0x00, 0x9B}, {0x04, 0x9C}, {0x00, 0x9D}, {0x08, 0x9E}, {0x00, 0x9F}, {0x2D, 0xC0}, {0x2D, 0xC1}, {0x3A, 0xC2}, {0x05, 0xC3}, {0x04, 0xC4}, {0x3F, 0xC5}, {0x00, 0xC6}, {0x00, 0xC7}, {0x50, 0xC8}, {0x3C, 0xC9}, {0x28, 0xCA}, {0xD8, 0xCB}, {0x14, 0xCC}, {0xEC, 0xCD}, {0x32, 0xCE}, {0xDD, 0xCF}, {0x32, 0xD0}, {0xDD, 0xD1}, {0x6A, 0xD2}, {0x50, 0xD3}, {0x00, 0xD4}, {0x00, 0xD5}, {0x00, 0xD6}); break; default: break; } err += sn9c102_i2c_write(cam, 0x20, 0x00); err += sn9c102_i2c_write(cam, 
0x21, 0xd6); err += sn9c102_i2c_write(cam, 0x25, 0x06); return err; } static int hv7131r_get_ctrl(struct sn9c102_device* cam, struct v4l2_control* ctrl) { switch (ctrl->id) { case V4L2_CID_GAIN: if ((ctrl->value = sn9c102_i2c_read(cam, 0x30)) < 0) return -EIO; return 0; case V4L2_CID_RED_BALANCE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x31)) < 0) return -EIO; ctrl->value = ctrl->value & 0x3f; return 0; case V4L2_CID_BLUE_BALANCE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x33)) < 0) return -EIO; ctrl->value = ctrl->value & 0x3f; return 0; case SN9C102_V4L2_CID_GREEN_BALANCE: if ((ctrl->value = sn9c102_i2c_read(cam, 0x32)) < 0) return -EIO; ctrl->value = ctrl->value & 0x3f; return 0; case V4L2_CID_BLACK_LEVEL: if ((ctrl->value = sn9c102_i2c_read(cam, 0x01)) < 0) return -EIO; ctrl->value = (ctrl->value & 0x08) ? 1 : 0; return 0; default: return -EINVAL; } } static int hv7131r_set_ctrl(struct sn9c102_device* cam, const struct v4l2_control* ctrl) { int err = 0; switch (ctrl->id) { case V4L2_CID_GAIN: err += sn9c102_i2c_write(cam, 0x30, ctrl->value); break; case V4L2_CID_RED_BALANCE: err += sn9c102_i2c_write(cam, 0x31, ctrl->value); break; case V4L2_CID_BLUE_BALANCE: err += sn9c102_i2c_write(cam, 0x33, ctrl->value); break; case SN9C102_V4L2_CID_GREEN_BALANCE: err += sn9c102_i2c_write(cam, 0x32, ctrl->value); break; case V4L2_CID_BLACK_LEVEL: { int r = sn9c102_i2c_read(cam, 0x01); if (r < 0) return -EIO; err += sn9c102_i2c_write(cam, 0x01, (ctrl->value<<3) | (r&0xf7)); } break; default: return -EINVAL; } return err ? 
-EIO : 0; } static int hv7131r_set_crop(struct sn9c102_device* cam, const struct v4l2_rect* rect) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 1, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 1; err += sn9c102_write_reg(cam, h_start, 0x12); err += sn9c102_write_reg(cam, v_start, 0x13); return err; } static int hv7131r_set_pix_format(struct sn9c102_device* cam, const struct v4l2_pix_format* pix) { int err = 0; switch (sn9c102_get_bridge(cam)) { case BRIDGE_SN9C103: if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8) { err += sn9c102_write_reg(cam, 0xa0, 0x19); err += sn9c102_i2c_write(cam, 0x01, 0x04); } else { err += sn9c102_write_reg(cam, 0x30, 0x19); err += sn9c102_i2c_write(cam, 0x01, 0x04); } break; case BRIDGE_SN9C105: case BRIDGE_SN9C120: if (pix->pixelformat == V4L2_PIX_FMT_SBGGR8) { err += sn9c102_write_reg(cam, 0xa5, 0x17); err += sn9c102_i2c_write(cam, 0x01, 0x24); } else { err += sn9c102_write_reg(cam, 0xa3, 0x17); err += sn9c102_i2c_write(cam, 0x01, 0x04); } break; default: break; } return err; } static const struct sn9c102_sensor hv7131r = { .name = "HV7131R", .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", .supported_bridge = BRIDGE_SN9C103 | BRIDGE_SN9C105 | BRIDGE_SN9C120, .sysfs_ops = SN9C102_I2C_READ | SN9C102_I2C_WRITE, .frequency = SN9C102_I2C_100KHZ, .interface = SN9C102_I2C_2WIRES, .i2c_slave_id = 0x11, .init = &hv7131r_init, .qctrl = { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "global gain", .minimum = 0x00, .maximum = 0xff, .step = 0x01, .default_value = 0x40, .flags = 0, }, { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "red balance", .minimum = 0x00, .maximum = 0x3f, .step = 0x01, .default_value = 0x08, .flags = 0, }, { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "blue balance", .minimum = 0x00, .maximum = 0x3f, .step = 0x01, .default_value = 0x1a, .flags = 0, }, { .id = 
SN9C102_V4L2_CID_GREEN_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "green balance", .minimum = 0x00, .maximum = 0x3f, .step = 0x01, .default_value = 0x2f, .flags = 0, }, { .id = V4L2_CID_BLACK_LEVEL, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "auto black level compensation", .minimum = 0x00, .maximum = 0x01, .step = 0x01, .default_value = 0x00, .flags = 0, }, }, .get_ctrl = &hv7131r_get_ctrl, .set_ctrl = &hv7131r_set_ctrl, .cropcap = { .bounds = { .left = 0, .top = 0, .width = 640, .height = 480, }, .defrect = { .left = 0, .top = 0, .width = 640, .height = 480, }, }, .set_crop = &hv7131r_set_crop, .pix_format = { .width = 640, .height = 480, .pixelformat = V4L2_PIX_FMT_SBGGR8, .priv = 8, }, .set_pix_format = &hv7131r_set_pix_format }; int sn9c102_probe_hv7131r(struct sn9c102_device* cam) { int devid, err; err = sn9c102_write_const_regs(cam, {0x09, 0x01}, {0x44, 0x02}, {0x34, 0x01}, {0x20, 0x17}, {0x34, 0x01}, {0x46, 0x01}); devid = sn9c102_i2c_try_read(cam, &hv7131r, 0x00); if (err || devid < 0) return -EIO; if (devid != 0x02) return -ENODEV; sn9c102_attach_sensor(cam, &hv7131r); return 0; }
gpl-2.0
chrbayer/linux-sunxi
drivers/mfd/dm355evm_msp.c
46
10970
/* * dm355evm_msp.c - driver for MSP430 firmware on DM355EVM board * * Copyright (C) 2008 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/init.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/module.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/leds.h> #include <linux/i2c.h> #include <linux/i2c/dm355evm_msp.h> /* * The DM355 is a DaVinci chip with video support but no C64+ DSP. Its * EVM board has an MSP430 programmed with firmware for various board * support functions. This driver exposes some of them directly, and * supports other drivers (e.g. RTC, input) for more complex access. * * Because this firmware is entirely board-specific, this file embeds * knowledge that would be passed as platform_data in a generic driver. * * This driver was tested with firmware revision A4. 
*/ #if IS_ENABLED(CONFIG_INPUT_DM355EVM) #define msp_has_keyboard() true #else #define msp_has_keyboard() false #endif #if IS_ENABLED(CONFIG_LEDS_GPIO) #define msp_has_leds() true #else #define msp_has_leds() false #endif #if IS_ENABLED(CONFIG_RTC_DRV_DM355EVM) #define msp_has_rtc() true #else #define msp_has_rtc() false #endif #if IS_ENABLED(CONFIG_VIDEO_TVP514X) #define msp_has_tvp() true #else #define msp_has_tvp() false #endif /*----------------------------------------------------------------------*/ /* REVISIT for paranoia's sake, retry reads/writes on error */ static struct i2c_client *msp430; /** * dm355evm_msp_write - Writes a register in dm355evm_msp * @value: the value to be written * @reg: register address * * Returns result of operation - 0 is success, else negative errno */ int dm355evm_msp_write(u8 value, u8 reg) { return i2c_smbus_write_byte_data(msp430, reg, value); } EXPORT_SYMBOL(dm355evm_msp_write); /** * dm355evm_msp_read - Reads a register from dm355evm_msp * @reg: register address * * Returns result of operation - value, or negative errno */ int dm355evm_msp_read(u8 reg) { return i2c_smbus_read_byte_data(msp430, reg); } EXPORT_SYMBOL(dm355evm_msp_read); /*----------------------------------------------------------------------*/ /* * Many of the msp430 pins are just used as fixed-direction GPIOs. * We could export a few more of them this way, if we wanted. */ #define MSP_GPIO(bit, reg) ((DM355EVM_MSP_ ## reg) << 3 | (bit)) static const u8 msp_gpios[] = { /* eight leds */ MSP_GPIO(0, LED), MSP_GPIO(1, LED), MSP_GPIO(2, LED), MSP_GPIO(3, LED), MSP_GPIO(4, LED), MSP_GPIO(5, LED), MSP_GPIO(6, LED), MSP_GPIO(7, LED), /* SW6 and the NTSC/nPAL jumper */ MSP_GPIO(0, SWITCH1), MSP_GPIO(1, SWITCH1), MSP_GPIO(2, SWITCH1), MSP_GPIO(3, SWITCH1), MSP_GPIO(4, SWITCH1), /* switches on MMC/SD sockets */ /* * Note: EVMDM355_ECP_VA4.pdf suggests that Bit 2 and 4 should be * checked for card detection. 
However on the EVM bit 1 and 3 gives * this status, for 0 and 1 instance respectively. The pdf also * suggests that Bit 1 and 3 should be checked for write protection. * However on the EVM bit 2 and 4 gives this status,for 0 and 1 * instance respectively. */ MSP_GPIO(2, SDMMC), MSP_GPIO(1, SDMMC), /* mmc0 WP, nCD */ MSP_GPIO(4, SDMMC), MSP_GPIO(3, SDMMC), /* mmc1 WP, nCD */ }; #define MSP_GPIO_REG(offset) (msp_gpios[(offset)] >> 3) #define MSP_GPIO_MASK(offset) BIT(msp_gpios[(offset)] & 0x07) static int msp_gpio_in(struct gpio_chip *chip, unsigned offset) { switch (MSP_GPIO_REG(offset)) { case DM355EVM_MSP_SWITCH1: case DM355EVM_MSP_SWITCH2: case DM355EVM_MSP_SDMMC: return 0; default: return -EINVAL; } } static u8 msp_led_cache; static int msp_gpio_get(struct gpio_chip *chip, unsigned offset) { int reg, status; reg = MSP_GPIO_REG(offset); status = dm355evm_msp_read(reg); if (status < 0) return status; if (reg == DM355EVM_MSP_LED) msp_led_cache = status; return !!(status & MSP_GPIO_MASK(offset)); } static int msp_gpio_out(struct gpio_chip *chip, unsigned offset, int value) { int mask, bits; /* NOTE: there are some other signals that could be * packaged as output GPIOs, but they aren't as useful * as the LEDs ... so for now we don't. 
*/ if (MSP_GPIO_REG(offset) != DM355EVM_MSP_LED) return -EINVAL; mask = MSP_GPIO_MASK(offset); bits = msp_led_cache; bits &= ~mask; if (value) bits |= mask; msp_led_cache = bits; return dm355evm_msp_write(bits, DM355EVM_MSP_LED); } static void msp_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { msp_gpio_out(chip, offset, value); } static struct gpio_chip dm355evm_msp_gpio = { .label = "dm355evm_msp", .owner = THIS_MODULE, .direction_input = msp_gpio_in, .get = msp_gpio_get, .direction_output = msp_gpio_out, .set = msp_gpio_set, .base = -EINVAL, /* dynamic assignment */ .ngpio = ARRAY_SIZE(msp_gpios), .can_sleep = true, }; /*----------------------------------------------------------------------*/ static struct device *add_child(struct i2c_client *client, const char *name, void *pdata, unsigned pdata_len, bool can_wakeup, int irq) { struct platform_device *pdev; int status; pdev = platform_device_alloc(name, -1); if (!pdev) { dev_dbg(&client->dev, "can't alloc dev\n"); status = -ENOMEM; goto err; } device_init_wakeup(&pdev->dev, can_wakeup); pdev->dev.parent = &client->dev; if (pdata) { status = platform_device_add_data(pdev, pdata, pdata_len); if (status < 0) { dev_dbg(&pdev->dev, "can't add platform_data\n"); goto err; } } if (irq) { struct resource r = { .start = irq, .flags = IORESOURCE_IRQ, }; status = platform_device_add_resources(pdev, &r, 1); if (status < 0) { dev_dbg(&pdev->dev, "can't add irq\n"); goto err; } } status = platform_device_add(pdev); err: if (status < 0) { platform_device_put(pdev); dev_err(&client->dev, "can't add %s dev\n", name); return ERR_PTR(status); } return &pdev->dev; } static int add_children(struct i2c_client *client) { static const struct { int offset; char *label; } config_inputs[] = { /* 8 == right after the LEDs */ { 8 + 0, "sw6_1", }, { 8 + 1, "sw6_2", }, { 8 + 2, "sw6_3", }, { 8 + 3, "sw6_4", }, { 8 + 4, "NTSC/nPAL", }, }; struct device *child; int status; int i; /* GPIO-ish stuff */ dm355evm_msp_gpio.parent = 
&client->dev; status = gpiochip_add_data(&dm355evm_msp_gpio, NULL); if (status < 0) return status; /* LED output */ if (msp_has_leds()) { #define GPIO_LED(l) .name = l, .active_low = true static struct gpio_led evm_leds[] = { { GPIO_LED("dm355evm::ds14"), .default_trigger = "heartbeat", }, { GPIO_LED("dm355evm::ds15"), .default_trigger = "mmc0", }, { GPIO_LED("dm355evm::ds16"), /* could also be a CE-ATA drive */ .default_trigger = "mmc1", }, { GPIO_LED("dm355evm::ds17"), .default_trigger = "nand-disk", }, { GPIO_LED("dm355evm::ds18"), }, { GPIO_LED("dm355evm::ds19"), }, { GPIO_LED("dm355evm::ds20"), }, { GPIO_LED("dm355evm::ds21"), }, }; #undef GPIO_LED struct gpio_led_platform_data evm_led_data = { .num_leds = ARRAY_SIZE(evm_leds), .leds = evm_leds, }; for (i = 0; i < ARRAY_SIZE(evm_leds); i++) evm_leds[i].gpio = i + dm355evm_msp_gpio.base; /* NOTE: these are the only fully programmable LEDs * on the board, since GPIO-61/ds22 (and many signals * going to DC7) must be used for AEMIF address lines * unless the top 1 GB of NAND is unused... 
*/ child = add_child(client, "leds-gpio", &evm_led_data, sizeof(evm_led_data), false, 0); if (IS_ERR(child)) return PTR_ERR(child); } /* configuration inputs */ for (i = 0; i < ARRAY_SIZE(config_inputs); i++) { int gpio = dm355evm_msp_gpio.base + config_inputs[i].offset; gpio_request_one(gpio, GPIOF_IN, config_inputs[i].label); /* make it easy for userspace to see these */ gpio_export(gpio, false); } /* MMC/SD inputs -- right after the last config input */ if (dev_get_platdata(&client->dev)) { void (*mmcsd_setup)(unsigned) = dev_get_platdata(&client->dev); mmcsd_setup(dm355evm_msp_gpio.base + 8 + 5); } /* RTC is a 32 bit counter, no alarm */ if (msp_has_rtc()) { child = add_child(client, "rtc-dm355evm", NULL, 0, false, 0); if (IS_ERR(child)) return PTR_ERR(child); } /* input from buttons and IR remote (uses the IRQ) */ if (msp_has_keyboard()) { child = add_child(client, "dm355evm_keys", NULL, 0, true, client->irq); if (IS_ERR(child)) return PTR_ERR(child); } return 0; } /*----------------------------------------------------------------------*/ static void dm355evm_command(unsigned command) { int status; status = dm355evm_msp_write(command, DM355EVM_MSP_COMMAND); if (status < 0) dev_err(&msp430->dev, "command %d failure %d\n", command, status); } static void dm355evm_power_off(void) { dm355evm_command(MSP_COMMAND_POWEROFF); } static int dm355evm_msp_remove(struct i2c_client *client) { pm_power_off = NULL; msp430 = NULL; return 0; } static int dm355evm_msp_probe(struct i2c_client *client, const struct i2c_device_id *id) { int status; const char *video = msp_has_tvp() ? "TVP5146" : "imager"; if (msp430) return -EBUSY; msp430 = client; /* display revision status; doubles as sanity check */ status = dm355evm_msp_read(DM355EVM_MSP_FIRMREV); if (status < 0) goto fail; dev_info(&client->dev, "firmware v.%02X, %s as video-in\n", status, video); /* mux video input: either tvp5146 or some external imager */ status = dm355evm_msp_write(msp_has_tvp() ? 
0 : MSP_VIDEO_IMAGER, DM355EVM_MSP_VIDEO_IN); if (status < 0) dev_warn(&client->dev, "error %d muxing %s as video-in\n", status, video); /* init LED cache, and turn off the LEDs */ msp_led_cache = 0xff; dm355evm_msp_write(msp_led_cache, DM355EVM_MSP_LED); /* export capabilities we support */ status = add_children(client); if (status < 0) goto fail; /* PM hookup */ pm_power_off = dm355evm_power_off; return 0; fail: /* FIXME remove children ... */ dm355evm_msp_remove(client); return status; } static const struct i2c_device_id dm355evm_msp_ids[] = { { "dm355evm_msp", 0 }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(i2c, dm355evm_msp_ids); static struct i2c_driver dm355evm_msp_driver = { .driver.name = "dm355evm_msp", .id_table = dm355evm_msp_ids, .probe = dm355evm_msp_probe, .remove = dm355evm_msp_remove, }; static int __init dm355evm_msp_init(void) { return i2c_add_driver(&dm355evm_msp_driver); } subsys_initcall(dm355evm_msp_init); static void __exit dm355evm_msp_exit(void) { i2c_del_driver(&dm355evm_msp_driver); } module_exit(dm355evm_msp_exit); MODULE_DESCRIPTION("Interface to MSP430 firmware on DM355EVM"); MODULE_LICENSE("GPL");
gpl-2.0
arshull/GalaTab3_KK_Kernel_T310
drivers/usb/gadget/composite.c
46
46747
/* * composite.c - infrastructure for Composite USB Gadgets * * Copyright (C) 2006-2008 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define VERBOSE_DEBUG */ #include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/utsname.h> #include <linux/usb/composite.h> #include <asm/unaligned.h> #include "multi_config.h" /* #undef DBG #define DBG(dev, fmt, args...) printk(KERN_DEBUG "usb: "fmt, ##args) */ /* * The code in this file is utility code, used to build a gadget driver * from one or more "function" drivers, one or more "configuration" * objects, and a "usb_composite_driver" by gluing them together along * with the relevant device-wide data. */ /* big enough to hold our biggest descriptor */ #define USB_BUFSIZ 1024 static struct usb_composite_driver *composite; static int (*composite_gadget_bind)(struct usb_composite_dev *cdev); /* Some systems will need runtime overrides for the product identifiers * published in the device descriptor, either numbers or strings or both. * String parameters are in UTF-8 (superset of ASCII's 7 bit characters). 
*/ static ushort idVendor; module_param(idVendor, ushort, 0); MODULE_PARM_DESC(idVendor, "USB Vendor ID"); static ushort idProduct; module_param(idProduct, ushort, 0); MODULE_PARM_DESC(idProduct, "USB Product ID"); static ushort bcdDevice; module_param(bcdDevice, ushort, 0); MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)"); static char *iManufacturer; module_param(iManufacturer, charp, 0); MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string"); static char *iProduct; module_param(iProduct, charp, 0); MODULE_PARM_DESC(iProduct, "USB Product string"); static char *iSerialNumber; module_param(iSerialNumber, charp, 0); MODULE_PARM_DESC(iSerialNumber, "SerialNumber string"); static char composite_manufacturer[50]; /*-------------------------------------------------------------------------*/ /** * usb_add_function() - add a function to a configuration * @config: the configuration * @function: the function being added * Context: single threaded during gadget setup * * After initialization, each configuration must have one or more * functions added to it. Adding a function involves calling its @bind() * method to allocate resources such as interface and string identifiers * and endpoints. * * This function returns the value of the function's bind(), which is * zero for success else a negative errno value. */ int usb_add_function(struct usb_configuration *config, struct usb_function *function) { int value = -EINVAL; DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n", function->name, function, config->label, config); if (!function->set_alt || !function->disable) goto done; function->config = config; list_add_tail(&function->list, &config->functions); /* REVISIT *require* function->bind? */ if (function->bind) { value = function->bind(config, function); if (value < 0) { list_del(&function->list); function->config = NULL; } } else value = 0; /* We allow configurations that don't work at both speeds. 
* If we run into a lowspeed Linux system, treat it the same * as full speed ... it's the function drivers that will need * to avoid bulk and ISO transfers. */ if (!config->fullspeed && function->descriptors) config->fullspeed = true; if (!config->highspeed && function->hs_descriptors) config->highspeed = true; if (!config->superspeed && function->ss_descriptors) config->superspeed = true; done: if (value) DBG(config->cdev, "adding '%s'/%p --> %d\n", function->name, function, value); return value; } /** * usb_function_deactivate - prevent function and gadget enumeration * @function: the function that isn't yet ready to respond * * Blocks response of the gadget driver to host enumeration by * preventing the data line pullup from being activated. This is * normally called during @bind() processing to change from the * initial "ready to respond" state, or when a required resource * becomes available. * * For example, drivers that serve as a passthrough to a userspace * daemon can block enumeration unless that daemon (such as an OBEX, * MTP, or print server) is ready to handle host requests. * * Not all systems support software control of their USB peripheral * data pullups. * * Returns zero on success, else negative errno. */ int usb_function_deactivate(struct usb_function *function) { struct usb_composite_dev *cdev = function->config->cdev; unsigned long flags; int status = 0; spin_lock_irqsave(&cdev->lock, flags); if (cdev->deactivations == 0) status = usb_gadget_disconnect(cdev->gadget); if (status == 0) cdev->deactivations++; spin_unlock_irqrestore(&cdev->lock, flags); return status; } /** * usb_function_activate - allow function and gadget enumeration * @function: function on which usb_function_activate() was called * * Reverses effect of usb_function_deactivate(). If no more functions * are delaying their activation, the gadget driver will respond to * host enumeration procedures. * * Returns zero on success, else negative errno. 
*/ int usb_function_activate(struct usb_function *function) { struct usb_composite_dev *cdev = function->config->cdev; int status = 0; spin_lock(&cdev->lock); if (WARN_ON(cdev->deactivations == 0)) status = -EINVAL; else { cdev->deactivations--; if (cdev->deactivations == 0) status = usb_gadget_connect(cdev->gadget); } spin_unlock(&cdev->lock); return status; } /** * usb_interface_id() - allocate an unused interface ID * @config: configuration associated with the interface * @function: function handling the interface * Context: single threaded during gadget setup * * usb_interface_id() is called from usb_function.bind() callbacks to * allocate new interface IDs. The function driver will then store that * ID in interface, association, CDC union, and other descriptors. It * will also handle any control requests targeted at that interface, * particularly changing its altsetting via set_alt(). There may * also be class-specific or vendor-specific requests to handle. * * All interface identifier should be allocated using this routine, to * ensure that for example different functions don't wrongly assign * different meanings to the same identifier. Note that since interface * identifiers are configuration-specific, functions used in more than * one configuration (or more than once in a given configuration) need * multiple versions of the relevant descriptors. * * Returns the interface ID which was allocated; or -ENODEV if no * more interface IDs can be allocated. 
*/ int usb_interface_id(struct usb_configuration *config, struct usb_function *function) { unsigned id = config->next_interface_id; if (id < MAX_CONFIG_INTERFACES) { config->interface[id] = function; config->next_interface_id = id + 1; return id; } return -ENODEV; } static int config_buf(struct usb_configuration *config, enum usb_device_speed speed, void *buf, u8 type) { struct usb_config_descriptor *c = buf; void *next = buf + USB_DT_CONFIG_SIZE; int len = USB_BUFSIZ - USB_DT_CONFIG_SIZE; struct usb_function *f; int status; /* write the config descriptor */ c = buf; c->bLength = USB_DT_CONFIG_SIZE; c->bDescriptorType = type; /* wTotalLength is written later */ c->bNumInterfaces = config->next_interface_id; #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE c->bConfigurationValue = get_config_number() + 1; #else c->bConfigurationValue = config->bConfigurationValue; #endif c->iConfiguration = config->iConfiguration; c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes; c->bMaxPower = config->bMaxPower ? : (CONFIG_USB_GADGET_VBUS_DRAW / 2); /* There may be e.g. 
OTG descriptors */ if (config->descriptors) { status = usb_descriptor_fillbuf(next, len, config->descriptors); if (status < 0) return status; len -= status; next += status; } /* add each function's descriptors */ list_for_each_entry(f, &config->functions, list) { struct usb_descriptor_header **descriptors; #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE if (!is_available_function(f->name)) { USB_DBG("skip f->%s\n", f->name); continue; } else { USB_DBG("f->%s\n", f->name); } #endif switch (speed) { case USB_SPEED_SUPER: descriptors = f->ss_descriptors; break; case USB_SPEED_HIGH: descriptors = f->hs_descriptors; break; default: descriptors = f->descriptors; } if (!descriptors) continue; status = usb_descriptor_fillbuf(next, len, (const struct usb_descriptor_header **) descriptors); if (status < 0) return status; #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE if (change_conf(f, next, len, config, speed) < 0) { USB_DBG_ESS("failed to change configuration\n"); return -EINVAL; } #endif len -= status; next += status; } #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE set_interface_count(config, c); #endif len = next - buf; c->wTotalLength = cpu_to_le16(len); return len; } static int config_desc(struct usb_composite_dev *cdev, unsigned w_value) { struct usb_gadget *gadget = cdev->gadget; struct usb_configuration *c; u8 type = w_value >> 8; enum usb_device_speed speed = USB_SPEED_UNKNOWN; if (gadget->speed == USB_SPEED_SUPER) speed = gadget->speed; else if (gadget_is_dualspeed(gadget)) { int hs = 0; if (gadget->speed == USB_SPEED_HIGH) hs = 1; if (type == USB_DT_OTHER_SPEED_CONFIG) hs = !hs; if (hs) speed = USB_SPEED_HIGH; } /* This is a lookup by config *INDEX* */ w_value &= 0xff; #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE w_value = set_config_number(w_value); #endif list_for_each_entry(c, &cdev->configs, list) { /* ignore configs that won't work at this speed */ switch (speed) { case USB_SPEED_SUPER: if (!c->superspeed) continue; /* USB 3.0 Max power < 900 mA */ if (c->bMaxPower > 
0x70) /* 896mA = 0x70 * 8mA */ c->bMaxPower = 0x70; /* 896 mA */ break; case USB_SPEED_HIGH: if (!c->highspeed) continue; /* USB 2.0 Max power < 500 mA */ if (c->bMaxPower > 0xFA) /* 500mA = 0xFA * 2mA */ c->bMaxPower = 0xFA; /* 500 mA */ break; default: if (!c->fullspeed) continue; /* USB 2.0 Max power < 500 mA */ if (c->bMaxPower > 0xFA) /* 500mA = 0xFA * 2mA */ c->bMaxPower = 0xFA; /* 500 mA */ } if (w_value == 0) return config_buf(c, speed, cdev->req->buf, type); w_value--; } return -EINVAL; } static int count_configs(struct usb_composite_dev *cdev, unsigned type) { struct usb_gadget *gadget = cdev->gadget; struct usb_configuration *c; unsigned count = 0; int hs = 0; int ss = 0; if (gadget_is_dualspeed(gadget)) { if (gadget->speed == USB_SPEED_HIGH) hs = 1; if (gadget->speed == USB_SPEED_SUPER) ss = 1; if (type == USB_DT_DEVICE_QUALIFIER) hs = !hs; } list_for_each_entry(c, &cdev->configs, list) { /* ignore configs that won't work at this speed */ if (ss) { if (!c->superspeed) continue; } else if (hs) { if (!c->highspeed) continue; } else { if (!c->fullspeed) continue; } count++; #ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE count = count_multi_config(c, count); #endif } return count; } /** * bos_desc() - prepares the BOS descriptor. * @cdev: pointer to usb_composite device to generate the bos * descriptor for * * This function generates the BOS (Binary Device Object) * descriptor and its device capabilities descriptors. The BOS * descriptor should be supported by a SuperSpeed device. 
 */
static int bos_desc(struct usb_composite_dev *cdev)
{
	struct usb_ext_cap_descriptor *usb_ext;
	struct usb_ss_cap_descriptor *ss_cap;
	struct usb_dcd_config_params dcd_config_params;
	struct usb_bos_descriptor *bos = cdev->req->buf;

	bos->bLength = USB_DT_BOS_SIZE;
	bos->bDescriptorType = USB_DT_BOS;

	/* wTotalLength grows as each capability descriptor is appended */
	bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE);
	bos->bNumDeviceCaps = 0;

	/*
	 * A SuperSpeed device shall include the USB2.0 extension descriptor
	 * and shall support LPM when operating in USB2.0 HS mode.
	 */
	usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
	bos->bNumDeviceCaps++;
	le16_add_cpu(&bos->wTotalLength, USB_DT_USB_EXT_CAP_SIZE);
	usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
	usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
	usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
	usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);

	/*
	 * The Superspeed USB Capability descriptor shall be implemented by all
	 * SuperSpeed devices.
	 */
	ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
	bos->bNumDeviceCaps++;
	le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SS_CAP_SIZE);
	ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE;
	ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
	ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE;
	ss_cap->bmAttributes = 0; /* LTM is not supported yet */
	ss_cap->wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION |
				USB_FULL_SPEED_OPERATION |
				USB_HIGH_SPEED_OPERATION |
				USB_5GBPS_OPERATION);
	ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION;

	/* Get Controller configuration */
	if (cdev->gadget->ops->get_config_params)
		cdev->gadget->ops->get_config_params(&dcd_config_params);
	else {
		/* controller gave no exit latencies; use the spec defaults */
		dcd_config_params.bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT;
		dcd_config_params.bU2DevExitLat =
			cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT);
	}
	ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
	ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;

	return le16_to_cpu(bos->wTotalLength);
}

/*
 * device_qual - fill cdev->req->buf with the DEVICE_QUALIFIER descriptor,
 * describing this device at the "other" dual-speed operating speed.
 */
static void device_qual(struct usb_composite_dev *cdev)
{
	struct usb_qualifier_descriptor *qual = cdev->req->buf;

	qual->bLength = sizeof(*qual);
	qual->bDescriptorType = USB_DT_DEVICE_QUALIFIER;
	/* POLICY: same bcdUSB and device type info at both speeds */
	qual->bcdUSB = cdev->desc.bcdUSB;
	qual->bDeviceClass = cdev->desc.bDeviceClass;
	qual->bDeviceSubClass = cdev->desc.bDeviceSubClass;
	qual->bDeviceProtocol = cdev->desc.bDeviceProtocol;
	/* ASSUME same EP0 fifo size at both speeds */
	qual->bMaxPacketSize0 = cdev->gadget->ep0->maxpacket;
	qual->bNumConfigurations = count_configs(cdev, USB_DT_DEVICE_QUALIFIER);
	qual->bRESERVED = 0;
}

/*-------------------------------------------------------------------------*/

/*
 * reset_config - disable every function of the current configuration and
 * drop the configuration pointer.  The callers visible in this file run
 * it either under cdev->lock or single-threaded at unbind time.
 */
static void reset_config(struct usb_composite_dev *cdev)
{
	struct usb_function *f;

	DBG(cdev, "reset config\n");

	list_for_each_entry(f, &cdev->config->functions, list) {
		if (f->disable)
			f->disable(f);

		/* forget the endpoint ownership recorded by set_config() */
		bitmap_zero(f->endpoints, 32);
	}
	cdev->config = NULL;
}

/*
 * set_config - handle SET_CONFIGURATION: tear down any live configuration,
 * select the one matching @number (0 means "unconfigured"), and bring each
 * of its interfaces up in altsetting zero.  Returns 0,
 * USB_GADGET_DELAYED_STATUS, or a negative errno.  Called by
 * composite_setup() with cdev->lock held.
 */
static int set_config(struct usb_composite_dev *cdev,
		const struct usb_ctrlrequest *ctrl, unsigned number)
{
	struct usb_gadget *gadget = cdev->gadget;
	struct usb_configuration *c = NULL;
	int result = -EINVAL;
	/* OTG devices may draw only 8 mA until configured */
	unsigned power = gadget_is_otg(gadget) ? 8 : 100;
	int tmp;

	if (number) {
		list_for_each_entry(c, &cdev->configs, list) {
#ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE
			if (c->bConfigurationValue == number ||
					check_config(number)) {
#else
			if (c->bConfigurationValue == number) {
#endif
				/*
				 * We disable the FDs of the previous
				 * configuration only if the new configuration
				 * is a valid one
				 */
				if (cdev->config)
					reset_config(cdev);
				result = 0;
				break;
			}
		}
		if (result < 0)
			goto done;
	} else { /* Zero configuration value - need to reset the config */
		if (cdev->config)
			reset_config(cdev);
		result = 0;
	}

	INFO(cdev, "%s speed config #%d: %s\n",
	     ({ char *speed;
		switch (gadget->speed) {
		case USB_SPEED_LOW:
			speed = "low";
			break;
		case USB_SPEED_FULL:
			speed = "full";
			break;
		case USB_SPEED_HIGH:
			speed = "high";
			break;
		case USB_SPEED_SUPER:
			speed = "super";
			break;
		default:
			speed = "?";
			break;
		} ; speed; }),
	     number, c ? c->label : "unconfigured");

	if (!c)
		goto done;

	cdev->config = c;

	/* Initialize all interfaces by setting them to altsetting zero. */
	for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) {
		struct usb_function *f = c->interface[tmp];
		struct usb_descriptor_header **descriptors;

		if (!f)
			break;
#ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE
		USB_DBG_ESS("e %s[%d]\n", f->name, tmp);
#endif

		/*
		 * Record which endpoints are used by the function. This is used
		 * to dispatch control requests targeted at that endpoint to the
		 * function's setup callback instead of the current
		 * configuration's setup callback.
		 */
		switch (gadget->speed) {
		case USB_SPEED_SUPER:
			descriptors = f->ss_descriptors;
			break;
		case USB_SPEED_HIGH:
			descriptors = f->hs_descriptors;
			break;
		default:
			descriptors = f->descriptors;
		}

		for (; *descriptors; ++descriptors) {
			struct usb_endpoint_descriptor *ep;
			int addr;

			if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT)
				continue;

			ep = (struct usb_endpoint_descriptor *)*descriptors;
			/* fold direction bit + endpoint number into 0..31 */
			addr = ((ep->bEndpointAddress & 0x80) >> 3)
			     | (ep->bEndpointAddress & 0x0f);
			set_bit(addr, f->endpoints);
		}

		result = f->set_alt(f, tmp, 0);
		if (result < 0) {
			DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n",
					tmp, f->name, f, result);
			reset_config(cdev);
			goto done;
		}

		if (result == USB_GADGET_DELAYED_STATUS) {
			DBG(cdev,
			 "%s: interface %d (%s) requested delayed status\n",
					__func__, tmp, f->name);
			cdev->delayed_status++;
			DBG(cdev, "delayed_status count %d\n",
					cdev->delayed_status);
		}
	}

	/* when we return, be sure our power usage is valid */
	power = c->bMaxPower ? (2 * c->bMaxPower) : CONFIG_USB_GADGET_VBUS_DRAW;
done:
	usb_gadget_vbus_draw(gadget, power);
	if (result >= 0 && cdev->delayed_status)
		result = USB_GADGET_DELAYED_STATUS;
	return result;
}

/**
 * usb_add_config() - add a configuration to a device.
 * @cdev: wraps the USB gadget
 * @config: the configuration, with bConfigurationValue assigned
 * @bind: the configuration's bind function
 * Context: single threaded during gadget setup
 *
 * One of the main tasks of a composite @bind() routine is to
 * add each of the configurations it supports, using this routine.
 *
 * This function returns the value of the configuration's @bind(), which
 * is zero for success else a negative errno value. Binding configurations
 * assigns global resources including string IDs, and per-configuration
 * resources such as interface IDs and endpoints.
 */
int usb_add_config(struct usb_composite_dev *cdev,
		struct usb_configuration *config,
		int (*bind)(struct usb_configuration *))
{
	int status = -EINVAL;
	struct usb_configuration *c;

	DBG(cdev, "adding config #%u '%s'/%p\n",
			config->bConfigurationValue,
			config->label, config);

	if (!config->bConfigurationValue || !bind)
		goto done;

	/* Prevent duplicate configuration identifiers */
	list_for_each_entry(c, &cdev->configs, list) {
		if (c->bConfigurationValue == config->bConfigurationValue) {
			status = -EBUSY;
			goto done;
		}
	}

	config->cdev = cdev;
	list_add_tail(&config->list, &cdev->configs);

	INIT_LIST_HEAD(&config->functions);
	config->next_interface_id = 0;
	memset(config->interface, '\0', sizeof(config->interface));

	status = bind(config);
	if (status < 0) {
		/* bind failed: unwind any functions it managed to add */
		while (!list_empty(&config->functions)) {
			struct usb_function *f;

			f = list_first_entry(&config->functions,
					struct usb_function, list);
			list_del(&f->list);
			if (f->unbind) {
				DBG(cdev, "unbind function '%s'/%p\n",
					f->name, f);
				f->unbind(config, f);
				/* may free memory for "f" */
			}
		}
		list_del(&config->list);
		config->cdev = NULL;
	} else {
		unsigned i;

		DBG(cdev, "cfg %d/%p speeds:%s%s%s\n",
			config->bConfigurationValue, config,
			config->superspeed ? " super" : "",
			config->highspeed ? " high" : "",
			config->fullspeed
				? (gadget_is_dualspeed(cdev->gadget)
					? " full"
					: " full/low")
				: "");

		for (i = 0; i < MAX_CONFIG_INTERFACES; i++) {
			struct usb_function *f = config->interface[i];

			if (!f)
				continue;
			DBG(cdev, " interface %d = %s/%p\n",
				i, f->name, f);
		}
	}

	/* set_alt(), or next bind(), sets up
	 * ep->driver_data as needed.
	 */
	usb_ep_autoconfig_reset(cdev->gadget);

done:
	if (status)
		DBG(cdev, "added config '%s'/%u --> %d\n", config->label,
				config->bConfigurationValue, status);
	return status;
}

/*
 * unbind_config - detach and unbind every function of @config, then the
 * configuration itself.  Hooks may free the memory behind "f" and "config".
 * Always returns 0.
 */
static int unbind_config(struct usb_composite_dev *cdev,
			      struct usb_configuration *config)
{
	while (!list_empty(&config->functions)) {
		struct usb_function *f;

		f = list_first_entry(&config->functions,
				struct usb_function, list);
		list_del(&f->list);
		if (f->unbind) {
			DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
			f->unbind(config, f);
			/* may free memory for "f" */
		}
	}
	if (config->unbind) {
		DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
		config->unbind(config);
		/* may free memory for "c" */
	}
	return 0;
}

/*
 * usb_remove_config - take @config out of service: if it is the live
 * configuration, disable it under cdev->lock, then unbind it outside the
 * lock (the unbind hooks may free memory).
 */
int usb_remove_config(struct usb_composite_dev *cdev,
		      struct usb_configuration *config)
{
	unsigned long flags;

	printk(KERN_DEBUG "usb: %s cdev->config=%p, config=%p\n",
			__func__, cdev->config, config);
	spin_lock_irqsave(&cdev->lock, flags);

	if (cdev->config == config)
		reset_config(cdev);

	list_del(&config->list);

	spin_unlock_irqrestore(&cdev->lock, flags);

	return unbind_config(cdev, config);
}

/*-------------------------------------------------------------------------*/

/* We support strings in multiple languages ... string descriptor zero
 * says which languages are supported. The typical case will be that
 * only one language (probably English) is used, with I18N handled on
 * the host side.
 */

/*
 * collect_langs - append each string table's LANGID to the array at @buf
 * (capacity 126 entries), skipping duplicates.  @buf is the wData area of
 * string descriptor zero.
 */
static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf)
{
	const struct usb_gadget_strings *s;
	u16 language;
	__le16 *tmp;

	while (*sp) {
		s = *sp;
		language = cpu_to_le16(s->language);
		for (tmp = buf; *tmp && tmp < &buf[126]; tmp++) {
			if (*tmp == language)
				goto repeat;
		}
		*tmp++ = language;
repeat:
		sp++;
	}
}

/*
 * lookup_string - scan a NULL-terminated list of string tables for one in
 * @language and ask it for string @id.  Returns the descriptor length on
 * success, -EINVAL if no table supplied the string.
 */
static int lookup_string(
	struct usb_gadget_strings **sp,
	void *buf,
	u16 language,
	int id
)
{
	struct usb_gadget_strings *s;
	int value;

	while (*sp) {
		s = *sp++;
		if (s->language != language)
			continue;
		value = usb_gadget_get_string(s, id, buf);
		if (value > 0)
			return value;
	}
	return -EINVAL;
}

/*
 * get_string - build the USB_DT_STRING response in @buf.  id 0 yields the
 * LANGID list; otherwise the string is looked up in the override module
 * parameters, then the driver's, each config's, and each function's string
 * tables.  Returns descriptor length or -EINVAL.
 */
static int get_string(struct usb_composite_dev *cdev,
		void *buf, u16 language, int id)
{
	struct usb_configuration *c;
	struct usb_function *f;
	int len;
	const char *str;

	/* Yes, not only is USB's I18N support probably more than most
	 * folk will ever care about ... also, it's all supported here.
	 * (Except for UTF8 support for Unicode's "Astral Planes".)
	 */

	/* 0 == report all available language codes */
	if (id == 0) {
		struct usb_string_descriptor *s = buf;
		struct usb_gadget_strings **sp;

		memset(s, 0, 256);
		s->bDescriptorType = USB_DT_STRING;

		sp = composite->strings;
		if (sp)
			collect_langs(sp, s->wData);

		list_for_each_entry(c, &cdev->configs, list) {
			sp = c->strings;
			if (sp)
				collect_langs(sp, s->wData);

			list_for_each_entry(f, &c->functions, list) {
#ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE
				/* hidden functions contribute no languages */
				if (!is_available_function(f->name)) {
					USB_DBG("skip f->%s\n", f->name);
					continue;
				} else {
					USB_DBG("f->%s\n", f->name);
				}
#endif
				sp = f->strings;
				if (sp)
					collect_langs(sp, s->wData);
			}
		}

		for (len = 0; len <= 126 && s->wData[len]; len++)
			continue;
		if (!len)
			return -EINVAL;

		s->bLength = 2 * (len + 1);
		return s->bLength;
	}

	/* Otherwise, look up and return a specified string. First
	 * check if the string has not been overridden.
	 */
	if (cdev->manufacturer_override == id)
		str = iManufacturer ?: composite->iManufacturer ?:
			composite_manufacturer;
	else if (cdev->product_override == id)
		str = iProduct ?: composite->iProduct;
	else if (cdev->serial_override == id)
		str = iSerialNumber;
	else
		str = NULL;
	if (str) {
		/* wrap the override C-string in a one-entry table */
		struct usb_gadget_strings strings = {
			.language = language,
			.strings  = &(struct usb_string) { 0xff, str }
		};
		return usb_gadget_get_string(&strings, 0xff, buf);
	}

	/* String IDs are device-scoped, so we look up each string
	 * table we're told about. These lookups are infrequent;
	 * simpler-is-better here.
	 */
	if (composite->strings) {
		len = lookup_string(composite->strings, buf, language, id);
		if (len > 0)
			return len;
	}
	list_for_each_entry(c, &cdev->configs, list) {
		if (c->strings) {
			len = lookup_string(c->strings, buf, language, id);
			if (len > 0)
				return len;
		}
		list_for_each_entry(f, &c->functions, list) {
			if (!f->strings)
				continue;
			len = lookup_string(f->strings, buf, language, id);
			if (len > 0)
				return len;
		}
	}
	return -EINVAL;
}

/**
 * usb_string_id() - allocate an unused string ID
 * @cdev: the device whose string descriptor IDs are being allocated
 * Context: single threaded during gadget setup
 *
 * @usb_string_id() is called from bind() callbacks to allocate
 * string IDs. Drivers for functions, configurations, or gadgets will
 * then store that ID in the appropriate descriptors and string table.
 *
 * All string identifier should be allocated using this,
 * @usb_string_ids_tab() or @usb_string_ids_n() routine, to ensure
 * that for example different functions don't wrongly assign different
 * meanings to the same identifier.
 */
int usb_string_id(struct usb_composite_dev *cdev)
{
	if (cdev->next_string_id < 254) {
		/* string id 0 is reserved by USB spec for list of
		 * supported languages */
		/* 255 reserved as well? -- mina86 */
		cdev->next_string_id++;
		printk(KERN_DEBUG "usb: %s cdev(0x%p)->next_string_id=%d\n",
			__func__, cdev, cdev->next_string_id);
		return cdev->next_string_id;
	}
	printk(KERN_DEBUG "usb: %s error cdev(0x%p)->next_string_id=%d\n",
		__func__, cdev, cdev->next_string_id);
	return -ENODEV;
}

/**
 * usb_string_ids() - allocate unused string IDs in batch
 * @cdev: the device whose string descriptor IDs are being allocated
 * @str: an array of usb_string objects to assign numbers to
 * Context: single threaded during gadget setup
 *
 * @usb_string_ids() is called from bind() callbacks to allocate
 * string IDs. Drivers for functions, configurations, or gadgets will
 * then copy IDs from the string table to the appropriate descriptors
 * and string table for other languages.
 *
 * All string identifier should be allocated using this,
 * @usb_string_id() or @usb_string_ids_n() routine, to ensure that for
 * example different functions don't wrongly assign different meanings
 * to the same identifier.
 */
int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
{
	int next = cdev->next_string_id;

	printk(KERN_DEBUG "usb: %s --cdev(0x%p)->next_string_id=%d\n",
		__func__, cdev, cdev->next_string_id);
	for (; str->s; ++str) {
		if (unlikely(next >= 254))
			return -ENODEV;
		str->id = ++next;
	}

	cdev->next_string_id = next;

	return 0;
}

/**
 * usb_string_ids_n() - allocate unused string IDs in batch
 * @c: the device whose string descriptor IDs are being allocated
 * @n: number of string IDs to allocate
 * Context: single threaded during gadget setup
 *
 * Returns the first requested ID. This ID and next @n-1 IDs are now
 * valid IDs. At least provided that @n is non-zero because if it
 * is, returns last requested ID which is now very useful information.
 *
 * @usb_string_ids_n() is called from bind() callbacks to allocate
 * string IDs. Drivers for functions, configurations, or gadgets will
 * then store that ID in the appropriate descriptors and string table.
*
 * All string identifier should be allocated using this,
 * @usb_string_id() or @usb_string_ids_n() routine, to ensure that for
 * example different functions don't wrongly assign different meanings
 * to the same identifier.
 */
int usb_string_ids_n(struct usb_composite_dev *c, unsigned n)
{
	unsigned next = c->next_string_id;

	printk(KERN_DEBUG "usb: %s --cdev(0x%p)->next_string_id=%d\n",
		__func__, c, c->next_string_id);
	if (unlikely(n > 254 || (unsigned)next + n > 254))
		return -ENODEV;
	c->next_string_id += n;
	return next + 1;
}

/*-------------------------------------------------------------------------*/

/*
 * composite_setup_complete - ep0 completion handler; only logs short or
 * failed transfers.
 */
static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (req->status || req->actual != req->length)
		DBG((struct usb_composite_dev *) ep->driver_data,
				"setup complete --> %d, %d/%d\n",
				req->status, req->actual, req->length);
}

/*
 * The setup() callback implements all the ep0 functionality that's
 * not handled lower down, in hardware or the hardware driver(like
 * device and endpoint feature flags, and their status). It's all
 * housekeeping for the gadget function we're implementing. Most of
 * the work is in config and function specific setup.
 */
static int composite_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);
	struct usb_request *req = cdev->req;
	int value = -EOPNOTSUPP;
	int status = 0;
	u16 w_index = le16_to_cpu(ctrl->wIndex);
	u8 intf = w_index & 0xFF;
	u16 w_value = le16_to_cpu(ctrl->wValue);
	u16 w_length = le16_to_cpu(ctrl->wLength);
	struct usb_function *f = NULL;
	u8 endp;

	/* partial re-init of the response message; the function or the
	 * gadget might need to intercept e.g. a control-OUT completion
	 * when we delegate to it.
	 */
	req->zero = 0;
	req->complete = composite_setup_complete;
	req->length = 0;
	gadget->ep0->driver_data = cdev;

	switch (ctrl->bRequest) {

	/* we handle all standard USB descriptors */
	case USB_REQ_GET_DESCRIPTOR:
		if (ctrl->bRequestType != USB_DIR_IN)
			goto unknown;
		switch (w_value >> 8) {

		case USB_DT_DEVICE:
			cdev->desc.bNumConfigurations =
				count_configs(cdev, USB_DT_DEVICE);
			cdev->desc.bMaxPacketSize0 =
				cdev->gadget->ep0->maxpacket;
			if (gadget_is_superspeed(gadget)) {
				if (gadget->speed >= USB_SPEED_SUPER) {
					cdev->desc.bcdUSB = cpu_to_le16(0x0300);
					/* SS ep0 size is expressed as 2^n */
					cdev->desc.bMaxPacketSize0 = 9;
				} else {
					cdev->desc.bcdUSB = cpu_to_le16(0x0210);
				}
			}

			value = min(w_length, (u16) sizeof cdev->desc);
			memcpy(req->buf, &cdev->desc, value);
			printk(KERN_DEBUG "usb: GET_DES\n");
			break;
		case USB_DT_DEVICE_QUALIFIER:
			if (!gadget_is_dualspeed(gadget) ||
			    gadget->speed >= USB_SPEED_SUPER)
				break;
			device_qual(cdev);
			value = min_t(int, w_length,
				sizeof(struct usb_qualifier_descriptor));
			break;
		case USB_DT_OTHER_SPEED_CONFIG:
			if (!gadget_is_dualspeed(gadget) ||
			    gadget->speed >= USB_SPEED_SUPER)
				break;
			/* FALLTHROUGH */
		case USB_DT_CONFIG:
			value = config_desc(cdev, w_value);
			if (value >= 0)
				value = min(w_length, (u16) value);
			break;
		case USB_DT_STRING:
#ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE
			set_string_mode(w_length);
#endif
			value = get_string(cdev, req->buf,
					w_index, w_value & 0xff);
			if (value >= 0)
				value = min(w_length, (u16) value);
			break;
		case USB_DT_BOS:
			if (gadget_is_superspeed(gadget)) {
				value = bos_desc(cdev);
				value = min(w_length, (u16) value);
			}
			break;
		}
		break;

	/* any number of configs can work */
	case USB_REQ_SET_CONFIGURATION:
		if (ctrl->bRequestType != 0)
			goto unknown;
		if (gadget_is_otg(gadget)) {
			if (gadget->a_hnp_support)
				DBG(cdev, "HNP available\n");
			else if (gadget->a_alt_hnp_support)
				DBG(cdev, "HNP on another port\n");
			else
				VDBG(cdev, "HNP inactive\n");
		}
		spin_lock(&cdev->lock);
		value = set_config(cdev, ctrl, w_value);
		spin_unlock(&cdev->lock);
		printk(KERN_DEBUG "usb: SET_CON\n");
		/* NOTE(review): set_config_number() is called here without
		 * the Samsung #ifdef guard used elsewhere in this file --
		 * confirm it is defined in non-Samsung builds. */
		if (value == 0) {
			if (w_value)
				set_config_number(w_value - 1);
		}
		break;
	case USB_REQ_GET_CONFIGURATION:
		if (ctrl->bRequestType != USB_DIR_IN)
			goto unknown;
		if (cdev->config)
#ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE
			*(u8 *)req->buf = get_config_number() + 1;
#else
			*(u8 *)req->buf = cdev->config->bConfigurationValue;
#endif
		else
			*(u8 *)req->buf = 0;
		value = min(w_length, (u16) 1);
		break;

	/* function drivers must handle get/set altsetting; if there's
	 * no get() method, we know only altsetting zero works.
	 */
	case USB_REQ_SET_INTERFACE:
		if (ctrl->bRequestType != USB_RECIP_INTERFACE)
			goto unknown;
		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
			break;
		f = cdev->config->interface[intf];
		if (!f)
			break;
		if (w_value && !f->set_alt)
			break;
		value = f->set_alt(f, w_index, w_value);
		if (value == USB_GADGET_DELAYED_STATUS) {
			DBG(cdev,
			 "%s: interface %d (%s) requested delayed status\n",
					__func__, intf, f->name);
			cdev->delayed_status++;
			DBG(cdev, "delayed_status count %d\n",
					cdev->delayed_status);
		}
		break;
	case USB_REQ_GET_INTERFACE:
		if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
			goto unknown;
		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
			break;
		f = cdev->config->interface[intf];
		if (!f)
			break;
		/* lots of interfaces only need altsetting zero... */
		value = f->get_alt ? f->get_alt(f, w_index) : 0;
		if (value < 0)
			break;
		*((u8 *)req->buf) = value;
		value = min(w_length, (u16) 1);
		break;

	/*
	 * USB 3.0 additions:
	 * Function driver should handle get_status request. If such cb
	 * wasn't supplied we respond with default value = 0
	 * Note: function driver should supply such cb only for the first
	 * interface of the function
	 */
	case USB_REQ_GET_STATUS:
		if (!gadget_is_superspeed(gadget))
			goto unknown;
		if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE))
			goto unknown;
		value = 2;	/* This is the length of the get_status reply */
		put_unaligned_le16(0, req->buf);
		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
			break;
		f = cdev->config->interface[intf];
		if (!f)
			break;
		status = f->get_status ? f->get_status(f) : 0;
		if (status < 0)
			break;
		put_unaligned_le16(status & 0x0000ffff, req->buf);
		break;
	/*
	 * Function drivers should handle SetFeature/ClearFeature
	 * (FUNCTION_SUSPEND) request. function_suspend cb should be supplied
	 * only for the first interface of the function
	 */
	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		if (!gadget_is_superspeed(gadget))
			goto unknown;
		if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_INTERFACE))
			goto unknown;
		switch (w_value) {
		case USB_INTRF_FUNC_SUSPEND:
			if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
				break;
			f = cdev->config->interface[intf];
			if (!f)
				break;
			value = 0;
			if (f->func_suspend)
				/* suspend option flags ride in wIndex's
				 * high byte */
				value = f->func_suspend(f, w_index >> 8);
			if (value < 0) {
				ERROR(cdev,
				      "func_suspend() returned error %d\n",
				      value);
				value = 0;
			}
			break;
		}
		break;
	default:
unknown:
		VDBG(cdev,
			"non-core control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

		/* functions always handle their interfaces and endpoints...
		 * punt other recipients (other, WUSB, ...) to the current
		 * configuration code.
		 *
		 * REVISIT it could make sense to let the composite device
		 * take such requests too, if that's ever needed:  to work
		 * in config 0, etc.
		 */
		switch (ctrl->bRequestType & USB_RECIP_MASK) {
		case USB_RECIP_INTERFACE:
			if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
				break;
			f = cdev->config->interface[intf];
			break;

		case USB_RECIP_ENDPOINT:
			/* direction bit + endpoint number, same encoding
			 * used when set_config() filled f->endpoints */
			endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
			list_for_each_entry(f, &cdev->config->functions, list) {
				if (test_bit(endp, f->endpoints))
					break;
			}
			if (&f->list == &cdev->config->functions)
				f = NULL;
			break;
		}

		if (f && f->setup)
			value = f->setup(f, ctrl);
		else {
			struct usb_configuration	*c;

			c = cdev->config;
			if (c && c->setup)
				value = c->setup(c, ctrl);
		}

		goto done;
	}

	/* respond with data transfer before status phase? */
	if (value >= 0 && value != USB_GADGET_DELAYED_STATUS) {
		req->length = value;
		req->zero = value < w_length;
		value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
		if (value < 0) {
			DBG(cdev, "ep_queue --> %d\n", value);
			req->status = 0;
			composite_setup_complete(gadget->ep0, req);
		}
	} else if (value == USB_GADGET_DELAYED_STATUS && w_length != 0) {
		WARN(cdev,
			"%s: Delayed status not supported for w_length != 0",
			__func__);
	}

done:
	/* device either stalls (value < 0) or reports success */
	return value;
}

/*
 * composite_disconnect - bus disconnect: drop the live configuration and
 * notify the composite driver, all under cdev->lock.
 */
static void composite_disconnect(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);
	unsigned long flags;

#ifdef CONFIG_USB_ANDROID_SAMSUNG_COMPOSITE
	set_string_mode(0);
#endif
	/* REVISIT:  should we have config and device level
	 * disconnect callbacks?
	 */
	spin_lock_irqsave(&cdev->lock, flags);
	if (cdev->config)
		reset_config(cdev);
	if (composite->disconnect)
		composite->disconnect(cdev);
	spin_unlock_irqrestore(&cdev->lock, flags);
}

/*-------------------------------------------------------------------------*/

/* sysfs "suspended" attribute: 1 while the bus has us suspended */
static ssize_t composite_show_suspended(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct usb_gadget *gadget = dev_to_usb_gadget(dev);
	struct usb_composite_dev *cdev = get_gadget_data(gadget);

	return sprintf(buf, "%d\n", cdev->suspended);
}

static DEVICE_ATTR(suspended, 0444, composite_show_suspended, NULL);

/*
 * composite_unbind - tear the whole composite device down: unbind every
 * configuration, the composite driver, free the ep0 request, and clear
 * the module-wide "composite" pointer.
 */
static void composite_unbind(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);

	/* composite_disconnect() must already have been called
	 * by the underlying peripheral controller driver!
	 * so there's no i/o concurrency that could affect the
	 * state protected by cdev->lock.
	 */
	WARN_ON(cdev->config);

	while (!list_empty(&cdev->configs)) {
		struct usb_configuration *c;

		c = list_first_entry(&cdev->configs,
				struct usb_configuration, list);
		list_del(&c->list);
		unbind_config(cdev, c);
	}
	if (composite->unbind)
		composite->unbind(cdev);

	if (cdev->req) {
		kfree(cdev->req->buf);
		usb_ep_free_request(gadget->ep0, cdev->req);
	}
	device_remove_file(&gadget->dev, &dev_attr_suspended);
	kfree(cdev);
	set_gadget_data(gadget, NULL);
	composite = NULL;
}

/*
 * override_id - if *desc is still 0, allocate a string ID for it so a
 * module-parameter string can override the driver's.  Returns the ID in
 * use (possibly still 0 on allocation failure).
 */
static u8 override_id(struct usb_composite_dev *cdev, u8 *desc)
{
	if (!*desc) {
		int ret = usb_string_id(cdev);
		if (unlikely(ret < 0))
			WARNING(cdev, "failed to override string ID\n");
		else
			*desc = ret;
	}

	return *desc;
}

/*
 * composite_bind - gadget bind: allocate the composite device and its ep0
 * request, run the composite driver's bind, then apply module-parameter
 * overrides for VID/PID/bcdDevice and the id strings.  On any failure the
 * partially built state is torn down via composite_unbind().
 */
static int composite_bind(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev;
	int status = -ENOMEM;

	cdev = kzalloc(sizeof *cdev, GFP_KERNEL);
	if (!cdev)
		return status;

	spin_lock_init(&cdev->lock);
	cdev->gadget = gadget;
	set_gadget_data(gadget, cdev);
	INIT_LIST_HEAD(&cdev->configs);

	/* preallocate control response and buffer */
	cdev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
	if (!cdev->req)
		goto fail;
	cdev->req->buf = kmalloc(USB_BUFSIZ, GFP_KERNEL);
	if (!cdev->req->buf)
		goto fail;
	cdev->req->complete = composite_setup_complete;
	gadget->ep0->driver_data = cdev;

	cdev->bufsiz = USB_BUFSIZ;
	cdev->driver = composite;

	/*
	 * As per USB compliance update, a device that is actively drawing
	 * more than 100mA from USB must report itself as bus-powered in
	 * the GetStatus(DEVICE) call.
	 */
	if (CONFIG_USB_GADGET_VBUS_DRAW <= USB_SELF_POWER_VBUS_MAX_DRAW)
		usb_gadget_set_selfpowered(gadget);

	/* interface and string IDs start at zero via kzalloc.
	 * we force endpoints to start unassigned; few controller
	 * drivers will zero ep->driver_data.
	 */
	usb_ep_autoconfig_reset(cdev->gadget);

	/* composite gadget needs to assign strings for whole device (like
	 * serial number), register function drivers, potentially update
	 * power state and consumption, etc
	 */
	status = composite_gadget_bind(cdev);
	if (status < 0)
		goto fail;

	cdev->desc = *composite->dev;

	/* standardized runtime overrides for device ID data */
	if (idVendor)
		cdev->desc.idVendor = cpu_to_le16(idVendor);
	if (idProduct)
		cdev->desc.idProduct = cpu_to_le16(idProduct);
	if (bcdDevice)
		cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);

	printk(KERN_DEBUG "usb: %s idVendor=0x%x, idProduct=0x%x\n",
			__func__, idVendor, idProduct);
	printk(KERN_DEBUG "usb: %s bcdDevice=0x%x\n",
			__func__, bcdDevice);

	/* string overrides */
	if (iManufacturer || !cdev->desc.iManufacturer) {
		if (!iManufacturer && !composite->iManufacturer &&
		    !*composite_manufacturer)
			snprintf(composite_manufacturer,
				 sizeof composite_manufacturer,
				 "%s %s with %s",
				 init_utsname()->sysname,
				 init_utsname()->release,
				 gadget->name);

		cdev->manufacturer_override =
			override_id(cdev, &cdev->desc.iManufacturer);
	}
	printk(KERN_DEBUG "usb: %s composite_manufacturer=%s\n",
			__func__, composite_manufacturer);

	if (iProduct || (!cdev->desc.iProduct && composite->iProduct))
		cdev->product_override =
			override_id(cdev, &cdev->desc.iProduct);

	if (iSerialNumber)
		cdev->serial_override =
			override_id(cdev, &cdev->desc.iSerialNumber);

	/* has userspace failed to provide a serial number? */
	if (composite->needs_serial && !cdev->desc.iSerialNumber)
		WARNING(cdev, "userspace failed to provide iSerialNumber\n");

	/* finish up */
	status = device_create_file(&gadget->dev, &dev_attr_suspended);
	if (status)
		goto fail;

	INFO(cdev, "%s ready\n", composite->name);
	return 0;

fail:
	composite_unbind(gadget);
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * composite_suspend - propagate bus suspend to every active function and
 * the composite driver, then drop VBUS draw to the 2 mA suspend budget.
 */
static void composite_suspend(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);
	struct usb_function *f;

	/* REVISIT:  should we have config level
	 * suspend/resume callbacks?
	 */
	DBG(cdev, "suspend\n");
	if (cdev->config) {
		list_for_each_entry(f, &cdev->config->functions, list) {
			if (f->suspend)
				f->suspend(f);
		}
	}
	if (composite->suspend)
		composite->suspend(cdev);

	cdev->suspended = 1;

	usb_gadget_vbus_draw(gadget, 2);
}

/*
 * composite_resume - propagate bus resume and restore the configured
 * VBUS power draw.
 */
static void composite_resume(struct usb_gadget *gadget)
{
	struct usb_composite_dev *cdev = get_gadget_data(gadget);
	struct usb_function *f;
	u8 maxpower;

	/* REVISIT:  should we have config level
	 * suspend/resume callbacks?
	 */
	DBG(cdev, "resume\n");
	if (composite->resume)
		composite->resume(cdev);
	if (cdev->config) {
		list_for_each_entry(f, &cdev->config->functions, list) {
			if (f->resume)
				f->resume(f);
		}

		/* bMaxPower is in 2 mA units */
		maxpower = cdev->config->bMaxPower;

		usb_gadget_vbus_draw(gadget, maxpower ?
			(2 * maxpower) : CONFIG_USB_GADGET_VBUS_DRAW);
	}

	cdev->suspended = 0;
}

/*-------------------------------------------------------------------------*/

static struct usb_gadget_driver composite_driver = {
#ifdef CONFIG_USB_GADGET_SUPERSPEED
	.speed		= USB_SPEED_SUPER,
#else
	.speed		= USB_SPEED_HIGH,
#endif
	.unbind		= composite_unbind,

	.setup		= composite_setup,
	.disconnect	= composite_disconnect,

	.suspend	= composite_suspend,
	.resume		= composite_resume,

	.driver	= {
		.owner		= THIS_MODULE,
	},
};

/**
 * usb_composite_probe() - register a composite driver
 * @driver: the driver to register
 * @bind: the callback used to allocate resources that are shared across the
 *	whole device, such as string IDs, and add its configurations using
 *	@usb_add_config(). This may fail by returning a negative errno
 *	value; it should return zero on successful initialization.
 * Context: single threaded during gadget setup
 *
 * This function is used to register drivers using the composite driver
 * framework. The return value is zero, or a negative errno value.
 * Those values normally come from the driver's @bind method, which does
 * all the work of setting up the driver to match the hardware.
 *
 * On successful return, the gadget is ready to respond to requests from
 * the host, unless one of its components invokes usb_gadget_disconnect()
 * while it was binding. That would usually be done in order to wait for
 * some userspace participation.
*/ int usb_composite_probe(struct usb_composite_driver *driver, int (*bind)(struct usb_composite_dev *cdev)) { if (!driver || !driver->dev || !bind || composite) return -EINVAL; if (!driver->name) driver->name = "composite"; if (!driver->iProduct) driver->iProduct = driver->name; composite_driver.function = (char *) driver->name; composite_driver.driver.name = driver->name; composite = driver; composite_gadget_bind = bind; return usb_gadget_probe_driver(&composite_driver, composite_bind); } /** * usb_composite_unregister() - unregister a composite driver * @driver: the driver to unregister * * This function is used to unregister drivers using the composite * driver framework. */ void usb_composite_unregister(struct usb_composite_driver *driver) { if (composite != driver) return; usb_gadget_unregister_driver(&composite_driver); } /** * usb_composite_setup_continue() - Continue with the control transfer * @cdev: the composite device who's control transfer was kept waiting * * This function must be called by the USB function driver to continue * with the control transfer's data/status stage in case it had requested to * delay the data/status stages. A USB function's setup handler (e.g. set_alt()) * can request the composite framework to delay the setup request's data/status * stages by returning USB_GADGET_DELAYED_STATUS. */ void usb_composite_setup_continue(struct usb_composite_dev *cdev) { int value; struct usb_request *req = cdev->req; unsigned long flags; DBG(cdev, "%s\n", __func__); spin_lock_irqsave(&cdev->lock, flags); if (cdev->delayed_status == 0) { WARN(cdev, "%s: Unexpected call\n", __func__); } else if (--cdev->delayed_status == 0) { DBG(cdev, "%s: Completing delayed status\n", __func__); req->length = 0; value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); if (value < 0) { DBG(cdev, "ep_queue --> %d\n", value); req->status = 0; composite_setup_complete(cdev->gadget->ep0, req); } } spin_unlock_irqrestore(&cdev->lock, flags); }
gpl-2.0
fabianbergmark/linux-sctp
drivers/gpu/drm/msm/msm_atomic.c
46
7494
/*
 * Copyright (C) 2014 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
#include "msm_fence.h"

/*
 * Book-keeping for one in-flight atomic commit.  Allocated by commit_init()
 * in msm_atomic_commit() and freed by commit_destroy() once the commit has
 * finished (possibly from the nonblocking worker).
 */
struct msm_commit {
	struct drm_device *dev;
	struct drm_atomic_state *state;	/* the state being committed */
	struct work_struct work;	/* worker for the nonblocking path */
	uint32_t crtc_mask;		/* crtcs touched by this commit */
};

static void commit_worker(struct work_struct *work);

/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	int ret;

	/* The wait-queue's internal lock also guards priv->pending_crtcs,
	 * making the wait + test-and-set below atomic w.r.t. other commits.
	 */
	spin_lock(&priv->pending_crtcs_event.lock);
	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
			!(priv->pending_crtcs & crtc_mask));
	if (ret == 0) {
		DBG("start: %08x", crtc_mask);
		priv->pending_crtcs |= crtc_mask;
	}
	spin_unlock(&priv->pending_crtcs_event.lock);

	/* non-zero only when the interruptible wait was interrupted */
	return ret;
}

/* clear specified crtcs (no longer pending update) and wake anyone
 * blocked in start_atomic()
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	spin_lock(&priv->pending_crtcs_event.lock);
	DBG("end: %08x", crtc_mask);
	priv->pending_crtcs &= ~crtc_mask;
	wake_up_all_locked(&priv->pending_crtcs_event);
	spin_unlock(&priv->pending_crtcs_event.lock);
}

/* allocate and initialize commit book-keeping for 'state';
 * returns NULL on allocation failure
 */
static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->dev = state->dev;
	c->state = state;

	INIT_WORK(&c->work, commit_worker);

	return c;
}

/* release the crtcs claimed by this commit, then free it */
static void commit_destroy(struct msm_commit *c)
{
	end_atomic(c->dev->dev_private, c->crtc_mask);
	kfree(c);
}

/* wait until the hw has latched the commit on every enabled crtc of
 * 'old_state' (legacy cursor updates are deliberately not waited on)
 */
static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct msm_drm_private *priv = old_state->dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int ncrtcs = old_state->dev->mode_config.num_crtc;
	int i;

	for (i = 0; i < ncrtcs; i++) {
		/* old-style state arrays: slots may be NULL */
		crtc = old_state->crtcs[i];

		if (!crtc)
			continue;

		if (!crtc->state->enable)
			continue;

		/* Legacy cursor ioctls are completely unsynced, and userspace
		 * relies on that (by doing tons of cursor updates). */
		if (old_state->legacy_cursor_update)
			continue;

		kms->funcs->wait_for_crtc_commit_done(kms, crtc);
	}
}

/* The (potentially) asynchronous part of the commit.  At this point
 * nothing can fail short of armageddon.
 *
 * Note: 'async' is currently unused in this body; the call sequence of
 * the drm_atomic_helper_* functions below is order-critical.
 */
static void complete_commit(struct msm_commit *c, bool async)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	drm_atomic_helper_wait_for_fences(dev, state);

	kms->funcs->prepare_commit(kms, state);

	drm_atomic_helper_commit_modeset_disables(dev, state);

	drm_atomic_helper_commit_planes(dev, state, false);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs.  So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq.  We need some way to poll for pipe shut
	 * down.  Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be critical path)
	 */

	msm_atomic_wait_for_commit_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	kms->funcs->complete_commit(kms, state);

	drm_atomic_state_free(state);

	/* releases the crtc_mask claimed in msm_atomic_commit() */
	commit_destroy(c);
}

/* work callback for the nonblocking commit path */
static void commit_worker(struct work_struct *work)
{
	complete_commit(container_of(work, struct msm_commit, work), true);
}

/* driver ->atomic_check hook: validate 'state' using the DRM helpers */
int msm_atomic_check(struct drm_device *dev,
		     struct drm_atomic_state *state)
{
	int ret;

	/*
	 * msm ->atomic_check can update ->mode_changed for pixel format
	 * changes, hence must be run before we check the modeset changes.
	 */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	return ret;
}

/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a with drm_atomic_helper_check() pre-validated state
 * object. This can still fail when e.g. the framebuffer reservation fails.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool nonblock)
{
	struct msm_drm_private *priv = dev->dev_private;
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	struct msm_commit *c;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	c = commit_init(state);
	if (!c) {
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Figure out what crtcs we have:
	 */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		c->crtc_mask |= (1 << drm_crtc_index(crtc));
	}

	/*
	 * Figure out what fence to wait for: only planes whose fb actually
	 * changes need to wait on the buffer's exclusive fence.
	 */
	for (i = 0; i < nplanes; i++) {
		struct drm_plane *plane = state->planes[i];
		struct drm_plane_state *new_state = state->plane_states[i];

		if (!plane)
			continue;

		if ((plane->state->fb != new_state->fb) && new_state->fb) {
			struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
			struct msm_gem_object *msm_obj = to_msm_bo(obj);

			new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
		}
	}

	/*
	 * Wait for pending updates on any of the same crtc's and then
	 * mark our set of crtc's as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	if (ret) {
		/* start_atomic() failed, so no crtcs were claimed: plain
		 * kfree instead of commit_destroy() (which would end_atomic)
		 */
		kfree(c);
		goto error;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */

	drm_atomic_helper_swap_state(dev, state);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one conditions: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	if (nonblock) {
		queue_work(priv->atomic_wq, &c->work);
		return 0;
	}

	complete_commit(c, false);

	return 0;

error:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}
gpl-2.0
novaspirit/tf101-l4t-3.1
drivers/video/mb862xx/mb862xxfb_accel.c
46
8381
/* * drivers/mb862xx/mb862xxfb_accel.c * * Fujitsu Carmine/Coral-P(A)/Lime framebuffer driver acceleration support * * (C) 2007 Alexander Shishkin <virtuoso@slind.org> * (C) 2009 Valentin Sitdikov <valentin.sitdikov@siemens.com> * (C) 2009 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/fb.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #if defined(CONFIG_OF) #include <linux/of_platform.h> #endif #include "mb862xxfb.h" #include "mb862xx_reg.h" #include "mb862xxfb_accel.h" static void mb862xxfb_write_fifo(u32 count, u32 *data, struct fb_info *info) { struct mb862xxfb_par *par = info->par; static u32 free; u32 total = 0; while (total < count) { if (free) { outreg(geo, GDC_GEO_REG_INPUT_FIFO, data[total]); total++; free--; } else { free = (u32) inreg(draw, GDC_REG_FIFO_COUNT); } } } static void mb86290fb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { __u32 cmd[6]; cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP; /* Set raster operation */ cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9); cmd[2] = GDC_TYPE_BLTCOPYP << 24; if (area->sx >= area->dx && area->sy >= area->dy) cmd[2] |= GDC_CMD_BLTCOPY_TOP_LEFT << 16; else if (area->sx >= area->dx && area->sy <= area->dy) cmd[2] |= GDC_CMD_BLTCOPY_BOTTOM_LEFT << 16; else if (area->sx <= area->dx && area->sy >= area->dy) cmd[2] |= GDC_CMD_BLTCOPY_TOP_RIGHT << 16; else cmd[2] |= GDC_CMD_BLTCOPY_BOTTOM_RIGHT << 16; cmd[3] = (area->sy << 16) | area->sx; cmd[4] = (area->dy << 16) | area->dx; cmd[5] = (area->height << 16) | area->width; mb862xxfb_write_fifo(6, cmd, info); } /* * Fill in the cmd array /GDC FIFO commands/ to draw a 1bit image. * Make sure cmd has enough room! 
*/ static void mb86290fb_imageblit1(u32 *cmd, u16 step, u16 dx, u16 dy, u16 width, u16 height, u32 fgcolor, u32 bgcolor, const struct fb_image *image, struct fb_info *info) { int i; unsigned const char *line; u16 bytes; /* set colors and raster operation regs */ cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP; /* Set raster operation */ cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9); cmd[2] = (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_FORE_COLOR << 16); cmd[3] = fgcolor; cmd[4] = (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_BACK_COLOR << 16); cmd[5] = bgcolor; i = 0; line = image->data; bytes = (image->width + 7) >> 3; /* and the image */ cmd[6] = (GDC_TYPE_DRAWBITMAPP << 24) | (GDC_CMD_BITMAP << 16) | (2 + (step * height)); cmd[7] = (dy << 16) | dx; cmd[8] = (height << 16) | width; while (i < height) { memcpy(&cmd[9 + i * step], line, step << 2); #ifdef __LITTLE_ENDIAN { int k = 0; for (k = 0; k < step; k++) cmd[9 + i * step + k] = cpu_to_be32(cmd[9 + i * step + k]); } #endif line += bytes; i++; } } /* * Fill in the cmd array /GDC FIFO commands/ to draw a 8bit image. * Make sure cmd has enough room! */ static void mb86290fb_imageblit8(u32 *cmd, u16 step, u16 dx, u16 dy, u16 width, u16 height, u32 fgcolor, u32 bgcolor, const struct fb_image *image, struct fb_info *info) { int i, j; unsigned const char *line, *ptr; u16 bytes; cmd[0] = (GDC_TYPE_DRAWBITMAPP << 24) | (GDC_CMD_BLT_DRAW << 16) | (2 + (height * step)); cmd[1] = (dy << 16) | dx; cmd[2] = (height << 16) | width; i = 0; line = ptr = image->data; bytes = image->width; while (i < height) { ptr = line; for (j = 0; j < step; j++) { cmd[3 + i * step + j] = (((u32 *) (info->pseudo_palette))[*ptr]) & 0xffff; ptr++; cmd[3 + i * step + j] |= ((((u32 *) (info-> pseudo_palette))[*ptr]) & 0xffff) << 16; ptr++; } line += bytes; i++; } } /* * Fill in the cmd array /GDC FIFO commands/ to draw a 16bit image. * Make sure cmd has enough room! 
*/ static void mb86290fb_imageblit16(u32 *cmd, u16 step, u16 dx, u16 dy, u16 width, u16 height, u32 fgcolor, u32 bgcolor, const struct fb_image *image, struct fb_info *info) { int i; unsigned const char *line; u16 bytes; i = 0; line = image->data; bytes = image->width << 1; cmd[0] = (GDC_TYPE_DRAWBITMAPP << 24) | (GDC_CMD_BLT_DRAW << 16) | (2 + step * height); cmd[1] = (dy << 16) | dx; cmd[2] = (height << 16) | width; while (i < height) { memcpy(&cmd[3 + i * step], line, step); line += bytes; i++; } } static void mb86290fb_imageblit(struct fb_info *info, const struct fb_image *image) { int mdr; u32 *cmd = NULL; void (*cmdfn) (u32 *, u16, u16, u16, u16, u16, u32, u32, const struct fb_image *, struct fb_info *) = NULL; u32 cmdlen; u32 fgcolor = 0, bgcolor = 0; u16 step; u16 width = image->width, height = image->height; u16 dx = image->dx, dy = image->dy; int x2, y2, vxres, vyres; mdr = (GDC_ROP_COPY << 9); x2 = image->dx + image->width; y2 = image->dy + image->height; vxres = info->var.xres_virtual; vyres = info->var.yres_virtual; x2 = min(x2, vxres); y2 = min(y2, vyres); width = x2 - dx; height = y2 - dy; switch (image->depth) { case 1: step = (width + 31) >> 5; cmdlen = 9 + height * step; cmdfn = mb86290fb_imageblit1; if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { fgcolor = ((u32 *) (info->pseudo_palette))[image->fg_color]; bgcolor = ((u32 *) (info->pseudo_palette))[image->bg_color]; } else { fgcolor = image->fg_color; bgcolor = image->bg_color; } break; case 8: step = (width + 1) >> 1; cmdlen = 3 + height * step; cmdfn = mb86290fb_imageblit8; break; case 16: step = (width + 1) >> 1; cmdlen = 3 + height * step; cmdfn = mb86290fb_imageblit16; break; default: cfb_imageblit(info, image); return; } cmd = kmalloc(cmdlen * 4, GFP_DMA); if (!cmd) return cfb_imageblit(info, image); cmdfn(cmd, step, dx, dy, width, height, fgcolor, bgcolor, image, info); mb862xxfb_write_fifo(cmdlen, cmd, info); kfree(cmd); } static void 
mb86290fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { u32 x2, y2, vxres, vyres, height, width, fg; u32 cmd[7]; vxres = info->var.xres_virtual; vyres = info->var.yres_virtual; if (!rect->width || !rect->height || rect->dx > vxres || rect->dy > vyres) return; /* We could use hardware clipping but on many cards you get around * hardware clipping by writing to framebuffer directly. */ x2 = rect->dx + rect->width; y2 = rect->dy + rect->height; x2 = min(x2, vxres); y2 = min(y2, vyres); width = x2 - rect->dx; height = y2 - rect->dy; if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) fg = ((u32 *) (info->pseudo_palette))[rect->color]; else fg = rect->color; switch (rect->rop) { case ROP_XOR: /* Set raster operation */ cmd[1] = (2 << 7) | (GDC_ROP_XOR << 9); break; case ROP_COPY: /* Set raster operation */ cmd[1] = (2 << 7) | (GDC_ROP_COPY << 9); break; } cmd[0] = (GDC_TYPE_SETREGISTER << 24) | (1 << 16) | GDC_REG_MODE_BITMAP; /* cmd[1] set earlier */ cmd[2] = (GDC_TYPE_SETCOLORREGISTER << 24) | (GDC_CMD_BODY_FORE_COLOR << 16); cmd[3] = fg; cmd[4] = (GDC_TYPE_DRAWRECTP << 24) | (GDC_CMD_BLT_FILL << 16); cmd[5] = (rect->dy << 16) | (rect->dx); cmd[6] = (height << 16) | width; mb862xxfb_write_fifo(7, cmd, info); } void mb862xxfb_init_accel(struct fb_info *info, int xres) { struct mb862xxfb_par *par = info->par; if (info->var.bits_per_pixel == 32) { info->fbops->fb_fillrect = cfb_fillrect; info->fbops->fb_copyarea = cfb_copyarea; info->fbops->fb_imageblit = cfb_imageblit; } else { outreg(disp, GC_L0EM, 3); info->fbops->fb_fillrect = mb86290fb_fillrect; info->fbops->fb_copyarea = mb86290fb_copyarea; info->fbops->fb_imageblit = mb86290fb_imageblit; } outreg(draw, GDC_REG_DRAW_BASE, 0); outreg(draw, GDC_REG_MODE_MISC, 0x8000); outreg(draw, GDC_REG_X_RESOLUTION, xres); info->flags |= FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; info->fix.accel = 0xff; /*FIXME: add right define */ } 
EXPORT_SYMBOL(mb862xxfb_init_accel);
gpl-2.0
tamland/xbmc
lib/ffmpeg/libpostproc/postprocess_altivec_template.c
46
54274
/* * AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org> * * based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at) * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/avutil.h" #define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \ do { \ __typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \ __typeof__(src_a) tempE1, tempF1, tempG1, tempH1; \ __typeof__(src_a) tempA2, tempB2, tempC2, tempD2; \ __typeof__(src_a) tempE2, tempF2, tempG2, tempH2; \ tempA1 = vec_mergeh (src_a, src_e); \ tempB1 = vec_mergel (src_a, src_e); \ tempC1 = vec_mergeh (src_b, src_f); \ tempD1 = vec_mergel (src_b, src_f); \ tempE1 = vec_mergeh (src_c, src_g); \ tempF1 = vec_mergel (src_c, src_g); \ tempG1 = vec_mergeh (src_d, src_h); \ tempH1 = vec_mergel (src_d, src_h); \ tempA2 = vec_mergeh (tempA1, tempE1); \ tempB2 = vec_mergel (tempA1, tempE1); \ tempC2 = vec_mergeh (tempB1, tempF1); \ tempD2 = vec_mergel (tempB1, tempF1); \ tempE2 = vec_mergeh (tempC1, tempG1); \ tempF2 = vec_mergel (tempC1, tempG1); \ tempG2 = vec_mergeh (tempD1, tempH1); \ tempH2 = vec_mergel (tempD1, tempH1); \ src_a = vec_mergeh (tempA2, tempE2); \ src_b = vec_mergel (tempA2, tempE2); \ src_c = vec_mergeh (tempB2, tempF2); \ src_d = vec_mergel (tempB2, tempF2); \ src_e = 
vec_mergeh (tempC2, tempG2); \ src_f = vec_mergel (tempC2, tempG2); \ src_g = vec_mergeh (tempD2, tempH2); \ src_h = vec_mergel (tempD2, tempH2); \ } while (0) static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) { /* this code makes no assumption on src or stride. One could remove the recomputation of the perm vector by assuming (stride % 16) == 0, unfortunately this is not always true. */ short data_0 = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1; DECLARE_ALIGNED(16, short, data)[8] = { data_0, data_0 * 2 + 1, c->QP * 2, c->QP * 4 }; int numEq; uint8_t *src2 = src; vector signed short v_dcOffset; vector signed short v2QP; vector unsigned short v4QP; vector unsigned short v_dcThreshold; const int properStride = (stride % 16); const int srcAlign = ((unsigned long)src2 % 16); const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0; const vector signed int zero = vec_splat_s32(0); const vector signed short mask = vec_splat_s16(1); vector signed int v_numEq = vec_splat_s32(0); vector signed short v_data = vec_ld(0, data); vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3, v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7; //FIXME avoid this mess if possible register int j0 = 0, j1 = stride, j2 = 2 * stride, j3 = 3 * stride, j4 = 4 * stride, j5 = 5 * stride, j6 = 6 * stride, j7 = 7 * stride; vector unsigned char v_srcA0, v_srcA1, v_srcA2, v_srcA3, v_srcA4, v_srcA5, v_srcA6, v_srcA7; v_dcOffset = vec_splat(v_data, 0); v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1); v2QP = vec_splat(v_data, 2); v4QP = (vector unsigned short)vec_splat(v_data, 3); src2 += stride * 4; #define LOAD_LINE(i) \ { \ vector unsigned char perm##i = vec_lvsl(j##i, src2); \ vector unsigned char v_srcA2##i; \ vector unsigned char v_srcA1##i = vec_ld(j##i, src2); \ if (two_vectors) \ v_srcA2##i = vec_ld(j##i + 16, src2); \ v_srcA##i = \ vec_perm(v_srcA1##i, v_srcA2##i, perm##i); \ v_srcAss##i = \ (vector signed short)vec_mergeh((vector signed 
char)zero, \ (vector signed char)v_srcA##i); } #define LOAD_LINE_ALIGNED(i) \ v_srcA##i = vec_ld(j##i, src2); \ v_srcAss##i = \ (vector signed short)vec_mergeh((vector signed char)zero, \ (vector signed char)v_srcA##i) /* Special-casing the aligned case is worthwhile, as all calls from * the (transposed) horizontable deblocks will be aligned, in addition * to the naturally aligned vertical deblocks. */ if (properStride && srcAlign) { LOAD_LINE_ALIGNED(0); LOAD_LINE_ALIGNED(1); LOAD_LINE_ALIGNED(2); LOAD_LINE_ALIGNED(3); LOAD_LINE_ALIGNED(4); LOAD_LINE_ALIGNED(5); LOAD_LINE_ALIGNED(6); LOAD_LINE_ALIGNED(7); } else { LOAD_LINE(0); LOAD_LINE(1); LOAD_LINE(2); LOAD_LINE(3); LOAD_LINE(4); LOAD_LINE(5); LOAD_LINE(6); LOAD_LINE(7); } #undef LOAD_LINE #undef LOAD_LINE_ALIGNED #define ITER(i, j) \ const vector signed short v_diff##i = \ vec_sub(v_srcAss##i, v_srcAss##j); \ const vector signed short v_sum##i = \ vec_add(v_diff##i, v_dcOffset); \ const vector signed short v_comp##i = \ (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \ v_dcThreshold); \ const vector signed short v_part##i = vec_and(mask, v_comp##i); { ITER(0, 1) ITER(1, 2) ITER(2, 3) ITER(3, 4) ITER(4, 5) ITER(5, 6) ITER(6, 7) v_numEq = vec_sum4s(v_part0, v_numEq); v_numEq = vec_sum4s(v_part1, v_numEq); v_numEq = vec_sum4s(v_part2, v_numEq); v_numEq = vec_sum4s(v_part3, v_numEq); v_numEq = vec_sum4s(v_part4, v_numEq); v_numEq = vec_sum4s(v_part5, v_numEq); v_numEq = vec_sum4s(v_part6, v_numEq); } #undef ITER v_numEq = vec_sums(v_numEq, zero); v_numEq = vec_splat(v_numEq, 3); vec_ste(v_numEq, 0, &numEq); if (numEq > c->ppMode.flatnessThreshold){ const vector unsigned char mmoP1 = (const vector unsigned char) {0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B}; const vector unsigned char mmoP2 = (const vector unsigned char) {0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f}; const vector unsigned char mmoP 
= (const vector unsigned char) vec_lvsl(8, (unsigned char*)0); vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1); vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2); vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP); vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1); vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2); vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP); vector signed short mmoDiff = vec_sub(mmoL, mmoR); vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP); if (vec_any_gt(mmoSum, v4QP)) return 0; else return 1; } else return 2; } static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) { /* this code makes no assumption on src or stride. One could remove the recomputation of the perm vector by assuming (stride % 16) == 0, unfortunately this is not always true. Quite a lot of load/stores can be removed by assuming proper alignment of src & stride :-( */ uint8_t *src2 = src; const vector signed int zero = vec_splat_s32(0); const int properStride = (stride % 16); const int srcAlign = ((unsigned long)src2 % 16); DECLARE_ALIGNED(16, short, qp)[8] = {c->QP}; vector signed short vqp = vec_ld(0, qp); vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9; vector unsigned char vbA0, av_uninit(vbA1), av_uninit(vbA2), av_uninit(vbA3), av_uninit(vbA4), av_uninit(vbA5), av_uninit(vbA6), av_uninit(vbA7), av_uninit(vbA8), vbA9; vector unsigned char vbB0, av_uninit(vbB1), av_uninit(vbB2), av_uninit(vbB3), av_uninit(vbB4), av_uninit(vbB5), av_uninit(vbB6), av_uninit(vbB7), av_uninit(vbB8), vbB9; vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9; vector unsigned char perml0, perml1, perml2, perml3, perml4, perml5, perml6, perml7, perml8, perml9; register int j0 = 0, j1 = stride, j2 = 2 * stride, j3 = 3 * stride, j4 = 4 * stride, j5 = 5 * stride, j6 = 6 * stride, j7 = 7 * stride, j8 = 8 * stride, j9 = 9 * 
stride; vqp = vec_splat(vqp, 0); src2 += stride*3; #define LOAD_LINE(i) \ perml##i = vec_lvsl(i * stride, src2); \ vbA##i = vec_ld(i * stride, src2); \ vbB##i = vec_ld(i * stride + 16, src2); \ vbT##i = vec_perm(vbA##i, vbB##i, perml##i); \ vb##i = \ (vector signed short)vec_mergeh((vector unsigned char)zero, \ (vector unsigned char)vbT##i) #define LOAD_LINE_ALIGNED(i) \ vbT##i = vec_ld(j##i, src2); \ vb##i = \ (vector signed short)vec_mergeh((vector signed char)zero, \ (vector signed char)vbT##i) /* Special-casing the aligned case is worthwhile, as all calls from * the (transposed) horizontable deblocks will be aligned, in addition * to the naturally aligned vertical deblocks. */ if (properStride && srcAlign) { LOAD_LINE_ALIGNED(0); LOAD_LINE_ALIGNED(1); LOAD_LINE_ALIGNED(2); LOAD_LINE_ALIGNED(3); LOAD_LINE_ALIGNED(4); LOAD_LINE_ALIGNED(5); LOAD_LINE_ALIGNED(6); LOAD_LINE_ALIGNED(7); LOAD_LINE_ALIGNED(8); LOAD_LINE_ALIGNED(9); } else { LOAD_LINE(0); LOAD_LINE(1); LOAD_LINE(2); LOAD_LINE(3); LOAD_LINE(4); LOAD_LINE(5); LOAD_LINE(6); LOAD_LINE(7); LOAD_LINE(8); LOAD_LINE(9); } #undef LOAD_LINE #undef LOAD_LINE_ALIGNED { const vector unsigned short v_2 = vec_splat_u16(2); const vector unsigned short v_4 = vec_splat_u16(4); const vector signed short v_diff01 = vec_sub(vb0, vb1); const vector unsigned short v_cmp01 = (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp); const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01); const vector signed short v_diff89 = vec_sub(vb8, vb9); const vector unsigned short v_cmp89 = (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp); const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89); const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1); const vector signed short temp02 = vec_add(vb2, vb3); const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4); const vector signed short v_sumsB0 = vec_add(temp02, temp03); const vector signed short temp11 
= vec_sub(v_sumsB0, v_first); const vector signed short v_sumsB1 = vec_add(temp11, vb4); const vector signed short temp21 = vec_sub(v_sumsB1, v_first); const vector signed short v_sumsB2 = vec_add(temp21, vb5); const vector signed short temp31 = vec_sub(v_sumsB2, v_first); const vector signed short v_sumsB3 = vec_add(temp31, vb6); const vector signed short temp41 = vec_sub(v_sumsB3, v_first); const vector signed short v_sumsB4 = vec_add(temp41, vb7); const vector signed short temp51 = vec_sub(v_sumsB4, vb1); const vector signed short v_sumsB5 = vec_add(temp51, vb8); const vector signed short temp61 = vec_sub(v_sumsB5, vb2); const vector signed short v_sumsB6 = vec_add(temp61, v_last); const vector signed short temp71 = vec_sub(v_sumsB6, vb3); const vector signed short v_sumsB7 = vec_add(temp71, v_last); const vector signed short temp81 = vec_sub(v_sumsB7, vb4); const vector signed short v_sumsB8 = vec_add(temp81, v_last); const vector signed short temp91 = vec_sub(v_sumsB8, vb5); const vector signed short v_sumsB9 = vec_add(temp91, v_last); #define COMPUTE_VR(i, j, k) \ const vector signed short temps1##i = \ vec_add(v_sumsB##i, v_sumsB##k); \ const vector signed short temps2##i = \ vec_mladd(vb##j, (vector signed short)v_2, temps1##i); \ const vector signed short vr##j = vec_sra(temps2##i, v_4) COMPUTE_VR(0, 1, 2); COMPUTE_VR(1, 2, 3); COMPUTE_VR(2, 3, 4); COMPUTE_VR(3, 4, 5); COMPUTE_VR(4, 5, 6); COMPUTE_VR(5, 6, 7); COMPUTE_VR(6, 7, 8); COMPUTE_VR(7, 8, 9); const vector signed char neg1 = vec_splat_s8(-1); const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; #define PACK_AND_STORE(i) \ { const vector unsigned char perms##i = \ vec_lvsr(i * stride, src2); \ const vector unsigned char vf##i = \ vec_packsu(vr##i, (vector signed short)zero); \ const vector unsigned char vg##i = \ vec_perm(vf##i, vbT##i, permHH); \ const vector unsigned char mask##i = \ 
vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \ const vector unsigned char vg2##i = \ vec_perm(vg##i, vg##i, perms##i); \ const vector unsigned char svA##i = \ vec_sel(vbA##i, vg2##i, mask##i); \ const vector unsigned char svB##i = \ vec_sel(vg2##i, vbB##i, mask##i); \ vec_st(svA##i, i * stride, src2); \ vec_st(svB##i, i * stride + 16, src2);} #define PACK_AND_STORE_ALIGNED(i) \ { const vector unsigned char vf##i = \ vec_packsu(vr##i, (vector signed short)zero); \ const vector unsigned char vg##i = \ vec_perm(vf##i, vbT##i, permHH); \ vec_st(vg##i, i * stride, src2);} /* Special-casing the aligned case is worthwhile, as all calls from * the (transposed) horizontable deblocks will be aligned, in addition * to the naturally aligned vertical deblocks. */ if (properStride && srcAlign) { PACK_AND_STORE_ALIGNED(1) PACK_AND_STORE_ALIGNED(2) PACK_AND_STORE_ALIGNED(3) PACK_AND_STORE_ALIGNED(4) PACK_AND_STORE_ALIGNED(5) PACK_AND_STORE_ALIGNED(6) PACK_AND_STORE_ALIGNED(7) PACK_AND_STORE_ALIGNED(8) } else { PACK_AND_STORE(1) PACK_AND_STORE(2) PACK_AND_STORE(3) PACK_AND_STORE(4) PACK_AND_STORE(5) PACK_AND_STORE(6) PACK_AND_STORE(7) PACK_AND_STORE(8) } #undef PACK_AND_STORE #undef PACK_AND_STORE_ALIGNED } } static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) { /* this code makes no assumption on src or stride. One could remove the recomputation of the perm vector by assuming (stride % 16) == 0, unfortunately this is not always true. 
Quite a lot of load/stores can be removed by assuming proper alignment of src & stride :-( */ uint8_t *src2 = src + stride*3; const vector signed int zero = vec_splat_s32(0); DECLARE_ALIGNED(16, short, qp)[8] = {8*c->QP}; vector signed short vqp = vec_splat( (vector signed short)vec_ld(0, qp), 0); #define LOAD_LINE(i) \ const vector unsigned char perm##i = \ vec_lvsl(i * stride, src2); \ const vector unsigned char vbA##i = \ vec_ld(i * stride, src2); \ const vector unsigned char vbB##i = \ vec_ld(i * stride + 16, src2); \ const vector unsigned char vbT##i = \ vec_perm(vbA##i, vbB##i, perm##i); \ const vector signed short vb##i = \ (vector signed short)vec_mergeh((vector unsigned char)zero, \ (vector unsigned char)vbT##i) LOAD_LINE(1); LOAD_LINE(2); LOAD_LINE(3); LOAD_LINE(4); LOAD_LINE(5); LOAD_LINE(6); LOAD_LINE(7); LOAD_LINE(8); #undef LOAD_LINE const vector signed short v_1 = vec_splat_s16(1); const vector signed short v_2 = vec_splat_s16(2); const vector signed short v_5 = vec_splat_s16(5); const vector signed short v_32 = vec_sl(v_1, (vector unsigned short)v_5); /* middle energy */ const vector signed short l3minusl6 = vec_sub(vb3, vb6); const vector signed short l5minusl4 = vec_sub(vb5, vb4); const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero); const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6); const vector signed short absmE = vec_abs(mE); /* left & right energy */ const vector signed short l1minusl4 = vec_sub(vb1, vb4); const vector signed short l3minusl2 = vec_sub(vb3, vb2); const vector signed short l5minusl8 = vec_sub(vb5, vb8); const vector signed short l7minusl6 = vec_sub(vb7, vb6); const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero); const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero); const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4); const vector signed 
short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8); /* d */ const vector signed short ddiff = vec_sub(absmE, vec_min(vec_abs(lE), vec_abs(rE))); const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero); const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32); const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6)); const vector signed short minusd = vec_sub((vector signed short)zero, d); const vector signed short finald = vec_sel(minusd, d, vec_cmpgt(vec_sub((vector signed short)zero, mE), (vector signed short)zero)); /* q */ const vector signed short qtimes2 = vec_sub(vb4, vb5); /* for a shift right to behave like /2, we need to add one to all negative integer */ const vector signed short rounddown = vec_sel((vector signed short)zero, v_1, vec_cmplt(qtimes2, (vector signed short)zero)); const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1)); /* clamp */ const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald); const vector signed short dclamp_P = vec_min(dclamp_P1, q); const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald); const vector signed short dclamp_N = vec_max(dclamp_N1, q); const vector signed short dclampedfinal = vec_sel(dclamp_N, dclamp_P, vec_cmpgt(q, (vector signed short)zero)); const vector signed short dornotd = vec_sel((vector signed short)zero, dclampedfinal, vec_cmplt(absmE, vqp)); /* add/subtract to l4 and l5 */ const vector signed short vb4minusd = vec_sub(vb4, dornotd); const vector signed short vb5plusd = vec_add(vb5, dornotd); /* finally, stores */ const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero); const vector unsigned char st5 = vec_packsu(vb5plusd, (vector signed short)zero); const vector signed char neg1 = vec_splat_s8(-1); const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 
0x1E, 0x1F}; #define STORE(i) \ { const vector unsigned char perms##i = \ vec_lvsr(i * stride, src2); \ const vector unsigned char vg##i = \ vec_perm(st##i, vbT##i, permHH); \ const vector unsigned char mask##i = \ vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \ const vector unsigned char vg2##i = \ vec_perm(vg##i, vg##i, perms##i); \ const vector unsigned char svA##i = \ vec_sel(vbA##i, vg2##i, mask##i); \ const vector unsigned char svB##i = \ vec_sel(vg2##i, vbB##i, mask##i); \ vec_st(svA##i, i * stride, src2); \ vec_st(svB##i, i * stride + 16, src2);} STORE(4) STORE(5) } static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) { const vector signed int vsint32_8 = vec_splat_s32(8); const vector unsigned int vuint32_4 = vec_splat_u32(4); const vector signed char neg1 = vec_splat_s8(-1); const vector unsigned char permA1 = (vector unsigned char) {0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F}; const vector unsigned char permA2 = (vector unsigned char) {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11, 0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F}; const vector unsigned char permA1inc = (vector unsigned char) {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; const vector unsigned char permA2inc = (vector unsigned char) {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; const vector unsigned char magic = (vector unsigned char) {0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; const vector unsigned char extractPerm = (vector unsigned char) {0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01}; const vector unsigned char extractPermInc = (vector unsigned char) {0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01}; const vector unsigned char 
identity = vec_lvsl(0,(unsigned char *)0); const vector unsigned char tenRight = (vector unsigned char) {0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; const vector unsigned char eightLeft = (vector unsigned char) {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08}; /* this code makes no assumption on src or stride. One could remove the recomputation of the perm vector by assuming (stride % 16) == 0, unfortunately this is not always true. Quite a lot of load/stores can be removed by assuming proper alignment of src & stride :-( */ uint8_t *srcCopy = src; DECLARE_ALIGNED(16, uint8_t, dt)[16] = { deringThreshold }; const vector signed int zero = vec_splat_s32(0); vector unsigned char v_dt = vec_splat(vec_ld(0, dt), 0); #define LOAD_LINE(i) \ const vector unsigned char perm##i = \ vec_lvsl(i * stride, srcCopy); \ vector unsigned char sA##i = vec_ld(i * stride, srcCopy); \ vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy); \ vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i) LOAD_LINE(0); LOAD_LINE(1); LOAD_LINE(2); LOAD_LINE(3); LOAD_LINE(4); LOAD_LINE(5); LOAD_LINE(6); LOAD_LINE(7); LOAD_LINE(8); LOAD_LINE(9); #undef LOAD_LINE vector unsigned char v_avg; DECLARE_ALIGNED(16, signed int, S)[8]; DECLARE_ALIGNED(16, int, tQP2)[4] = { c->QP/2 + 1 }; vector signed int vQP2 = vec_ld(0, tQP2); vQP2 = vec_splat(vQP2, 0); { const vector unsigned char trunc_perm = (vector unsigned char) {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}; const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm); const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm); const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm); const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm); #define EXTRACT(op) do { \ const vector unsigned char s_1 = vec_##op(trunc_src12, 
trunc_src34); \ const vector unsigned char s_2 = vec_##op(trunc_src56, trunc_src78); \ const vector unsigned char s_6 = vec_##op(s_1, s_2); \ const vector unsigned char s_8h = vec_mergeh(s_6, s_6); \ const vector unsigned char s_8l = vec_mergel(s_6, s_6); \ const vector unsigned char s_9 = vec_##op(s_8h, s_8l); \ const vector unsigned char s_9h = vec_mergeh(s_9, s_9); \ const vector unsigned char s_9l = vec_mergel(s_9, s_9); \ const vector unsigned char s_10 = vec_##op(s_9h, s_9l); \ const vector unsigned char s_10h = vec_mergeh(s_10, s_10); \ const vector unsigned char s_10l = vec_mergel(s_10, s_10); \ const vector unsigned char s_11 = vec_##op(s_10h, s_10l); \ const vector unsigned char s_11h = vec_mergeh(s_11, s_11); \ const vector unsigned char s_11l = vec_mergel(s_11, s_11); \ v_##op = vec_##op(s_11h, s_11l); \ } while (0) vector unsigned char v_min; vector unsigned char v_max; EXTRACT(min); EXTRACT(max); #undef EXTRACT if (vec_all_lt(vec_sub(v_max, v_min), v_dt)) return; v_avg = vec_avg(v_min, v_max); } { const vector unsigned short mask1 = (vector unsigned short) {0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080}; const vector unsigned short mask2 = (vector unsigned short) {0x0100, 0x0200, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}; const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4)); const vector unsigned int vuint32_1 = vec_splat_u32(1); vector signed int sumA2; vector signed int sumB2; vector signed int sum0, sum1, sum2, sum3, sum4; vector signed int sum5, sum6, sum7, sum8, sum9; #define COMPARE(i) \ do { \ const vector unsigned char cmp = \ (vector unsigned char)vec_cmpgt(src##i, v_avg); \ const vector unsigned short cmpHi = \ (vector unsigned short)vec_mergeh(cmp, cmp); \ const vector unsigned short cmpLi = \ (vector unsigned short)vec_mergel(cmp, cmp); \ const vector signed short cmpHf = \ (vector signed short)vec_and(cmpHi, mask1); \ const vector signed short cmpLf = \ (vector signed 
short)vec_and(cmpLi, mask2); \ const vector signed int sump = vec_sum4s(cmpHf, zero); \ const vector signed int sumq = vec_sum4s(cmpLf, sump); \ sum##i = vec_sums(sumq, zero); \ } while (0) COMPARE(0); COMPARE(1); COMPARE(2); COMPARE(3); COMPARE(4); COMPARE(5); COMPARE(6); COMPARE(7); COMPARE(8); COMPARE(9); #undef COMPARE { const vector signed int sump02 = vec_mergel(sum0, sum2); const vector signed int sump13 = vec_mergel(sum1, sum3); const vector signed int sumA = vec_mergel(sump02, sump13); const vector signed int sump46 = vec_mergel(sum4, sum6); const vector signed int sump57 = vec_mergel(sum5, sum7); const vector signed int sumB = vec_mergel(sump46, sump57); const vector signed int sump8A = vec_mergel(sum8, zero); const vector signed int sump9B = vec_mergel(sum9, zero); const vector signed int sumC = vec_mergel(sump8A, sump9B); const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16); const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16); const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16); const vector signed int t2A = vec_or(sumA, tA); const vector signed int t2B = vec_or(sumB, tB); const vector signed int t2C = vec_or(sumC, tC); const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1), vec_sl(t2A, vuint32_1)); const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1), vec_sl(t2B, vuint32_1)); const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1), vec_sl(t2C, vuint32_1)); const vector signed int yA = vec_and(t2A, t3A); const vector signed int yB = vec_and(t2B, t3B); const vector signed int yC = vec_and(t2C, t3C); const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0); const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0); const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1); const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2); const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1); const vector signed int sumBd8 = vec_perm(yB, 
yC, strangeperm2); const vector signed int sumAp = vec_and(yA, vec_and(sumAd4,sumAd8)); const vector signed int sumBp = vec_and(yB, vec_and(sumBd4,sumBd8)); sumA2 = vec_or(sumAp, vec_sra(sumAp, vuint32_16)); sumB2 = vec_or(sumBp, vec_sra(sumBp, vuint32_16)); } vec_st(sumA2, 0, S); vec_st(sumB2, 16, S); } /* I'm not sure the following is actually faster than straight, unvectorized C code :-( */ #define F_INIT() \ vector unsigned char tenRightM = tenRight; \ vector unsigned char permA1M = permA1; \ vector unsigned char permA2M = permA2; \ vector unsigned char extractPermM = extractPerm #define F2(i, j, k, l) \ if (S[i] & (1 << (l+1))) { \ const vector unsigned char a_A = vec_perm(src##i, src##j, permA1M); \ const vector unsigned char a_B = vec_perm(a_A, src##k, permA2M); \ const vector signed int a_sump = \ (vector signed int)vec_msum(a_B, magic, (vector unsigned int)zero);\ vector signed int F = vec_sr(vec_sums(a_sump, vsint32_8), vuint32_4); \ const vector signed int p = \ (vector signed int)vec_perm(src##j, (vector unsigned char)zero, \ extractPermM); \ const vector signed int sum = vec_add(p, vQP2); \ const vector signed int diff = vec_sub(p, vQP2); \ vector signed int newpm; \ vector unsigned char newpm2, mask; \ F = vec_splat(F, 3); \ if (vec_all_lt(sum, F)) \ newpm = sum; \ else if (vec_all_gt(diff, F)) \ newpm = diff; \ else newpm = F; \ newpm2 = vec_splat((vector unsigned char)newpm, 15); \ mask = vec_add(identity, tenRightM); \ src##j = vec_perm(src##j, newpm2, mask); \ } \ permA1M = vec_add(permA1M, permA1inc); \ permA2M = vec_add(permA2M, permA2inc); \ tenRightM = vec_sro(tenRightM, eightLeft); \ extractPermM = vec_add(extractPermM, extractPermInc) #define ITER(i, j, k) do { \ F_INIT(); \ F2(i, j, k, 0); \ F2(i, j, k, 1); \ F2(i, j, k, 2); \ F2(i, j, k, 3); \ F2(i, j, k, 4); \ F2(i, j, k, 5); \ F2(i, j, k, 6); \ F2(i, j, k, 7); \ } while (0) ITER(0, 1, 2); ITER(1, 2, 3); ITER(2, 3, 4); ITER(3, 4, 5); ITER(4, 5, 6); ITER(5, 6, 7); ITER(6, 7, 8); ITER(7, 8, 
9); #define STORE_LINE(i) do { \ const vector unsigned char permST = \ vec_lvsr(i * stride, srcCopy); \ const vector unsigned char maskST = \ vec_perm((vector unsigned char)zero, \ (vector unsigned char)neg1, permST); \ src##i = vec_perm(src##i ,src##i, permST); \ sA##i= vec_sel(sA##i, src##i, maskST); \ sB##i= vec_sel(src##i, sB##i, maskST); \ vec_st(sA##i, i * stride, srcCopy); \ vec_st(sB##i, i * stride + 16, srcCopy); \ } while (0) STORE_LINE(1); STORE_LINE(2); STORE_LINE(3); STORE_LINE(4); STORE_LINE(5); STORE_LINE(6); STORE_LINE(7); STORE_LINE(8); #undef STORE_LINE #undef ITER #undef F2 } #define doHorizLowPass_altivec(a...) doHorizLowPass_C(a) #define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a) #define do_a_deblock_altivec(a...) do_a_deblock_C(a) static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride, uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise) { const vector signed char neg1 = vec_splat_s8(-1); const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; const vector signed int zero = vec_splat_s32(0); const vector signed short vsint16_1 = vec_splat_s16(1); vector signed int v_dp = zero; vector signed int v_sysdp = zero; int d, sysd, i; #define LOAD_LINE(src, i) \ register int j##src##i = i * stride; \ vector unsigned char perm##src##i = vec_lvsl(j##src##i, src); \ const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \ const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \ const vector unsigned char v_##src##A##i = \ vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i); \ vector signed short v_##src##Ass##i = \ (vector signed short)vec_mergeh((vector signed char)zero, \ (vector signed char)v_##src##A##i) LOAD_LINE(src, 0); LOAD_LINE(src, 1); LOAD_LINE(src, 2); LOAD_LINE(src, 3); LOAD_LINE(src, 4); LOAD_LINE(src, 5); LOAD_LINE(src, 6); LOAD_LINE(src, 7); LOAD_LINE(tempBlurred, 0); 
LOAD_LINE(tempBlurred, 1); LOAD_LINE(tempBlurred, 2); LOAD_LINE(tempBlurred, 3); LOAD_LINE(tempBlurred, 4); LOAD_LINE(tempBlurred, 5); LOAD_LINE(tempBlurred, 6); LOAD_LINE(tempBlurred, 7); #undef LOAD_LINE #define ACCUMULATE_DIFFS(i) do { \ vector signed short v_d = vec_sub(v_tempBlurredAss##i, \ v_srcAss##i); \ v_dp = vec_msums(v_d, v_d, v_dp); \ v_sysdp = vec_msums(v_d, vsint16_1, v_sysdp); \ } while (0) ACCUMULATE_DIFFS(0); ACCUMULATE_DIFFS(1); ACCUMULATE_DIFFS(2); ACCUMULATE_DIFFS(3); ACCUMULATE_DIFFS(4); ACCUMULATE_DIFFS(5); ACCUMULATE_DIFFS(6); ACCUMULATE_DIFFS(7); #undef ACCUMULATE_DIFFS tempBlurredPast[127]= maxNoise[0]; tempBlurredPast[128]= maxNoise[1]; tempBlurredPast[129]= maxNoise[2]; v_dp = vec_sums(v_dp, zero); v_sysdp = vec_sums(v_sysdp, zero); v_dp = vec_splat(v_dp, 3); v_sysdp = vec_splat(v_sysdp, 3); vec_ste(v_dp, 0, &d); vec_ste(v_sysdp, 0, &sysd); i = d; d = (4*d +(*(tempBlurredPast-256)) +(*(tempBlurredPast-1))+ (*(tempBlurredPast+1)) +(*(tempBlurredPast+256)) +4)>>3; *tempBlurredPast=i; if (d > maxNoise[1]) { if (d < maxNoise[2]) { #define OP(i) v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i); OP(0); OP(1); OP(2); OP(3); OP(4); OP(5); OP(6); OP(7); #undef OP } else { #define OP(i) v_tempBlurredAss##i = v_srcAss##i; OP(0); OP(1); OP(2); OP(3); OP(4); OP(5); OP(6); OP(7); #undef OP } } else { if (d < maxNoise[0]) { const vector signed short vsint16_7 = vec_splat_s16(7); const vector signed short vsint16_4 = vec_splat_s16(4); const vector unsigned short vuint16_3 = vec_splat_u16(3); #define OP(i) do { \ const vector signed short v_temp = \ vec_mladd(v_tempBlurredAss##i, vsint16_7, v_srcAss##i); \ const vector signed short v_temp2 = vec_add(v_temp, vsint16_4); \ v_tempBlurredAss##i = vec_sr(v_temp2, vuint16_3); \ } while (0) OP(0); OP(1); OP(2); OP(3); OP(4); OP(5); OP(6); OP(7); #undef OP } else { const vector signed short vsint16_3 = vec_splat_s16(3); const vector signed short vsint16_2 = vec_splat_s16(2); #define OP(i) do { \ 
const vector signed short v_temp = \ vec_mladd(v_tempBlurredAss##i, vsint16_3, v_srcAss##i); \ const vector signed short v_temp2 = vec_add(v_temp, vsint16_2); \ v_tempBlurredAss##i = \ vec_sr(v_temp2, (vector unsigned short)vsint16_2); \ } while (0) OP(0); OP(1); OP(2); OP(3); OP(4); OP(5); OP(6); OP(7); #undef OP } } #define PACK_AND_STORE(src, i) do { \ const vector unsigned char perms = vec_lvsr(i * stride, src); \ const vector unsigned char vf = \ vec_packsu(v_tempBlurredAss##1, (vector signed short)zero); \ const vector unsigned char vg = vec_perm(vf, v_##src##A##i, permHH); \ const vector unsigned char mask = \ vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms); \ const vector unsigned char vg2 = vec_perm(vg, vg, perms); \ const vector unsigned char svA = vec_sel(v_##src##A1##i, vg2, mask); \ const vector unsigned char svB = vec_sel(vg2, v_##src##A2##i, mask); \ vec_st(svA, i * stride, src); \ vec_st(svB, i * stride + 16, src); \ } while (0) PACK_AND_STORE(src, 0); PACK_AND_STORE(src, 1); PACK_AND_STORE(src, 2); PACK_AND_STORE(src, 3); PACK_AND_STORE(src, 4); PACK_AND_STORE(src, 5); PACK_AND_STORE(src, 6); PACK_AND_STORE(src, 7); PACK_AND_STORE(tempBlurred, 0); PACK_AND_STORE(tempBlurred, 1); PACK_AND_STORE(tempBlurred, 2); PACK_AND_STORE(tempBlurred, 3); PACK_AND_STORE(tempBlurred, 4); PACK_AND_STORE(tempBlurred, 5); PACK_AND_STORE(tempBlurred, 6); PACK_AND_STORE(tempBlurred, 7); #undef PACK_AND_STORE } static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) { const vector unsigned char zero = vec_splat_u8(0); #define LOAD_DOUBLE_LINE(i, j) \ vector unsigned char perm1##i = vec_lvsl(i * stride, src); \ vector unsigned char perm2##i = vec_lvsl(j * stride, src); \ vector unsigned char srcA##i = vec_ld(i * stride, src); \ vector unsigned char srcB##i = vec_ld(i * stride + 16, src); \ vector unsigned char srcC##i = vec_ld(j * stride, src); \ vector unsigned char srcD##i = vec_ld(j * 
stride+ 16, src); \ vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \ vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i) LOAD_DOUBLE_LINE(0, 1); LOAD_DOUBLE_LINE(2, 3); LOAD_DOUBLE_LINE(4, 5); LOAD_DOUBLE_LINE(6, 7); #undef LOAD_DOUBLE_LINE vector unsigned char tempA = vec_mergeh(src0, zero); vector unsigned char tempB = vec_mergel(src0, zero); vector unsigned char tempC = vec_mergeh(src1, zero); vector unsigned char tempD = vec_mergel(src1, zero); vector unsigned char tempE = vec_mergeh(src2, zero); vector unsigned char tempF = vec_mergel(src2, zero); vector unsigned char tempG = vec_mergeh(src3, zero); vector unsigned char tempH = vec_mergel(src3, zero); vector unsigned char tempI = vec_mergeh(src4, zero); vector unsigned char tempJ = vec_mergel(src4, zero); vector unsigned char tempK = vec_mergeh(src5, zero); vector unsigned char tempL = vec_mergel(src5, zero); vector unsigned char tempM = vec_mergeh(src6, zero); vector unsigned char tempN = vec_mergel(src6, zero); vector unsigned char tempO = vec_mergeh(src7, zero); vector unsigned char tempP = vec_mergel(src7, zero); vector unsigned char temp0 = vec_mergeh(tempA, tempI); vector unsigned char temp1 = vec_mergel(tempA, tempI); vector unsigned char temp2 = vec_mergeh(tempB, tempJ); vector unsigned char temp3 = vec_mergel(tempB, tempJ); vector unsigned char temp4 = vec_mergeh(tempC, tempK); vector unsigned char temp5 = vec_mergel(tempC, tempK); vector unsigned char temp6 = vec_mergeh(tempD, tempL); vector unsigned char temp7 = vec_mergel(tempD, tempL); vector unsigned char temp8 = vec_mergeh(tempE, tempM); vector unsigned char temp9 = vec_mergel(tempE, tempM); vector unsigned char temp10 = vec_mergeh(tempF, tempN); vector unsigned char temp11 = vec_mergel(tempF, tempN); vector unsigned char temp12 = vec_mergeh(tempG, tempO); vector unsigned char temp13 = vec_mergel(tempG, tempO); vector unsigned char temp14 = vec_mergeh(tempH, tempP); vector unsigned char temp15 = 
vec_mergel(tempH, tempP); tempA = vec_mergeh(temp0, temp8); tempB = vec_mergel(temp0, temp8); tempC = vec_mergeh(temp1, temp9); tempD = vec_mergel(temp1, temp9); tempE = vec_mergeh(temp2, temp10); tempF = vec_mergel(temp2, temp10); tempG = vec_mergeh(temp3, temp11); tempH = vec_mergel(temp3, temp11); tempI = vec_mergeh(temp4, temp12); tempJ = vec_mergel(temp4, temp12); tempK = vec_mergeh(temp5, temp13); tempL = vec_mergel(temp5, temp13); tempM = vec_mergeh(temp6, temp14); tempN = vec_mergel(temp6, temp14); tempO = vec_mergeh(temp7, temp15); tempP = vec_mergel(temp7, temp15); temp0 = vec_mergeh(tempA, tempI); temp1 = vec_mergel(tempA, tempI); temp2 = vec_mergeh(tempB, tempJ); temp3 = vec_mergel(tempB, tempJ); temp4 = vec_mergeh(tempC, tempK); temp5 = vec_mergel(tempC, tempK); temp6 = vec_mergeh(tempD, tempL); temp7 = vec_mergel(tempD, tempL); temp8 = vec_mergeh(tempE, tempM); temp9 = vec_mergel(tempE, tempM); temp10 = vec_mergeh(tempF, tempN); temp11 = vec_mergel(tempF, tempN); temp12 = vec_mergeh(tempG, tempO); temp13 = vec_mergel(tempG, tempO); temp14 = vec_mergeh(tempH, tempP); temp15 = vec_mergel(tempH, tempP); vec_st(temp0, 0, dst); vec_st(temp1, 16, dst); vec_st(temp2, 32, dst); vec_st(temp3, 48, dst); vec_st(temp4, 64, dst); vec_st(temp5, 80, dst); vec_st(temp6, 96, dst); vec_st(temp7, 112, dst); vec_st(temp8, 128, dst); vec_st(temp9, 144, dst); vec_st(temp10, 160, dst); vec_st(temp11, 176, dst); vec_st(temp12, 192, dst); vec_st(temp13, 208, dst); vec_st(temp14, 224, dst); vec_st(temp15, 240, dst); } static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) { const vector unsigned char zero = vec_splat_u8(0); const vector signed char neg1 = vec_splat_s8(-1); #define LOAD_DOUBLE_LINE(i, j) \ vector unsigned char src##i = vec_ld(i * 16, src); \ vector unsigned char src##j = vec_ld(j * 16, src) LOAD_DOUBLE_LINE(0, 1); LOAD_DOUBLE_LINE(2, 3); LOAD_DOUBLE_LINE(4, 5); LOAD_DOUBLE_LINE(6, 7); 
LOAD_DOUBLE_LINE(8, 9); LOAD_DOUBLE_LINE(10, 11); LOAD_DOUBLE_LINE(12, 13); LOAD_DOUBLE_LINE(14, 15); #undef LOAD_DOUBLE_LINE vector unsigned char tempA = vec_mergeh(src0, src8); vector unsigned char tempB; vector unsigned char tempC = vec_mergeh(src1, src9); vector unsigned char tempD; vector unsigned char tempE = vec_mergeh(src2, src10); vector unsigned char tempG = vec_mergeh(src3, src11); vector unsigned char tempI = vec_mergeh(src4, src12); vector unsigned char tempJ; vector unsigned char tempK = vec_mergeh(src5, src13); vector unsigned char tempL; vector unsigned char tempM = vec_mergeh(src6, src14); vector unsigned char tempO = vec_mergeh(src7, src15); vector unsigned char temp0 = vec_mergeh(tempA, tempI); vector unsigned char temp1 = vec_mergel(tempA, tempI); vector unsigned char temp2; vector unsigned char temp3; vector unsigned char temp4 = vec_mergeh(tempC, tempK); vector unsigned char temp5 = vec_mergel(tempC, tempK); vector unsigned char temp6; vector unsigned char temp7; vector unsigned char temp8 = vec_mergeh(tempE, tempM); vector unsigned char temp9 = vec_mergel(tempE, tempM); vector unsigned char temp12 = vec_mergeh(tempG, tempO); vector unsigned char temp13 = vec_mergel(tempG, tempO); tempA = vec_mergeh(temp0, temp8); tempB = vec_mergel(temp0, temp8); tempC = vec_mergeh(temp1, temp9); tempD = vec_mergel(temp1, temp9); tempI = vec_mergeh(temp4, temp12); tempJ = vec_mergel(temp4, temp12); tempK = vec_mergeh(temp5, temp13); tempL = vec_mergel(temp5, temp13); temp0 = vec_mergeh(tempA, tempI); temp1 = vec_mergel(tempA, tempI); temp2 = vec_mergeh(tempB, tempJ); temp3 = vec_mergel(tempB, tempJ); temp4 = vec_mergeh(tempC, tempK); temp5 = vec_mergel(tempC, tempK); temp6 = vec_mergeh(tempD, tempL); temp7 = vec_mergel(tempD, tempL); #define STORE_DOUBLE_LINE(i, j) do { \ vector unsigned char dstAi = vec_ld(i * stride, dst); \ vector unsigned char dstBi = vec_ld(i * stride + 16, dst); \ vector unsigned char dstAj = vec_ld(j * stride, dst); \ vector unsigned 
char dstBj = vec_ld(j * stride+ 16, dst); \ vector unsigned char aligni = vec_lvsr(i * stride, dst); \ vector unsigned char alignj = vec_lvsr(j * stride, dst); \ vector unsigned char maski = \ vec_perm(zero, (vector unsigned char)neg1, aligni); \ vector unsigned char maskj = \ vec_perm(zero, (vector unsigned char)neg1, alignj); \ vector unsigned char dstRi = vec_perm(temp##i, temp##i, aligni); \ vector unsigned char dstRj = vec_perm(temp##j, temp##j, alignj); \ vector unsigned char dstAFi = vec_sel(dstAi, dstRi, maski); \ vector unsigned char dstBFi = vec_sel(dstRi, dstBi, maski); \ vector unsigned char dstAFj = vec_sel(dstAj, dstRj, maskj); \ vector unsigned char dstBFj = vec_sel(dstRj, dstBj, maskj); \ vec_st(dstAFi, i * stride, dst); \ vec_st(dstBFi, i * stride + 16, dst); \ vec_st(dstAFj, j * stride, dst); \ vec_st(dstBFj, j * stride + 16, dst); \ } while (0) STORE_DOUBLE_LINE(0,1); STORE_DOUBLE_LINE(2,3); STORE_DOUBLE_LINE(4,5); STORE_DOUBLE_LINE(6,7); }
gpl-2.0
tonghua208/ok6410_uboot_1_6
board/MAI/bios_emulator/scitech/src/biosemu/warmboot.c
46
19150
/**************************************************************************** * * BIOS emulator and interface * to Realmode X86 Emulator Library * * Copyright (C) 1996-1999 SciTech Software, Inc. * * ======================================================================== * * Permission to use, copy, modify, distribute, and sell this software and * its documentation for any purpose is hereby granted without fee, * provided that the above copyright notice appear in all copies and that * both that copyright notice and this permission notice appear in * supporting documentation, and that the name of the authors not be used * in advertising or publicity pertaining to distribution of the software * without specific, written prior permission. The authors makes no * representations about the suitability of this software for any purpose. * It is provided "as is" without express or implied warranty. * * THE AUTHORS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE AUTHORS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. * * ======================================================================== * * Language: ANSI C * Environment: Any * Developer: Kendall Bennett * * Description: Module to implement warm booting of all PCI/AGP controllers * on the bus. We use the x86 real mode emulator to run the * BIOS on the primary and secondary controllers to bring * the cards up. 
* ****************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include "biosemu.h" #ifndef _MAX_PATH #define _MAX_PATH 256 #endif /*------------------------- Global Variables ------------------------------*/ static PCIDeviceInfo PCI[MAX_PCI_DEVICES]; static int NumPCI = -1; static int BridgeIndex[MAX_PCI_DEVICES] = {0}; static int NumBridges; static PCIBridgeInfo *AGPBridge = NULL; static int DeviceIndex[MAX_PCI_DEVICES] = {0}; static int NumDevices; static u32 debugFlags = 0; static BE_VGAInfo VGAInfo[MAX_PCI_DEVICES] = {{0}}; static ibool useV86 = false; static ibool forcePost = false; /* Length of the BIOS image */ #define MAX_BIOSLEN (64 * 1024L) #define FINAL_BIOSLEN (32 * 1024L) /* Macro to determine if the VGA is enabled and responding */ #define VGA_NOT_ACTIVE() (forcePost || (PM_inpb(0x3CC) == 0xFF) || ((PM_inpb(0x3CC) & 0x2) == 0)) #define ENABLE_DEVICE(device) \ PCI_writePCIRegB(0x4,PCI[DeviceIndex[device]].Command | 0x7,device) #define DISABLE_DEVICE(device) \ PCI_writePCIRegB(0x4,0,device) /* Macros to enable and disable AGP VGA resources */ #define ENABLE_AGP_VGA() \ PCI_accessReg(0x3E,AGPBridge->BridgeControl | 0x8,PCI_WRITE_WORD,(PCIDeviceInfo*)AGPBridge) #define DISABLE_AGP_VGA() \ PCI_accessReg(0x3E,AGPBridge->BridgeControl & ~0x8,PCI_WRITE_WORD,(PCIDeviceInfo*)AGPBridge) #define RESTORE_AGP_VGA() \ PCI_accessReg(0x3E,AGPBridge->BridgeControl,PCI_WRITE_WORD,(PCIDeviceInfo*)AGPBridge) /*-------------------------- Implementation -------------------------------*/ /**************************************************************************** RETURNS: The address to use to map the secondary BIOS (PCI/AGP devices) REMARKS: Searches all the PCI base address registers for the device looking for a memory mapping that is large enough to hold our ROM BIOS. 
We usually end up finding the framebuffer mapping (usually BAR 0x10), and we use this mapping to map the BIOS for the device into. We use a mapping that is already assigned to the device to ensure the memory range will be passed through by any PCI->PCI or AGP->PCI bridge that may be present. NOTE: Usually this function is only used for AGP devices, but it may be used for PCI devices that have already been POST'ed and the BIOS ROM base address has been zero'ed out. ****************************************************************************/ static ulong PCI_findBIOSAddr( int device) { ulong base,size; int bar; for (bar = 0x10; bar <= 0x14; bar++) { base = PCI_readPCIRegL(bar,device) & ~0xFF; if (!(base & 0x1)) { PCI_writePCIRegL(bar,0xFFFFFFFF,device); size = PCI_readPCIRegL(bar,device) & ~0xFF; size = ~size+1; PCI_writePCIRegL(bar,0,device); if (size >= MAX_BIOSLEN) return base; } } return 0; } /**************************************************************************** REMARKS: Re-writes the PCI base address registers for the secondary PCI controller with the values from our initial PCI bus enumeration. This fixes up the values after we have POST'ed the secondary display controller BIOS, which may have incorrectly re-programmed the base registers the same as the primary display controller (the case for identical S3 cards). 
****************************************************************************/ static void _PCI_fixupSecondaryBARs(void) { int i; for (i = 0; i < NumDevices; i++) { PCI_writePCIRegL(0x10,PCI[DeviceIndex[i]].BaseAddress10,i); PCI_writePCIRegL(0x14,PCI[DeviceIndex[i]].BaseAddress14,i); PCI_writePCIRegL(0x18,PCI[DeviceIndex[i]].BaseAddress18,i); PCI_writePCIRegL(0x1C,PCI[DeviceIndex[i]].BaseAddress1C,i); PCI_writePCIRegL(0x20,PCI[DeviceIndex[i]].BaseAddress20,i); PCI_writePCIRegL(0x24,PCI[DeviceIndex[i]].BaseAddress24,i); } } /**************************************************************************** RETURNS: True if successfully initialised, false if not. REMARKS: This function executes the BIOS POST code on the controller. We assume that at this stage the controller has its I/O and memory space enabled and that all other controllers are in a disabled state. ****************************************************************************/ static void PCI_doBIOSPOST( int device, ulong BIOSPhysAddr, void *mappedBIOS, ulong BIOSLen) { RMREGS regs; RMSREGS sregs; /* Determine the value to store in AX for BIOS POST */ regs.x.ax = (u16)(PCI[DeviceIndex[device]].slot.i >> 8); if (useV86) { /* Post the BIOS using the PM functions (ie: v86 mode on Linux) */ if (!PM_doBIOSPOST(regs.x.ax,BIOSPhysAddr,mappedBIOS,BIOSLen)) { /* If the PM function fails, this probably means are we are on */ /* DOS and can't re-map the real mode 0xC0000 region. In thise */ /* case if the device is the primary, we can use the real */ /* BIOS at 0xC0000 directly. */ if (device == 0) PM_doBIOSPOST(regs.x.ax,0xC0000,mappedBIOS,BIOSLen); } } else { /* Setup the X86 emulator for the VGA BIOS */ BE_setVGA(&VGAInfo[device]); /* Execute the BIOS POST code */ BE_callRealMode(0xC000,0x0003,&regs,&sregs); /* Cleanup and exit */ BE_getVGA(&VGAInfo[device]); } } /**************************************************************************** RETURNS: True if successfully initialised, false if not. 
REMARKS: Loads and POST's the secondary controllers BIOS, directly from the BIOS image we can extract over the PCI bus. ****************************************************************************/ static ibool PCI_postControllers(void) { int device; ulong BIOSImageLen,mappedBIOSPhys; uchar *mappedBIOS,*copyOfBIOS; char filename[_MAX_PATH]; FILE *f; /* Disable the primary display controller and AGP VGA pass-through */ DISABLE_DEVICE(0); if (AGPBridge) DISABLE_AGP_VGA(); /* Now POST all the secondary controllers */ for (device = 0; device < NumDevices; device++) { /* Skip the device if it is not enabled (probably an ISA device) */ if (DeviceIndex[device] == -1) continue; /* Enable secondary display controller. If the secondary controller */ /* is on the AGP bus, then enable VGA resources for the AGP device. */ ENABLE_DEVICE(device); if (AGPBridge && AGPBridge->SecondayBusNumber == PCI[DeviceIndex[device]].slot.p.Bus) ENABLE_AGP_VGA(); /* Check if the controller has already been POST'ed */ if (VGA_NOT_ACTIVE()) { /* Find a viable place to map the secondary PCI BIOS image and map it */ printk("Device %d not enabled, so attempting warm boot it\n", device); /* For AGP devices (and PCI devices that do have the ROM base */ /* address zero'ed out) we have to map the BIOS to a location */ /* that is passed by the AGP bridge to the bus. Some AGP devices */ /* have the ROM base address already set up for us, and some */ /* do not (we map to one of the existing BAR locations in */ /* this case). 
*/ mappedBIOS = NULL; if (PCI[DeviceIndex[device]].ROMBaseAddress != 0) mappedBIOSPhys = PCI[DeviceIndex[device]].ROMBaseAddress & ~0xF; else mappedBIOSPhys = PCI_findBIOSAddr(device); printk("Mapping BIOS image to 0x%08X\n", mappedBIOSPhys); mappedBIOS = PM_mapPhysicalAddr(mappedBIOSPhys,MAX_BIOSLEN-1,false); PCI_writePCIRegL(0x30,mappedBIOSPhys | 0x1,device); BIOSImageLen = mappedBIOS[2] * 512; if ((copyOfBIOS = malloc(BIOSImageLen)) == NULL) return false; memcpy(copyOfBIOS,mappedBIOS,BIOSImageLen); PM_freePhysicalAddr(mappedBIOS,MAX_BIOSLEN-1); /* Allocate memory to store copy of BIOS from secondary controllers */ VGAInfo[device].pciInfo = &PCI[DeviceIndex[device]]; VGAInfo[device].BIOSImage = copyOfBIOS; VGAInfo[device].BIOSImageLen = BIOSImageLen; /* Restore device mappings */ PCI_writePCIRegL(0x30,PCI[DeviceIndex[device]].ROMBaseAddress,device); PCI_writePCIRegL(0x10,PCI[DeviceIndex[device]].BaseAddress10,device); PCI_writePCIRegL(0x14,PCI[DeviceIndex[device]].BaseAddress14,device); /* Now execute the BIOS POST for the device */ if (copyOfBIOS[0] == 0x55 && copyOfBIOS[1] == 0xAA) { printk("Executing BIOS POST for controller.\n"); PCI_doBIOSPOST(device,mappedBIOSPhys,copyOfBIOS,BIOSImageLen); } /* Reset the size of the BIOS image to the final size */ VGAInfo[device].BIOSImageLen = FINAL_BIOSLEN; /* Save the BIOS and interrupt vector information to disk */ sprintf(filename,"%s/bios.%02d",PM_getNucleusConfigPath(),device); if ((f = fopen(filename,"wb")) != NULL) { fwrite(copyOfBIOS,1,FINAL_BIOSLEN,f); fwrite(VGAInfo[device].LowMem,1,sizeof(VGAInfo[device].LowMem),f); fclose(f); } } else { /* Allocate memory to store copy of BIOS from secondary controllers */ if ((copyOfBIOS = malloc(FINAL_BIOSLEN)) == NULL) return false; VGAInfo[device].pciInfo = &PCI[DeviceIndex[device]]; VGAInfo[device].BIOSImage = copyOfBIOS; VGAInfo[device].BIOSImageLen = FINAL_BIOSLEN; /* Load the BIOS and interrupt vector information from disk */ 
sprintf(filename,"%s/bios.%02d",PM_getNucleusConfigPath(),device); if ((f = fopen(filename,"rb")) != NULL) { fread(copyOfBIOS,1,FINAL_BIOSLEN,f); fread(VGAInfo[device].LowMem,1,sizeof(VGAInfo[device].LowMem),f); fclose(f); } } /* Fix up all the secondary PCI base address registers */ /* (restores them all from the values we read previously) */ _PCI_fixupSecondaryBARs(); /* Disable the secondary controller and AGP VGA pass-through */ DISABLE_DEVICE(device); if (AGPBridge) DISABLE_AGP_VGA(); } /* Reenable primary display controller and reset AGP bridge control */ if (AGPBridge) RESTORE_AGP_VGA(); ENABLE_DEVICE(0); /* Free physical BIOS image mapping */ PM_freePhysicalAddr(mappedBIOS,MAX_BIOSLEN-1); /* Restore the X86 emulator BIOS info to primary controller */ if (!useV86) BE_setVGA(&VGAInfo[0]); return true; } /**************************************************************************** REMARKS: Enumerates the PCI bus and dumps the PCI configuration information to the log file. ****************************************************************************/ static void EnumeratePCI(void) { int i,index; PCIBridgeInfo *info; printk("Displaying enumeration of PCI bus (%d devices, %d display devices)\n", NumPCI, NumDevices); for (index = 0; index < NumDevices; index++) printk(" Display device %d is PCI device %d\n",index,DeviceIndex[index]); printk("\n"); printk("Bus Slot Fnc DeviceID SubSystem Rev Class IRQ Int Cmd\n"); for (i = 0; i < NumPCI; i++) { printk("%2d %2d %2d %04X:%04X %04X:%04X %02X %02X:%02X %02X %02X %04X ", PCI[i].slot.p.Bus, PCI[i].slot.p.Device, PCI[i].slot.p.Function, PCI[i].VendorID, PCI[i].DeviceID, PCI[i].SubSystemVendorID, PCI[i].SubSystemID, PCI[i].RevID, PCI[i].BaseClass, PCI[i].SubClass, PCI[i].InterruptLine, PCI[i].InterruptPin, PCI[i].Command); for (index = 0; index < NumDevices; index++) { if (DeviceIndex[index] == i) break; } if (index < NumDevices) printk("<- %d\n", index); else printk("\n"); } printk("\n"); printk("DeviceID Stat Ifc Cch Lat 
Hdr BIST\n"); for (i = 0; i < NumPCI; i++) { printk("%04X:%04X %04X %02X %02X %02X %02X %02X ", PCI[i].VendorID, PCI[i].DeviceID, PCI[i].Status, PCI[i].Interface, PCI[i].CacheLineSize, PCI[i].LatencyTimer, PCI[i].HeaderType, PCI[i].BIST); for (index = 0; index < NumDevices; index++) { if (DeviceIndex[index] == i) break; } if (index < NumDevices) printk("<- %d\n", index); else printk("\n"); } printk("\n"); printk("DeviceID Base10h Base14h Base18h Base1Ch Base20h Base24h ROMBase\n"); for (i = 0; i < NumPCI; i++) { printk("%04X:%04X %08X %08X %08X %08X %08X %08X %08X ", PCI[i].VendorID, PCI[i].DeviceID, PCI[i].BaseAddress10, PCI[i].BaseAddress14, PCI[i].BaseAddress18, PCI[i].BaseAddress1C, PCI[i].BaseAddress20, PCI[i].BaseAddress24, PCI[i].ROMBaseAddress); for (index = 0; index < NumDevices; index++) { if (DeviceIndex[index] == i) break; } if (index < NumDevices) printk("<- %d\n", index); else printk("\n"); } printk("\n"); printk("DeviceID BAR10Len BAR14Len BAR18Len BAR1CLen BAR20Len BAR24Len ROMLen\n"); for (i = 0; i < NumPCI; i++) { printk("%04X:%04X %08X %08X %08X %08X %08X %08X %08X ", PCI[i].VendorID, PCI[i].DeviceID, PCI[i].BaseAddress10Len, PCI[i].BaseAddress14Len, PCI[i].BaseAddress18Len, PCI[i].BaseAddress1CLen, PCI[i].BaseAddress20Len, PCI[i].BaseAddress24Len, PCI[i].ROMBaseAddressLen); for (index = 0; index < NumDevices; index++) { if (DeviceIndex[index] == i) break; } if (index < NumDevices) printk("<- %d\n", index); else printk("\n"); } printk("\n"); printk("Displaying enumeration of %d bridge devices\n",NumBridges); printk("\n"); printk("DeviceID P# S# B# IOB IOL MemBase MemLimit PreBase PreLimit Ctrl\n"); for (i = 0; i < NumBridges; i++) { info = (PCIBridgeInfo*)&PCI[BridgeIndex[i]]; printk("%04X:%04X %02X %02X %02X %04X %04X %08X %08X %08X %08X %04X\n", info->VendorID, info->DeviceID, info->PrimaryBusNumber, info->SecondayBusNumber, info->SubordinateBusNumber, ((u16)info->IOBase << 8) & 0xF000, info->IOLimit ? 
((u16)info->IOLimit << 8) | 0xFFF : 0, ((u32)info->MemoryBase << 16) & 0xFFF00000, info->MemoryLimit ? ((u32)info->MemoryLimit << 16) | 0xFFFFF : 0, ((u32)info->PrefetchableMemoryBase << 16) & 0xFFF00000, info->PrefetchableMemoryLimit ? ((u32)info->PrefetchableMemoryLimit << 16) | 0xFFFFF : 0, info->BridgeControl); } printk("\n"); } /**************************************************************************** RETURNS: Number of display devices found. REMARKS: This function enumerates the number of available display devices on the PCI bus, and returns the number found. ****************************************************************************/ static int PCI_enumerateDevices(void) { int i,j; PCIBridgeInfo *info; /* If this is the first time we have been called, enumerate all */ /* devices on the PCI bus. */ if (NumPCI == -1) { for (i = 0; i < MAX_PCI_DEVICES; i++) PCI[i].dwSize = sizeof(PCI[i]); if ((NumPCI = PCI_enumerate(PCI,MAX_PCI_DEVICES)) == 0) return -1; /* Build a list of all PCI bridge devices */ for (i = 0,NumBridges = 0,BridgeIndex[0] = -1; i < NumPCI; i++) { if (PCI[i].BaseClass == PCI_BRIDGE_CLASS) { if (NumBridges < MAX_PCI_DEVICES) BridgeIndex[NumBridges++] = i; } } /* Now build a list of all display class devices */ for (i = 0,NumDevices = 1,DeviceIndex[0] = -1; i < NumPCI; i++) { if (PCI_IS_DISPLAY_CLASS(&PCI[i])) { if ((PCI[i].Command & 0x3) == 0x3) { DeviceIndex[0] = i; } else { if (NumDevices < MAX_PCI_DEVICES) DeviceIndex[NumDevices++] = i; } if (PCI[i].slot.p.Bus != 0) { /* This device is on a different bus than the primary */ /* PCI bus, so it is probably an AGP device. Find the */ /* AGP bus device that controls that bus so we can */ /* control it. 
*/ for (j = 0; j < NumBridges; j++) { info = (PCIBridgeInfo*)&PCI[BridgeIndex[j]]; if (info->SecondayBusNumber == PCI[i].slot.p.Bus) { AGPBridge = info; break; } } } } } /* Enumerate all PCI and bridge devices to log file */ EnumeratePCI(); } return NumDevices; } FILE *logfile; void printk(const char *fmt, ...) { va_list argptr; va_start(argptr, fmt); vfprintf(logfile, fmt, argptr); fflush(logfile); va_end(argptr); } int main(int argc,char *argv[]) { while (argc > 1) { if (stricmp(argv[1],"-usev86") == 0) { useV86 = true; } else if (stricmp(argv[1],"-force") == 0) { forcePost = true; } #ifdef DEBUG else if (stricmp(argv[1],"-decode") == 0) { debugFlags |= DEBUG_DECODE_F; } else if (stricmp(argv[1],"-iotrace") == 0) { debugFlags |= DEBUG_IO_TRACE_F; } #endif else { printf("Usage: warmboot [-usev86] [-force] [-decode] [-iotrace]\n"); exit(-1); } argc--; argv++; } if ((logfile = fopen("warmboot.log","w")) == NULL) exit(1); PM_init(); if (!useV86) { /* Initialise the x86 BIOS emulator */ BE_init(false,debugFlags,65536,&VGAInfo[0]); } /* Enumerate all devices (which POST's them at the same time) */ if (PCI_enumerateDevices() < 1) { printk("No PCI display devices found!\n"); return -1; } /* Post all the display controller BIOS'es */ PCI_postControllers(); /* Cleanup and exit the emulator */ if (!useV86) BE_exit(); fclose(logfile); return 0; }
gpl-2.0
perheld/qmk_firmware
keyboards/clueboard/60/led.c
46
1195
/*
 * Copyright 2017 skully <skullydazed@gmail.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <hal.h>
#include "print.h"

#ifdef BACKLIGHT_ENABLE
#include "backlight.h"

/* Prepare the backlight pin: PB8 is driven push-pull and the line is
 * active-low, so it starts out high (backlight off). */
void backlight_init_ports(void) {
    printf("backlight_init_ports()\n");
    palSetPadMode(GPIOB, 8, PAL_MODE_OUTPUT_PUSHPULL);
    palSetPad(GPIOB, 8);
}

/* Switch the backlight on for any non-zero level, off for zero.
 * The pin is active-low: clearing the pad lights the backlight. */
void backlight_set(uint8_t level) {
    printf("backlight_set(%d)\n", level);
    if (level) {
        palClearPad(GPIOB, 8);
    } else {
        palSetPad(GPIOB, 8);
    }
}
#endif
gpl-2.0
SystemTera/SystemTera.Server-V-3.2-Kernel
drivers/pcmcia/pxa2xx_palmtc.c
558
4145
/*
 * linux/drivers/pcmcia/pxa2xx_palmtc.c
 *
 * Driver for Palm Tungsten|C PCMCIA
 *
 * Copyright (C) 2008 Alex Osborne <ato@meshy.org>
 * Copyright (C) 2009-2011 Marek Vasut <marek.vasut@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/delay.h>

#include <asm/mach-types.h>
#include <mach/palmtc.h>
#include "soc_common.h"

/*
 * GPIO lines used by the Palm T|C PCMCIA (WiFi) slot. Requested and
 * released as one group; the initial levels match the powered-down state
 * (all power rails low, reset asserted).
 */
static struct gpio palmtc_pcmcia_gpios[] = {
	{ GPIO_NR_PALMTC_PCMCIA_POWER1,	GPIOF_INIT_LOW,	"PCMCIA Power 1" },
	{ GPIO_NR_PALMTC_PCMCIA_POWER2,	GPIOF_INIT_LOW,	"PCMCIA Power 2" },
	{ GPIO_NR_PALMTC_PCMCIA_POWER3,	GPIOF_INIT_LOW,	"PCMCIA Power 3" },
	{ GPIO_NR_PALMTC_PCMCIA_RESET,	GPIOF_INIT_HIGH,"PCMCIA Reset" },
	{ GPIO_NR_PALMTC_PCMCIA_READY,	GPIOF_IN,	"PCMCIA Ready" },
	{ GPIO_NR_PALMTC_PCMCIA_PWRREADY,	GPIOF_IN,	"PCMCIA Power Ready" },
};

/*
 * Claim all slot GPIOs and route the READY line as the socket interrupt.
 * Note that pci_irq is assigned even when gpio_request_array() failed;
 * the error is still propagated to soc_common via the return value.
 */
static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
	int ret;

	ret = gpio_request_array(palmtc_pcmcia_gpios,
				ARRAY_SIZE(palmtc_pcmcia_gpios));

	skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY);

	return ret;
}

/* Release the slot GPIOs claimed in hw_init. */
static void palmtc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
	gpio_free_array(palmtc_pcmcia_gpios, ARRAY_SIZE(palmtc_pcmcia_gpios));
}

/*
 * Report socket state. The card (onboard WiFi) is soldered in, so detect
 * is hard-wired to "inserted"; only READY is sampled from hardware.
 */
static void palmtc_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
					struct pcmcia_state *state)
{
	state->detect = 1; /* always inserted */
	state->ready = !!gpio_get_value(GPIO_NR_PALMTC_PCMCIA_READY);
	state->bvd1 = 1;
	state->bvd2 = 1;
	state->wrprot = 0;
	state->vs_3v = 1;
	state->vs_Xv = 0;
}

/*
 * Power the WiFi card down: assert reset, then drop the supply rails in
 * reverse order of power-up, with a settling delay in between.
 */
static int palmtc_wifi_powerdown(void)
{
	gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
	gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER2, 0);
	mdelay(40);
	gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER1, 0);
	return 0;
}

/*
 * Power the WiFi card up, wait for PWRREADY, then pulse reset.
 * Returns 0 on success, 1 if the card never signalled power-ready
 * (in which case it is powered back down).
 */
static int palmtc_wifi_powerup(void)
{
	int timeout = 50;

	gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER3, 1);
	mdelay(50);

	/* Power up the card, 1.8V first, after a while 3.3V */
	gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER1, 1);
	mdelay(100);
	gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER2, 1);

	/* Wait till the card is ready (polled at ~1ms, max 50ms) */
	while (!gpio_get_value(GPIO_NR_PALMTC_PCMCIA_PWRREADY) &&
		timeout) {
		mdelay(1);
		timeout--;
	}

	/* Power down the WiFi in case of error */
	if (!timeout) {
		palmtc_wifi_powerdown();
		return 1;
	}

	/* Reset the card */
	gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
	mdelay(20);
	gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 0);
	mdelay(25);

	gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER3, 0);

	return 0;
}

/*
 * Apply a Vcc setting: 0 powers the card down, 33 (3.3V) powers it up.
 * NOTE(review): any other Vcc value leaves ret at its initial 1 (error)
 * without touching the hardware -- confirm this is the intended contract.
 */
static int palmtc_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
					const socket_state_t *state)
{
	int ret = 1;

	if (state->Vcc == 0)
		ret = palmtc_wifi_powerdown();
	else if (state->Vcc == 33)
		ret = palmtc_wifi_powerup();

	return ret;
}

/* soc_common callbacks for the single Palm T|C socket (socket 0). */
static struct pcmcia_low_level palmtc_pcmcia_ops = {
	.owner			= THIS_MODULE,

	.first			= 0,
	.nr			= 1,

	.hw_init		= palmtc_pcmcia_hw_init,
	.hw_shutdown		= palmtc_pcmcia_hw_shutdown,

	.socket_state		= palmtc_pcmcia_socket_state,
	.configure_socket	= palmtc_pcmcia_configure_socket,
};

static struct platform_device *palmtc_pcmcia_device;

/*
 * Register a "pxa2xx-pcmcia" platform device carrying our ops table as
 * platform data. The generic pxa2xx socket driver binds to it.
 */
static int __init palmtc_pcmcia_init(void)
{
	int ret;

	if (!machine_is_palmtc())
		return -ENODEV;

	palmtc_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
	if (!palmtc_pcmcia_device)
		return -ENOMEM;

	ret = platform_device_add_data(palmtc_pcmcia_device, &palmtc_pcmcia_ops,
					sizeof(palmtc_pcmcia_ops));

	if (!ret)
		ret = platform_device_add(palmtc_pcmcia_device);

	/* Drop the reference on any failure above */
	if (ret)
		platform_device_put(palmtc_pcmcia_device);

	return ret;
}

static void __exit palmtc_pcmcia_exit(void)
{
	platform_device_unregister(palmtc_pcmcia_device);
}

module_init(palmtc_pcmcia_init);
module_exit(palmtc_pcmcia_exit);

MODULE_AUTHOR("Alex Osborne <ato@meshy.org>,"
	    " Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("PCMCIA support for Palm Tungsten|C");
MODULE_ALIAS("platform:pxa2xx-pcmcia");
MODULE_LICENSE("GPL");
gpl-2.0
TEAM-Gummy/android_kernel_sony_msm8x27
drivers/staging/prima/CORE/WDI/DP/src/wlan_qct_wdi_ds.c
814
17822
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /* * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. 
*/ /**========================================================================= * * \file wlan_qct_wdi_ds.c * * \brief define Dataservice API * * WLAN Device Abstraction layer External API for Dataservice * DESCRIPTION * This file contains the external API implemntation exposed by the * wlan device abstarction layer module. * * Copyright (c) 2008 QUALCOMM Incorporated. All Rights Reserved. * Qualcomm Confidential and Proprietary */ #include "wlan_qct_wdi.h" #include "wlan_qct_wdi_i.h" #include "wlan_qct_wdi_ds.h" #include "wlan_qct_wdi_ds_i.h" #include "wlan_qct_wdi_dts.h" #include "wlan_qct_wdi_dp.h" #include "wlan_qct_wdi_sta.h" /* DAL registration function. * Parameters: * pContext:Cookie that should be passed back to the caller along * with the callback. * pfnTxCompleteCallback:Callback function that is to be invoked to return * packets which have been transmitted. * pfnRxPacketCallback:Callback function that is to be invoked to deliver * packets which have been received * pfnTxFlowControlCallback:Callback function that is to be invoked to * indicate/clear congestion. * * Return Value: SUCCESS Completed successfully. * FAILURE_XXX Request was rejected due XXX Reason. 
 * */
WDI_Status
WDI_DS_Register( void *pContext,
  WDI_DS_TxCompleteCallback pfnTxCompleteCallback,
  WDI_DS_RxPacketCallback pfnRxPacketCallback,
  WDI_DS_TxFlowControlCallback pfnTxFlowControlCallback,
  void *pCallbackContext)
{
  WDI_DS_ClientDataType *pClientData;
  wpt_uint8 bssLoop;

  // Do Sanity checks: every callback and both contexts are mandatory
  if (NULL == pContext ||
      NULL == pCallbackContext ||
      NULL == pfnTxCompleteCallback ||
      NULL == pfnRxPacketCallback ||
      NULL == pfnTxFlowControlCallback)
  {
    return WDI_STATUS_E_FAILURE;
  }

  // The per-client datapath state must already exist on the WDI context
  pClientData = (WDI_DS_ClientDataType *)WDI_DS_GetDatapathContext(pContext);
  if (NULL == pClientData)
  {
    return WDI_STATUS_MEM_FAILURE;
  }

  // Store callbacks in client structure for later dispatch by the datapath
  pClientData->pcontext = pContext;
  pClientData->receiveFrameCB = pfnRxPacketCallback;
  pClientData->txCompleteCB = pfnTxCompleteCallback;
  pClientData->txResourceCB = pfnTxFlowControlCallback;
  pClientData->pCallbackContext = pCallbackContext;

  // Reset the BSS-index -> STA-index mapping table to "all slots free"
  for(bssLoop = 0; bssLoop < WDI_DS_MAX_SUPPORTED_BSS; bssLoop++)
  {
    pClientData->staIdxPerBssIdxTable[bssLoop].isUsed = 0;
    pClientData->staIdxPerBssIdxTable[bssLoop].bssIdx = WDI_DS_INDEX_INVALID;
    pClientData->staIdxPerBssIdxTable[bssLoop].staIdx = WDI_DS_INDEX_INVALID;
  }

  return WDI_STATUS_SUCCESS;
}



/* DAL Transmit function.
 * Parameters:
 * pContext:Cookie that should be passed back to the caller along with the callback.
 * pFrame:Reference to PAL frame.
 * more: Does the invokee have more than one packet pending?
 * Return Value: SUCCESS  Completed successfully.
 *     FAILURE_XXX  Request was rejected due XXX Reason.
 * */
WDI_Status
WDI_DS_TxPacket(void *pContext,
  wpt_packet *pFrame,
  wpt_boolean more)
{
  WDI_DS_ClientDataType *pClientData;
  wpt_uint8 ucSwFrameTXXlation;
  wpt_uint8 ucUP;
  wpt_uint8 ucTypeSubtype;
  wpt_uint8 alignment;
  wpt_uint8 ucTxFlag;
  wpt_uint8* pSTAMACAddress;
  wpt_uint8* pAddr2MACAddress;
  WDI_DS_TxMetaInfoType *pTxMetadata;
  void *physBDHeader, *pvBDHeader;
  wpt_uint8 ucType;
  WDI_DS_BdMemPoolType *pMemPool;
  wpt_uint8 ucBdPoolType;
  wpt_uint8 staId;
  WDI_Status wdiStatus;

  // Do Sanity checks; a suspended datapath rejects all transmits
  if (NULL == pContext)
  {
    return WDI_STATUS_E_FAILURE;
  }
  pClientData = (WDI_DS_ClientDataType *) WDI_DS_GetDatapathContext(pContext);
  if (NULL == pClientData || pClientData->suspend)
  {
    return WDI_STATUS_E_FAILURE;
  }

  // extract metadata from PAL packet
  pTxMetadata = WDI_DS_ExtractTxMetaData(pFrame);
  ucSwFrameTXXlation = pTxMetadata->fdisableFrmXlt;
  ucTypeSubtype = pTxMetadata->typeSubtype;
  ucUP = pTxMetadata->fUP;
  ucTxFlag = pTxMetadata->txFlags;
  pSTAMACAddress = &(pTxMetadata->fSTAMACAddress[0]);
  pAddr2MACAddress = &(pTxMetadata->addr2MACAddress[0]);

  /*------------------------------------------------------------------------
    Get type and subtype of the frame first, and pick the BD memory pool:
    ordinary data frames use the data pool; management frames, EAPOL
    frames, and (with TDLS) special BD_RATE2 data frames use the
    higher-priority management pool.
  ------------------------------------------------------------------------*/
  ucType = (ucTypeSubtype & WDI_FRAME_TYPE_MASK) >> WDI_FRAME_TYPE_OFFSET;
  switch(ucType)
  {
    case WDI_MAC_DATA_FRAME:
#ifdef FEATURE_WLAN_TDLS
      /* I utilizes TDLS mgmt frame always sent at BD_RATE2. (See limProcessTdls.c)
         Assumption here is data frame sent by WDA_TxPacket() <-
         HalTxFrame/HalTxFrameWithComplete() should take managment path.
         As of today, only TDLS feature has special data frame which
         needs to be treated as mgmt. */
      if((!pTxMetadata->isEapol) &&
         ((pTxMetadata->txFlags & WDI_USE_BD_RATE2_FOR_MANAGEMENT_FRAME)
           != WDI_USE_BD_RATE2_FOR_MANAGEMENT_FRAME))
#else
      if(!pTxMetadata->isEapol)
#endif
      {
        pMemPool = &(pClientData->dataMemPool);
        ucBdPoolType = WDI_DATA_POOL_ID;
        break;
      }
      // intentional fall-through to handle eapol packet as mgmt
    case WDI_MAC_MGMT_FRAME:
      pMemPool = &(pClientData->mgmtMemPool);
      ucBdPoolType = WDI_MGMT_POOL_ID;
      break;
    default:
      return WDI_STATUS_E_FAILURE;
  }

  // Allocate BD header from pool
  pvBDHeader = WDI_DS_MemPoolAlloc(pMemPool, &physBDHeader, ucBdPoolType);
  if(NULL == pvBDHeader)
    return WDI_STATUS_E_FAILURE;
  WDI_SetBDPointers(pFrame, pvBDHeader, physBDHeader);

  alignment = 0;
  WDI_DS_PrepareBDHeader(pFrame, ucSwFrameTXXlation, alignment);

  // Fill in the TX BD; on failure the BD header must be returned to the pool
  wdiStatus = WDI_FillTxBd(pContext, ucTypeSubtype, pSTAMACAddress,
    pAddr2MACAddress, &ucUP, 1, pvBDHeader, ucTxFlag /* No ACK */,
    0, &staId);
  if(WDI_STATUS_SUCCESS != wdiStatus)
  {
    WDI_DS_MemPoolFree(pMemPool, pvBDHeader, physBDHeader);
    return wdiStatus;
  }
  pTxMetadata->staIdx = staId;

  // Send packet to transport layer.
  if(eWLAN_PAL_STATUS_SUCCESS !=WDTS_TxPacket(pContext, pFrame)){
    WDI_DS_MemPoolFree(pMemPool, pvBDHeader, physBDHeader);
    return WDI_STATUS_E_FAILURE;
  }

  /* resource count only for data packet */
  // EAPOL packet doesn't use data mem pool if being treated as higher priority
#ifdef FEATURE_WLAN_TDLS
  /* I utilizes TDLS mgmt frame always sent at BD_RATE2. (See limProcessTdls.c)
     Assumption here is data frame sent by WDA_TxPacket() <-
     HalTxFrame/HalTxFrameWithComplete() should take managment path.
     As of today, only TDLS feature has special data frame which
     needs to be treated as mgmt. */
  if((WDI_MAC_DATA_FRAME == ucType) &&
     (!pTxMetadata->isEapol) &&
     ((pTxMetadata->txFlags & WDI_USE_BD_RATE2_FOR_MANAGEMENT_FRAME)
       != WDI_USE_BD_RATE2_FOR_MANAGEMENT_FRAME))
#else
  if(WDI_MAC_DATA_FRAME == ucType && (!pTxMetadata->isEapol))
#endif
  {
    WDI_DS_MemPoolIncreaseReserveCount(pMemPool, staId);
  }
  return WDI_STATUS_SUCCESS;
}



/* DAL Transmit Complete function.
 * Parameters:
 * pContext:Cookie that should be passed back to the caller along with the callback.
 * ucTxResReq:TX resource number required by TL
 * Return Value: SUCCESS  Completed successfully.
 *     FAILURE_XXX  Request was rejected due XXX Reason.
 * */
WDI_Status
WDI_DS_TxComplete(void *pContext, wpt_uint32 ucTxResReq)
{
  // Do Sanity checks
  if(NULL == pContext)
    return WDI_STATUS_E_FAILURE;

  // Send notification to transport layer.
  if(eWLAN_PAL_STATUS_SUCCESS !=WDTS_CompleteTx(pContext, ucTxResReq))
  {
    return WDI_STATUS_E_FAILURE;
  }
  return WDI_STATUS_SUCCESS;
}


/* DAL Suspend Transmit function.
 * Parameters:
 * pContext:Cookie that should be passed back to the caller along with the callback.
 * Return Value: SUCCESS  Completed successfully.
 *     FAILURE_XXX  Request was rejected due XXX Reason.
 * NOTE(review): unlike WDI_DS_TxPacket, pClientData is not NULL-checked
 * here before dereference -- confirm callers guarantee a valid context.
 * */
WDI_Status
WDI_DS_TxSuspend(void *pContext)
{
  WDI_DS_ClientDataType *pClientData =
    (WDI_DS_ClientDataType *) WDI_DS_GetDatapathContext(pContext);
  // Flag checked at the top of WDI_DS_TxPacket; rejects further transmits
  pClientData->suspend = 1;
  return WDI_STATUS_SUCCESS;
}


/* DAL Resume Transmit function.
 * Parameters:
 * pContext:Cookie that should be passed back to the caller along with the callback.
 * Return Value: SUCCESS  Completed successfully.
 *     FAILURE_XXX  Request was rejected due XXX Reason.
 * */
WDI_Status
WDI_DS_TxResume(void *pContext)
{
  WDI_DS_ClientDataType *pClientData =
    (WDI_DS_ClientDataType *) WDI_DS_GetDatapathContext(pContext);
  // Re-enable transmits rejected while suspended
  pClientData->suspend = 0;
  return WDI_STATUS_SUCCESS;
}

/* DAL Get Available Resource Count.
 * This is the number of free descriptors in DXE
 * Parameters:
 * pContext:Cookie that should be passed back to the caller along with the callback.
 * wdiResPool: - identifier of resource pool
 * Return Value: number of resources available
 * This is the number of free descriptors in DXE
 * */
wpt_uint32 WDI_GetAvailableResCount(void *pContext,WDI_ResPoolType wdiResPool)
{
  WDI_DS_ClientDataType *pClientData =
    (WDI_DS_ClientDataType *) WDI_DS_GetDatapathContext(pContext);
  switch(wdiResPool)
  {
    case WDI_MGMT_POOL_ID:
      /* NOTE(review): free mgmt resources are derived from the pool
         high-water constant minus twice the pool's count -- the factor
         of 2 is not explained here; confirm against WDI_DS_MemPool. */
      return (WDI_DS_HI_PRI_RES_NUM -
               2*WDI_DS_GetAvailableResCount(&pClientData->mgmtMemPool));
    case WDI_DATA_POOL_ID:
      // Data resources come straight from the DXE transport layer
      return WDTS_GetFreeTxDataResNumber(pContext);
    default:
      return 0;
  }
}

/* DAL Get reserved Resource Count per STA.
 * Parameters:
 * pContext:Cookie that should be passed back to the caller along with the callback.
 * wdiResPool: - identifier of resource pool
 * staId: STA ID
 * Return Value: number of resources reserved per STA
 * */
wpt_uint32 WDI_DS_GetReservedResCountPerSTA(void *pContext,
                        WDI_ResPoolType wdiResPool, wpt_uint8 staId)
{
  WDI_DS_ClientDataType *pClientData =
    (WDI_DS_ClientDataType *) WDI_DS_GetDatapathContext(pContext);
  switch(wdiResPool)
  {
    case WDI_MGMT_POOL_ID:
      return WDI_DS_MemPoolGetRsvdResCountPerSTA(&pClientData->mgmtMemPool,
                                                 staId);
    case WDI_DATA_POOL_ID:
      return WDI_DS_MemPoolGetRsvdResCountPerSTA(&pClientData->dataMemPool,
                                                 staId);
    default:
      return 0;
  }
}

/* DAL STA info add into memPool.
 * Adds the STA to both the management and the data BD pools; stops at
 * the first failure (no rollback of the mgmt-pool add).
 * Parameters:
 * pContext:Cookie that should be passed back to the caller along with the callback.
 * staId: STA ID
 * Return Value: WDI status
 * */
WDI_Status WDI_DS_AddSTAMemPool(void *pContext, wpt_uint8 staIndex)
{
  WDI_Status status = WDI_STATUS_SUCCESS;
  WDI_DS_ClientDataType *pClientData =
    (WDI_DS_ClientDataType *) WDI_DS_GetDatapathContext(pContext);

  status = WDI_DS_MemPoolAddSTA(&pClientData->mgmtMemPool, staIndex);
  if(WDI_STATUS_SUCCESS != status)
  {
    /* Add STA into MGMT memPool Fail */
    return status;
  }
  status = WDI_DS_MemPoolAddSTA(&pClientData->dataMemPool, staIndex);
  if(WDI_STATUS_SUCCESS != status)
  {
    /* Add STA into DATA memPool Fail */
    return status;
  }

  return WDI_STATUS_SUCCESS;
}

/* DAL STA info del from memPool.
 * Removes the STA from both the management and the data BD pools; stops
 * at the first failure.
 * Parameters:
 * pContext:Cookie that should be passed back to the caller along with the callback.
 * staId: STA ID
 * Return Value: WDI status
 * */
WDI_Status WDI_DS_DelSTAMemPool(void *pContext, wpt_uint8 staIndex)
{
  WDI_Status status = WDI_STATUS_SUCCESS;
  WDI_DS_ClientDataType *pClientData =
    (WDI_DS_ClientDataType *) WDI_DS_GetDatapathContext(pContext);

  status = WDI_DS_MemPoolDelSTA(&pClientData->mgmtMemPool, staIndex);
  if(WDI_STATUS_SUCCESS != status)
  {
    /* Del STA from MGMT memPool Fail */
    return status;
  }
  status = WDI_DS_MemPoolDelSTA(&pClientData->dataMemPool, staIndex);
  if(WDI_STATUS_SUCCESS != status)
  {
    /* Del STA from DATA memPool Fail */
    return status;
  }

  return WDI_STATUS_SUCCESS;
}

/* DAL Set STA index associated with BSS index.
 * Parameters:
 * pContext:Cookie that should be passed back to the caller along with the callback.
 * bssIdx: BSS index
 * staId: STA index associated with BSS index
 * Return Status: Found empty slot
 * */
WDI_Status WDI_DS_SetStaIdxPerBssIdx(void *pContext, wpt_uint8 bssIdx,
                                     wpt_uint8 staIdx)
{
  WDI_DS_ClientDataType *pClientData;
  wpt_uint8 bssLoop;

  pClientData = (WDI_DS_ClientDataType *)WDI_DS_GetDatapathContext(pContext);
  for (bssLoop = 0; bssLoop < WDI_DS_MAX_SUPPORTED_BSS; bssLoop++)
  {
    // An identical (bssIdx, staIdx) pair already stored counts as success
    if ((pClientData->staIdxPerBssIdxTable[bssLoop].isUsed) &&
        (bssIdx == pClientData->staIdxPerBssIdxTable[bssLoop].bssIdx) &&
        (staIdx == pClientData->staIdxPerBssIdxTable[bssLoop].staIdx))
    {
      return WDI_STATUS_SUCCESS;
    }

    // Otherwise claim the first free slot for this mapping
    if (0 == pClientData->staIdxPerBssIdxTable[bssLoop].isUsed)
    {
      pClientData->staIdxPerBssIdxTable[bssLoop].bssIdx = bssIdx;
      pClientData->staIdxPerBssIdxTable[bssLoop].staIdx = staIdx;
      pClientData->staIdxPerBssIdxTable[bssLoop].isUsed = 1;
      return WDI_STATUS_SUCCESS;
    }
  }
  /* Could not find empty slot */
  return WDI_STATUS_E_FAILURE;
}

/* DAL Get STA index associated with BSS index.
 * Parameters:
 * pContext:Cookie that should be passed back to the caller along with the callback.
 * bssIdx: BSS index
 * staIdx: out parameter, receives the STA index for the BSS
 * Return Status: SUCCESS if the BSS index was found in the table
 * */
WDI_Status WDI_DS_GetStaIdxFromBssIdx(void *pContext, wpt_uint8 bssIdx,
                                      wpt_uint8 *staIdx)
{
  WDI_DS_ClientDataType *pClientData;
  wpt_uint8 bssLoop;

  pClientData = (WDI_DS_ClientDataType *)WDI_DS_GetDatapathContext(pContext);
  for(bssLoop = 0; bssLoop < WDI_DS_MAX_SUPPORTED_BSS; bssLoop++)
  {
    // Cleared slots hold WDI_DS_INDEX_INVALID, so only live entries match
    if(bssIdx == pClientData->staIdxPerBssIdxTable[bssLoop].bssIdx)
    {
      /* Found BSS index from slot */
      *staIdx = pClientData->staIdxPerBssIdxTable[bssLoop].staIdx;
      return WDI_STATUS_SUCCESS;
    }
  }
  /* Could not find associated STA index with BSS index */
  return WDI_STATUS_E_FAILURE;
}

/* DAL Clear STA index associated with BSS index.
 * Parameters:
 * pContext:Cookie that should be passed back to the caller along with the callback.
 * bssIdx: BSS index
 * staIdx: STA index associated with BSS index
 * Return Status: SUCCESS if a matching slot was found and cleared
 * */
WDI_Status WDI_DS_ClearStaIdxPerBssIdx(void *pContext, wpt_uint8 bssIdx,
                                       wpt_uint8 staIdx)
{
  WDI_DS_ClientDataType *pClientData;
  wpt_uint8 bssLoop;

  pClientData = (WDI_DS_ClientDataType *)WDI_DS_GetDatapathContext(pContext);
  for(bssLoop = 0; bssLoop < WDI_DS_MAX_SUPPORTED_BSS; bssLoop++)
  {
    if((bssIdx == pClientData->staIdxPerBssIdxTable[bssLoop].bssIdx) &&
       (staIdx == pClientData->staIdxPerBssIdxTable[bssLoop].staIdx))
    {
      // Reset the slot so WDI_DS_SetStaIdxPerBssIdx can reuse it
      pClientData->staIdxPerBssIdxTable[bssLoop].bssIdx = WDI_DS_INDEX_INVALID;
      pClientData->staIdxPerBssIdxTable[bssLoop].staIdx = WDI_DS_INDEX_INVALID;
      pClientData->staIdxPerBssIdxTable[bssLoop].isUsed = 0;
      return WDI_STATUS_SUCCESS;
    }
  }
  /* Could not find associated STA index with BSS index */
  return WDI_STATUS_E_FAILURE;
}

/* @brief: WDI_DS_GetTrafficStats
 * This function should be invoked to fetch the current stats
 * Thin pass-through to the DXE transport layer (WDTS).
 * Parameters:
 *  pStats:Pointer to the collected stats
 *  len: length of buffer pointed to by pStats
 * Return Status: None
 */
void WDI_DS_GetTrafficStats(WDI_TrafficStatsType** pStats, wpt_uint32 *len)
{
  return WDTS_GetTrafficStats(pStats, len);
}

/* @brief: WDI_DS_DeactivateTrafficStats
 * This function should be invoked to deactivate traffic stats collection
 * Parameters: None
 * Return Status: None
 */
void WDI_DS_DeactivateTrafficStats(void)
{
  return WDTS_DeactivateTrafficStats();
}

/* @brief: WDI_DS_ActivateTrafficStats
 * This function should be invoked to activate traffic stats collection
 * Parameters: None
 * Return Status: None
 */
void WDI_DS_ActivateTrafficStats(void)
{
  return WDTS_ActivateTrafficStats();
}

/* @brief: WDI_DS_ClearTrafficStats
 * This function should be invoked to clear all past stats
 * Parameters: None
 * Return Status: None
 */
void WDI_DS_ClearTrafficStats(void)
{
  return WDTS_ClearTrafficStats();
}
gpl-2.0
akellar/android_kernel_motorola_shamu
drivers/net/wireless/ath/wil6210/main.c
1326
9701
/* * Copyright (c) 2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/moduleparam.h> #include <linux/if_arp.h> #include "wil6210.h" /* * Due to a hardware issue, * one has to read/write to/from NIC in 32-bit chunks; * regular memcpy_fromio and siblings will * not work on 64-bit platform - it uses 64-bit transactions * * Force 32-bit transactions to enable NIC on 64-bit platforms * * To avoid byte swap on big endian host, __raw_{read|write}l * should be used - {read|write}l would swap bytes to provide * little endian on PCI value in host endianness. 
*/ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src, size_t count) { u32 *d = dst; const volatile u32 __iomem *s = src; /* size_t is unsigned, if (count%4 != 0) it will wrap */ for (count += 4; count > 4; count -= 4) *d++ = __raw_readl(s++); } void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src, size_t count) { volatile u32 __iomem *d = dst; const u32 *s = src; for (count += 4; count > 4; count -= 4) __raw_writel(*s++, d++); } static void _wil6210_disconnect(struct wil6210_priv *wil, void *bssid) { uint i; struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *wdev = wil->wdev; wil_dbg_misc(wil, "%s()\n", __func__); wil_link_off(wil); clear_bit(wil_status_fwconnected, &wil->status); switch (wdev->sme_state) { case CFG80211_SME_CONNECTED: cfg80211_disconnected(ndev, WLAN_STATUS_UNSPECIFIED_FAILURE, NULL, 0, GFP_KERNEL); break; case CFG80211_SME_CONNECTING: cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); break; default: break; } for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) wil_vring_fini_tx(wil, i); clear_bit(wil_status_dontscan, &wil->status); } static void wil_disconnect_worker(struct work_struct *work) { struct wil6210_priv *wil = container_of(work, struct wil6210_priv, disconnect_worker); _wil6210_disconnect(wil, NULL); } static void wil_connect_timer_fn(ulong x) { struct wil6210_priv *wil = (void *)x; wil_dbg_misc(wil, "Connect timeout\n"); /* reschedule to thread context - disconnect won't * run from atomic context */ schedule_work(&wil->disconnect_worker); } static void wil_connect_worker(struct work_struct *work) { int rc; struct wil6210_priv *wil = container_of(work, struct wil6210_priv, connect_worker); int cid = wil->pending_connect_cid; if (cid < 0) { wil_err(wil, "No connection pending\n"); return; } wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid); rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE, cid, 0); wil->pending_connect_cid = -1; if 
(rc == 0) wil_link_on(wil); } int wil_priv_init(struct wil6210_priv *wil) { wil_dbg_misc(wil, "%s()\n", __func__); mutex_init(&wil->mutex); mutex_init(&wil->wmi_mutex); init_completion(&wil->wmi_ready); wil->pending_connect_cid = -1; setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil); INIT_WORK(&wil->connect_worker, wil_connect_worker); INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker); INIT_WORK(&wil->wmi_event_worker, wmi_event_worker); INIT_LIST_HEAD(&wil->pending_wmi_ev); spin_lock_init(&wil->wmi_ev_lock); wil->wmi_wq = create_singlethread_workqueue(WIL_NAME"_wmi"); if (!wil->wmi_wq) return -EAGAIN; wil->wmi_wq_conn = create_singlethread_workqueue(WIL_NAME"_connect"); if (!wil->wmi_wq_conn) { destroy_workqueue(wil->wmi_wq); return -EAGAIN; } return 0; } void wil6210_disconnect(struct wil6210_priv *wil, void *bssid) { del_timer_sync(&wil->connect_timer); _wil6210_disconnect(wil, bssid); } void wil_priv_deinit(struct wil6210_priv *wil) { cancel_work_sync(&wil->disconnect_worker); wil6210_disconnect(wil, NULL); wmi_event_flush(wil); destroy_workqueue(wil->wmi_wq_conn); destroy_workqueue(wil->wmi_wq); } static void wil_target_reset(struct wil6210_priv *wil) { wil_dbg_misc(wil, "Resetting...\n"); /* register write */ #define W(a, v) iowrite32(v, wil->csr + HOSTADDR(a)) /* register set = read, OR, write */ #define S(a, v) iowrite32(ioread32(wil->csr + HOSTADDR(a)) | v, \ wil->csr + HOSTADDR(a)) /* hpal_perst_from_pad_src_n_mask */ S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(6)); /* car_perst_rst_src_n_mask */ S(RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT(7)); W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */ W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */ W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170); W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00); W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0); 
W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000001); W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080); W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); wil_dbg_misc(wil, "Reset completed\n"); #undef W #undef S } void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r) { le32_to_cpus(&r->base); le16_to_cpus(&r->entry_size); le16_to_cpus(&r->size); le32_to_cpus(&r->tail); le32_to_cpus(&r->head); } static int wil_wait_for_fw_ready(struct wil6210_priv *wil) { ulong to = msecs_to_jiffies(1000); ulong left = wait_for_completion_timeout(&wil->wmi_ready, to); if (0 == left) { wil_err(wil, "Firmware not ready\n"); return -ETIME; } else { wil_dbg_misc(wil, "FW ready after %d ms\n", jiffies_to_msecs(to-left)); } return 0; } /* * We reset all the structures, and we reset the UMAC. * After calling this routine, you're expected to reload * the firmware. */ int wil_reset(struct wil6210_priv *wil) { int rc; cancel_work_sync(&wil->disconnect_worker); wil6210_disconnect(wil, NULL); wil6210_disable_irq(wil); wil->status = 0; wmi_event_flush(wil); flush_workqueue(wil->wmi_wq_conn); flush_workqueue(wil->wmi_wq); /* TODO: put MAC in reset */ wil_target_reset(wil); /* init after reset */ wil->pending_connect_cid = -1; INIT_COMPLETION(wil->wmi_ready); /* TODO: release MAC reset */ wil6210_enable_irq(wil); /* we just started MAC, wait for FW ready */ rc = wil_wait_for_fw_ready(wil); return rc; } void wil_link_on(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); wil_dbg_misc(wil, "%s()\n", __func__); netif_carrier_on(ndev); netif_tx_wake_all_queues(ndev); } void wil_link_off(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); wil_dbg_misc(wil, "%s()\n", __func__); netif_tx_stop_all_queues(ndev); netif_carrier_off(ndev); } static int __wil_up(struct wil6210_priv *wil) { struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *wdev = wil->wdev; struct ieee80211_channel *channel = 
wdev->preset_chandef.chan; int rc; int bi; u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype); rc = wil_reset(wil); if (rc) return rc; /* FIXME Firmware works now in PBSS mode(ToDS=0, FromDS=0) */ wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC); switch (wdev->iftype) { case NL80211_IFTYPE_STATION: wil_dbg_misc(wil, "type: STATION\n"); bi = 0; ndev->type = ARPHRD_ETHER; break; case NL80211_IFTYPE_AP: wil_dbg_misc(wil, "type: AP\n"); bi = 100; ndev->type = ARPHRD_ETHER; break; case NL80211_IFTYPE_P2P_CLIENT: wil_dbg_misc(wil, "type: P2P_CLIENT\n"); bi = 0; ndev->type = ARPHRD_ETHER; break; case NL80211_IFTYPE_P2P_GO: wil_dbg_misc(wil, "type: P2P_GO\n"); bi = 100; ndev->type = ARPHRD_ETHER; break; case NL80211_IFTYPE_MONITOR: wil_dbg_misc(wil, "type: Monitor\n"); bi = 0; ndev->type = ARPHRD_IEEE80211_RADIOTAP; /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */ break; default: return -EOPNOTSUPP; } /* Apply profile in the following order: */ /* SSID and channel for the AP */ switch (wdev->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: if (wdev->ssid_len == 0) { wil_err(wil, "SSID not set\n"); return -EINVAL; } rc = wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid); if (rc) return rc; break; default: break; } /* MAC address - pre-requisite for other commands */ wmi_set_mac_address(wil, ndev->dev_addr); /* Set up beaconing if required. */ if (bi > 0) { rc = wmi_pcp_start(wil, bi, wmi_nettype, (channel ? channel->hw_value : 0)); if (rc) return rc; } /* Rx VRING. 
After MAC and beacon */ wil_rx_init(wil); return 0; } int wil_up(struct wil6210_priv *wil) { int rc; mutex_lock(&wil->mutex); rc = __wil_up(wil); mutex_unlock(&wil->mutex); return rc; } static int __wil_down(struct wil6210_priv *wil) { if (wil->scan_request) { cfg80211_scan_done(wil->scan_request, true); wil->scan_request = NULL; } wil6210_disconnect(wil, NULL); wil_rx_fini(wil); return 0; } int wil_down(struct wil6210_priv *wil) { int rc; mutex_lock(&wil->mutex); rc = __wil_down(wil); mutex_unlock(&wil->mutex); return rc; }
gpl-2.0
TeslaProject/android_kernel_moto_shamu
arch/s390/mm/cmm.c
2094
10942
/* * Collaborative memory management interface. * * Copyright IBM Corp 2003, 2010 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, * */ #include <linux/errno.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/module.h> #include <linux/gfp.h> #include <linux/sched.h> #include <linux/sysctl.h> #include <linux/ctype.h> #include <linux/swap.h> #include <linux/kthread.h> #include <linux/oom.h> #include <linux/suspend.h> #include <linux/uaccess.h> #include <asm/pgalloc.h> #include <asm/diag.h> #ifdef CONFIG_CMM_IUCV static char *cmm_default_sender = "VMRMSVM"; #endif static char *sender; module_param(sender, charp, 0400); MODULE_PARM_DESC(sender, "Guest name that may send SMSG messages (default VMRMSVM)"); #include "../../../drivers/s390/net/smsgiucv.h" #define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2) struct cmm_page_array { struct cmm_page_array *next; unsigned long index; unsigned long pages[CMM_NR_PAGES]; }; static long cmm_pages; static long cmm_timed_pages; static volatile long cmm_pages_target; static volatile long cmm_timed_pages_target; static long cmm_timeout_pages; static long cmm_timeout_seconds; static int cmm_suspended; static struct cmm_page_array *cmm_page_list; static struct cmm_page_array *cmm_timed_page_list; static DEFINE_SPINLOCK(cmm_lock); static struct task_struct *cmm_thread_ptr; static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait); static DEFINE_TIMER(cmm_timer, NULL, 0, 0); static void cmm_timer_fn(unsigned long); static void cmm_set_timer(void); static long cmm_alloc_pages(long nr, long *counter, struct cmm_page_array **list) { struct cmm_page_array *pa, *npa; unsigned long addr; while (nr) { addr = __get_free_page(GFP_NOIO); if (!addr) break; spin_lock(&cmm_lock); pa = *list; if (!pa || pa->index >= CMM_NR_PAGES) { /* Need a new page for the page list. 
*/ spin_unlock(&cmm_lock); npa = (struct cmm_page_array *) __get_free_page(GFP_NOIO); if (!npa) { free_page(addr); break; } spin_lock(&cmm_lock); pa = *list; if (!pa || pa->index >= CMM_NR_PAGES) { npa->next = pa; npa->index = 0; pa = npa; *list = pa; } else free_page((unsigned long) npa); } diag10_range(addr >> PAGE_SHIFT, 1); pa->pages[pa->index++] = addr; (*counter)++; spin_unlock(&cmm_lock); nr--; } return nr; } static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list) { struct cmm_page_array *pa; unsigned long addr; spin_lock(&cmm_lock); pa = *list; while (nr) { if (!pa || pa->index <= 0) break; addr = pa->pages[--pa->index]; if (pa->index == 0) { pa = pa->next; free_page((unsigned long) *list); *list = pa; } free_page(addr); (*counter)--; nr--; } spin_unlock(&cmm_lock); return nr; } static int cmm_oom_notify(struct notifier_block *self, unsigned long dummy, void *parm) { unsigned long *freed = parm; long nr = 256; nr = cmm_free_pages(nr, &cmm_timed_pages, &cmm_timed_page_list); if (nr > 0) nr = cmm_free_pages(nr, &cmm_pages, &cmm_page_list); cmm_pages_target = cmm_pages; cmm_timed_pages_target = cmm_timed_pages; *freed += 256 - nr; return NOTIFY_OK; } static struct notifier_block cmm_oom_nb = { .notifier_call = cmm_oom_notify, }; static int cmm_thread(void *dummy) { int rc; while (1) { rc = wait_event_interruptible(cmm_thread_wait, (!cmm_suspended && (cmm_pages != cmm_pages_target || cmm_timed_pages != cmm_timed_pages_target)) || kthread_should_stop()); if (kthread_should_stop() || rc == -ERESTARTSYS) { cmm_pages_target = cmm_pages; cmm_timed_pages_target = cmm_timed_pages; break; } if (cmm_pages_target > cmm_pages) { if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list)) cmm_pages_target = cmm_pages; } else if (cmm_pages_target < cmm_pages) { cmm_free_pages(1, &cmm_pages, &cmm_page_list); } if (cmm_timed_pages_target > cmm_timed_pages) { if (cmm_alloc_pages(1, &cmm_timed_pages, &cmm_timed_page_list)) cmm_timed_pages_target = 
cmm_timed_pages; } else if (cmm_timed_pages_target < cmm_timed_pages) { cmm_free_pages(1, &cmm_timed_pages, &cmm_timed_page_list); } if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer)) cmm_set_timer(); } return 0; } static void cmm_kick_thread(void) { wake_up(&cmm_thread_wait); } static void cmm_set_timer(void) { if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) { if (timer_pending(&cmm_timer)) del_timer(&cmm_timer); return; } if (timer_pending(&cmm_timer)) { if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ)) return; } cmm_timer.function = cmm_timer_fn; cmm_timer.data = 0; cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ; add_timer(&cmm_timer); } static void cmm_timer_fn(unsigned long ignored) { long nr; nr = cmm_timed_pages_target - cmm_timeout_pages; if (nr < 0) cmm_timed_pages_target = 0; else cmm_timed_pages_target = nr; cmm_kick_thread(); cmm_set_timer(); } static void cmm_set_pages(long nr) { cmm_pages_target = nr; cmm_kick_thread(); } static long cmm_get_pages(void) { return cmm_pages; } static void cmm_add_timed_pages(long nr) { cmm_timed_pages_target += nr; cmm_kick_thread(); } static long cmm_get_timed_pages(void) { return cmm_timed_pages; } static void cmm_set_timeout(long nr, long seconds) { cmm_timeout_pages = nr; cmm_timeout_seconds = seconds; cmm_set_timer(); } static int cmm_skip_blanks(char *cp, char **endp) { char *str; for (str = cp; *str == ' ' || *str == '\t'; str++) ; *endp = str; return str != cp; } static struct ctl_table cmm_table[]; static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { char buf[16], *p; long nr; int len; if (!*lenp || (*ppos && !write)) { *lenp = 0; return 0; } if (write) { len = *lenp; if (copy_from_user(buf, buffer, len > sizeof(buf) ? 
sizeof(buf) : len)) return -EFAULT; buf[sizeof(buf) - 1] = '\0'; cmm_skip_blanks(buf, &p); nr = simple_strtoul(p, &p, 0); if (ctl == &cmm_table[0]) cmm_set_pages(nr); else cmm_add_timed_pages(nr); } else { if (ctl == &cmm_table[0]) nr = cmm_get_pages(); else nr = cmm_get_timed_pages(); len = sprintf(buf, "%ld\n", nr); if (len > *lenp) len = *lenp; if (copy_to_user(buffer, buf, len)) return -EFAULT; } *lenp = len; *ppos += len; return 0; } static int cmm_timeout_handler(ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { char buf[64], *p; long nr, seconds; int len; if (!*lenp || (*ppos && !write)) { *lenp = 0; return 0; } if (write) { len = *lenp; if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) return -EFAULT; buf[sizeof(buf) - 1] = '\0'; cmm_skip_blanks(buf, &p); nr = simple_strtoul(p, &p, 0); cmm_skip_blanks(p, &p); seconds = simple_strtoul(p, &p, 0); cmm_set_timeout(nr, seconds); } else { len = sprintf(buf, "%ld %ld\n", cmm_timeout_pages, cmm_timeout_seconds); if (len > *lenp) len = *lenp; if (copy_to_user(buffer, buf, len)) return -EFAULT; } *lenp = len; *ppos += len; return 0; } static struct ctl_table cmm_table[] = { { .procname = "cmm_pages", .mode = 0644, .proc_handler = cmm_pages_handler, }, { .procname = "cmm_timed_pages", .mode = 0644, .proc_handler = cmm_pages_handler, }, { .procname = "cmm_timeout", .mode = 0644, .proc_handler = cmm_timeout_handler, }, { } }; static struct ctl_table cmm_dir_table[] = { { .procname = "vm", .maxlen = 0, .mode = 0555, .child = cmm_table, }, { } }; #ifdef CONFIG_CMM_IUCV #define SMSG_PREFIX "CMM" static void cmm_smsg_target(const char *from, char *msg) { long nr, seconds; if (strlen(sender) > 0 && strcmp(from, sender) != 0) return; if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg)) return; if (strncmp(msg, "SHRINK", 6) == 0) { if (!cmm_skip_blanks(msg + 6, &msg)) return; nr = simple_strtoul(msg, &msg, 0); cmm_skip_blanks(msg, &msg); if (*msg == '\0') cmm_set_pages(nr); } 
else if (strncmp(msg, "RELEASE", 7) == 0) { if (!cmm_skip_blanks(msg + 7, &msg)) return; nr = simple_strtoul(msg, &msg, 0); cmm_skip_blanks(msg, &msg); if (*msg == '\0') cmm_add_timed_pages(nr); } else if (strncmp(msg, "REUSE", 5) == 0) { if (!cmm_skip_blanks(msg + 5, &msg)) return; nr = simple_strtoul(msg, &msg, 0); if (!cmm_skip_blanks(msg, &msg)) return; seconds = simple_strtoul(msg, &msg, 0); cmm_skip_blanks(msg, &msg); if (*msg == '\0') cmm_set_timeout(nr, seconds); } } #endif static struct ctl_table_header *cmm_sysctl_header; static int cmm_suspend(void) { cmm_suspended = 1; cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list); cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list); return 0; } static int cmm_resume(void) { cmm_suspended = 0; cmm_kick_thread(); return 0; } static int cmm_power_event(struct notifier_block *this, unsigned long event, void *ptr) { switch (event) { case PM_POST_HIBERNATION: return cmm_resume(); case PM_HIBERNATION_PREPARE: return cmm_suspend(); default: return NOTIFY_DONE; } } static struct notifier_block cmm_power_notifier = { .notifier_call = cmm_power_event, }; static int __init cmm_init(void) { int rc = -ENOMEM; cmm_sysctl_header = register_sysctl_table(cmm_dir_table); if (!cmm_sysctl_header) goto out_sysctl; #ifdef CONFIG_CMM_IUCV /* convert sender to uppercase characters */ if (sender) { int len = strlen(sender); while (len--) sender[len] = toupper(sender[len]); } else { sender = cmm_default_sender; } rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target); if (rc < 0) goto out_smsg; #endif rc = register_oom_notifier(&cmm_oom_nb); if (rc < 0) goto out_oom_notify; rc = register_pm_notifier(&cmm_power_notifier); if (rc) goto out_pm; cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); if (!IS_ERR(cmm_thread_ptr)) return 0; rc = PTR_ERR(cmm_thread_ptr); unregister_pm_notifier(&cmm_power_notifier); out_pm: unregister_oom_notifier(&cmm_oom_nb); out_oom_notify: #ifdef CONFIG_CMM_IUCV 
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target); out_smsg: #endif unregister_sysctl_table(cmm_sysctl_header); out_sysctl: del_timer_sync(&cmm_timer); return rc; } module_init(cmm_init); static void __exit cmm_exit(void) { unregister_sysctl_table(cmm_sysctl_header); #ifdef CONFIG_CMM_IUCV smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target); #endif unregister_pm_notifier(&cmm_power_notifier); unregister_oom_notifier(&cmm_oom_nb); kthread_stop(cmm_thread_ptr); del_timer_sync(&cmm_timer); cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list); cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list); } module_exit(cmm_exit); MODULE_LICENSE("GPL");
gpl-2.0
Zenfone2-Dev/Kernel-for-Asus-Zenfone-2
drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
3374
6673
/* * Mainly by David Woodhouse, somewhat modified by Jordan Crouse * * Copyright © 2006-2007 Red Hat, Inc. * Copyright © 2006-2007 Advanced Micro Devices, Inc. * Copyright © 2009 VIA Technology, Inc. * Copyright (c) 2010 Andres Salomon <dilinger@queued.net> * * This program is free software. You can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cs5535.h> #include <linux/gpio.h> #include <linux/delay.h> #include <asm/olpc.h> #include "olpc_dcon.h" static int dcon_init_xo_1(struct dcon_priv *dcon) { unsigned char lob; if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) { pr_err("failed to request STAT0 GPIO\n"); return -EIO; } if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) { pr_err("failed to request STAT1 GPIO\n"); goto err_gp_stat1; } if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) { pr_err("failed to request IRQ GPIO\n"); goto err_gp_irq; } if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) { pr_err("failed to request LOAD GPIO\n"); goto err_gp_load; } if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) { pr_err("failed to request BLANK GPIO\n"); goto err_gp_blank; } /* Turn off the event enable for GPIO7 just to be safe */ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE); /* * Determine the current state by reading the GPIO bit; earlier * stages of the boot process have established the state. * * Note that we read GPIO_OUPUT_VAL rather than GPIO_READ_BACK here; * this is because OFW will disable input for the pin and set a value.. * READ_BACK will only contain a valid value if input is enabled and * then a value is set. So, future readings of the pin can use * READ_BACK, but the first one cannot. Awesome, huh? */ dcon->curr_src = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL) ? 
DCON_SOURCE_CPU : DCON_SOURCE_DCON; dcon->pending_src = dcon->curr_src; /* Set the directions for the GPIO pins */ gpio_direction_input(OLPC_GPIO_DCON_STAT0); gpio_direction_input(OLPC_GPIO_DCON_STAT1); gpio_direction_input(OLPC_GPIO_DCON_IRQ); gpio_direction_input(OLPC_GPIO_DCON_BLANK); gpio_direction_output(OLPC_GPIO_DCON_LOAD, dcon->curr_src == DCON_SOURCE_CPU); /* Set up the interrupt mappings */ /* Set the IRQ to pair 2 */ cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0); /* Enable group 2 to trigger the DCON interrupt */ cs5535_gpio_set_irq(2, DCON_IRQ); /* Select edge level for interrupt (in PIC) */ lob = inb(0x4d0); lob &= ~(1 << DCON_IRQ); outb(lob, 0x4d0); /* Register the interrupt handler */ if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", dcon)) { pr_err("failed to request DCON's irq\n"); goto err_req_irq; } /* Clear INV_EN for GPIO7 (DCONIRQ) */ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT); /* Enable filter for GPIO12 (DCONBLANK) */ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER); /* Disable filter for GPIO7 */ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER); /* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */ cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT); cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT); /* Add GPIO12 to the Filter Event Pair #7 */ cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL); /* Turn off negative Edge Enable for GPIO12 */ cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN); /* Enable negative Edge Enable for GPIO7 */ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN); /* Zero the filter amount for Filter Event Pair #7 */ cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT); /* Clear the negative edge status for GPIO7 and GPIO12 */ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS); cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS); /* FIXME: Clear the positive status as well, just to be sure */ 
cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS); cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS); /* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE); cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE); return 0; err_req_irq: gpio_free(OLPC_GPIO_DCON_BLANK); err_gp_blank: gpio_free(OLPC_GPIO_DCON_LOAD); err_gp_load: gpio_free(OLPC_GPIO_DCON_IRQ); err_gp_irq: gpio_free(OLPC_GPIO_DCON_STAT1); err_gp_stat1: gpio_free(OLPC_GPIO_DCON_STAT0); return -EIO; } static void dcon_wiggle_xo_1(void) { int x; /* * According to HiMax, when powering the DCON up we should hold * SMB_DATA high for 8 SMB_CLK cycles. This will force the DCON * state machine to reset to a (sane) initial state. Mitch Bradley * did some testing and discovered that holding for 16 SMB_CLK cycles * worked a lot more reliably, so that's what we do here. * * According to the cs5536 spec, to set GPIO14 to SMB_CLK we must * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and * GPIO15. 
*/ cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL); cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE); cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE); cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1); cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1); cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2); cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2); cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1); cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1); for (x = 0; x < 16; x++) { udelay(5); cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); udelay(5); cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); } udelay(5); cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1); cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1); cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1); cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1); } static void dcon_set_dconload_1(int val) { gpio_set_value(OLPC_GPIO_DCON_LOAD, val); } static int dcon_read_status_xo_1(u8 *status) { *status = gpio_get_value(OLPC_GPIO_DCON_STAT0); *status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1; /* Clear the negative edge status for GPIO7 */ cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS); return 0; } struct dcon_platform_data dcon_pdata_xo_1 = { .init = dcon_init_xo_1, .bus_stabilize_wiggle = dcon_wiggle_xo_1, .set_dconload = dcon_set_dconload_1, .read_status = dcon_read_status_xo_1, };
gpl-2.0
StarKissed/starkissed-kernel-mecha
net/netfilter/xt_osf.c
7982
10051
/* * Copyright (c) 2003+ Evgeniy Polyakov <zbr@ioremap.net> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/if.h> #include <linux/inetdevice.h> #include <linux/ip.h> #include <linux/list.h> #include <linux/rculist.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/tcp.h> #include <net/ip.h> #include <net/tcp.h> #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_log.h> #include <linux/netfilter/xt_osf.h> struct xt_osf_finger { struct rcu_head rcu_head; struct list_head finger_entry; struct xt_osf_user_finger finger; }; enum osf_fmatch_states { /* Packet does not match the fingerprint */ FMATCH_WRONG = 0, /* Packet matches the fingerprint */ FMATCH_OK, /* Options do not match the fingerprint, but header does */ FMATCH_OPT_WRONG, }; /* * Indexed by dont-fragment bit. * It is the only constant value in the fingerprint. 
*/ static struct list_head xt_osf_fingers[2]; static const struct nla_policy xt_osf_policy[OSF_ATTR_MAX + 1] = { [OSF_ATTR_FINGER] = { .len = sizeof(struct xt_osf_user_finger) }, }; static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const osf_attrs[]) { struct xt_osf_user_finger *f; struct xt_osf_finger *kf = NULL, *sf; int err = 0; if (!osf_attrs[OSF_ATTR_FINGER]) return -EINVAL; if (!(nlh->nlmsg_flags & NLM_F_CREATE)) return -EINVAL; f = nla_data(osf_attrs[OSF_ATTR_FINGER]); kf = kmalloc(sizeof(struct xt_osf_finger), GFP_KERNEL); if (!kf) return -ENOMEM; memcpy(&kf->finger, f, sizeof(struct xt_osf_user_finger)); list_for_each_entry(sf, &xt_osf_fingers[!!f->df], finger_entry) { if (memcmp(&sf->finger, f, sizeof(struct xt_osf_user_finger))) continue; kfree(kf); kf = NULL; if (nlh->nlmsg_flags & NLM_F_EXCL) err = -EEXIST; break; } /* * We are protected by nfnl mutex. */ if (kf) list_add_tail_rcu(&kf->finger_entry, &xt_osf_fingers[!!f->df]); return err; } static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const osf_attrs[]) { struct xt_osf_user_finger *f; struct xt_osf_finger *sf; int err = -ENOENT; if (!osf_attrs[OSF_ATTR_FINGER]) return -EINVAL; f = nla_data(osf_attrs[OSF_ATTR_FINGER]); list_for_each_entry(sf, &xt_osf_fingers[!!f->df], finger_entry) { if (memcmp(&sf->finger, f, sizeof(struct xt_osf_user_finger))) continue; /* * We are protected by nfnl mutex. 
*/ list_del_rcu(&sf->finger_entry); kfree_rcu(sf, rcu_head); err = 0; break; } return err; } static const struct nfnl_callback xt_osf_nfnetlink_callbacks[OSF_MSG_MAX] = { [OSF_MSG_ADD] = { .call = xt_osf_add_callback, .attr_count = OSF_ATTR_MAX, .policy = xt_osf_policy, }, [OSF_MSG_REMOVE] = { .call = xt_osf_remove_callback, .attr_count = OSF_ATTR_MAX, .policy = xt_osf_policy, }, }; static const struct nfnetlink_subsystem xt_osf_nfnetlink = { .name = "osf", .subsys_id = NFNL_SUBSYS_OSF, .cb_count = OSF_MSG_MAX, .cb = xt_osf_nfnetlink_callbacks, }; static inline int xt_osf_ttl(const struct sk_buff *skb, const struct xt_osf_info *info, unsigned char f_ttl) { const struct iphdr *ip = ip_hdr(skb); if (info->flags & XT_OSF_TTL) { if (info->ttl == XT_OSF_TTL_TRUE) return ip->ttl == f_ttl; if (info->ttl == XT_OSF_TTL_NOCHECK) return 1; else if (ip->ttl <= f_ttl) return 1; else { struct in_device *in_dev = __in_dev_get_rcu(skb->dev); int ret = 0; for_ifa(in_dev) { if (inet_ifa_match(ip->saddr, ifa)) { ret = (ip->ttl == f_ttl); break; } } endfor_ifa(in_dev); return ret; } } return ip->ttl == f_ttl; } static bool xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p) { const struct xt_osf_info *info = p->matchinfo; const struct iphdr *ip = ip_hdr(skb); const struct tcphdr *tcp; struct tcphdr _tcph; int fmatch = FMATCH_WRONG, fcount = 0; unsigned int optsize = 0, check_WSS = 0; u16 window, totlen, mss = 0; bool df; const unsigned char *optp = NULL, *_optp = NULL; unsigned char opts[MAX_IPOPTLEN]; const struct xt_osf_finger *kf; const struct xt_osf_user_finger *f; if (!info) return false; tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph); if (!tcp) return false; if (!tcp->syn) return false; totlen = ntohs(ip->tot_len); df = ntohs(ip->frag_off) & IP_DF; window = ntohs(tcp->window); if (tcp->doff * 4 > sizeof(struct tcphdr)) { optsize = tcp->doff * 4 - sizeof(struct tcphdr); _optp = optp = skb_header_pointer(skb, ip_hdrlen(skb) + 
sizeof(struct tcphdr), optsize, opts); } rcu_read_lock(); list_for_each_entry_rcu(kf, &xt_osf_fingers[df], finger_entry) { f = &kf->finger; if (!(info->flags & XT_OSF_LOG) && strcmp(info->genre, f->genre)) continue; optp = _optp; fmatch = FMATCH_WRONG; if (totlen == f->ss && xt_osf_ttl(skb, info, f->ttl)) { int foptsize, optnum; /* * Should not happen if userspace parser was written correctly. */ if (f->wss.wc >= OSF_WSS_MAX) continue; /* Check options */ foptsize = 0; for (optnum = 0; optnum < f->opt_num; ++optnum) foptsize += f->opt[optnum].length; if (foptsize > MAX_IPOPTLEN || optsize > MAX_IPOPTLEN || optsize != foptsize) continue; check_WSS = f->wss.wc; for (optnum = 0; optnum < f->opt_num; ++optnum) { if (f->opt[optnum].kind == (*optp)) { __u32 len = f->opt[optnum].length; const __u8 *optend = optp + len; int loop_cont = 0; fmatch = FMATCH_OK; switch (*optp) { case OSFOPT_MSS: mss = optp[3]; mss <<= 8; mss |= optp[2]; mss = ntohs(mss); break; case OSFOPT_TS: loop_cont = 1; break; } optp = optend; } else fmatch = FMATCH_OPT_WRONG; if (fmatch != FMATCH_OK) break; } if (fmatch != FMATCH_OPT_WRONG) { fmatch = FMATCH_WRONG; switch (check_WSS) { case OSF_WSS_PLAIN: if (f->wss.val == 0 || window == f->wss.val) fmatch = FMATCH_OK; break; case OSF_WSS_MSS: /* * Some smart modems decrease mangle MSS to * SMART_MSS_2, so we check standard, decreased * and the one provided in the fingerprint MSS * values. 
*/ #define SMART_MSS_1 1460 #define SMART_MSS_2 1448 if (window == f->wss.val * mss || window == f->wss.val * SMART_MSS_1 || window == f->wss.val * SMART_MSS_2) fmatch = FMATCH_OK; break; case OSF_WSS_MTU: if (window == f->wss.val * (mss + 40) || window == f->wss.val * (SMART_MSS_1 + 40) || window == f->wss.val * (SMART_MSS_2 + 40)) fmatch = FMATCH_OK; break; case OSF_WSS_MODULO: if ((window % f->wss.val) == 0) fmatch = FMATCH_OK; break; } } if (fmatch != FMATCH_OK) continue; fcount++; if (info->flags & XT_OSF_LOG) nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL, "%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n", f->genre, f->version, f->subtype, &ip->saddr, ntohs(tcp->source), &ip->daddr, ntohs(tcp->dest), f->ttl - ip->ttl); if ((info->flags & XT_OSF_LOG) && info->loglevel == XT_OSF_LOGLEVEL_FIRST) break; } } rcu_read_unlock(); if (!fcount && (info->flags & XT_OSF_LOG)) nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL, "Remote OS is not known: %pI4:%u -> %pI4:%u\n", &ip->saddr, ntohs(tcp->source), &ip->daddr, ntohs(tcp->dest)); if (fcount) fmatch = FMATCH_OK; return fmatch == FMATCH_OK; } static struct xt_match xt_osf_match = { .name = "osf", .revision = 0, .family = NFPROTO_IPV4, .proto = IPPROTO_TCP, .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_FORWARD), .match = xt_osf_match_packet, .matchsize = sizeof(struct xt_osf_info), .me = THIS_MODULE, }; static int __init xt_osf_init(void) { int err = -EINVAL; int i; for (i=0; i<ARRAY_SIZE(xt_osf_fingers); ++i) INIT_LIST_HEAD(&xt_osf_fingers[i]); err = nfnetlink_subsys_register(&xt_osf_nfnetlink); if (err < 0) { pr_err("Failed to register OSF nsfnetlink helper (%d)\n", err); goto err_out_exit; } err = xt_register_match(&xt_osf_match); if (err) { pr_err("Failed to register OS fingerprint " "matching module (%d)\n", err); goto err_out_remove; } return 0; err_out_remove: nfnetlink_subsys_unregister(&xt_osf_nfnetlink); err_out_exit: return err; } static void __exit 
xt_osf_fini(void) { struct xt_osf_finger *f; int i; nfnetlink_subsys_unregister(&xt_osf_nfnetlink); xt_unregister_match(&xt_osf_match); rcu_read_lock(); for (i=0; i<ARRAY_SIZE(xt_osf_fingers); ++i) { list_for_each_entry_rcu(f, &xt_osf_fingers[i], finger_entry) { list_del_rcu(&f->finger_entry); kfree_rcu(f, rcu_head); } } rcu_read_unlock(); rcu_barrier(); } module_init(xt_osf_init); module_exit(xt_osf_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); MODULE_DESCRIPTION("Passive OS fingerprint matching."); MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_OSF);
gpl-2.0
jcsullins/kernel-tenderloin-3.0
arch/mips/pnx8550/common/pci.c
11822
3832
/* * * BRIEF MODULE DESCRIPTION * * Author: source@mvista.com * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <pci.h> #include <glb.h> #include <nand.h> static struct resource pci_io_resource = { .start = PNX8550_PCIIO + 0x1000, /* reserve regacy I/O space */ .end = PNX8550_PCIIO + PNX8550_PCIIO_SIZE, .name = "pci IO space", .flags = IORESOURCE_IO }; static struct resource pci_mem_resource = { .start = PNX8550_PCIMEM, .end = PNX8550_PCIMEM + PNX8550_PCIMEM_SIZE - 1, .name = "pci memory space", .flags = IORESOURCE_MEM }; extern struct pci_ops pnx8550_pci_ops; static struct pci_controller pnx8550_controller = { .pci_ops = &pnx8550_pci_ops, .io_map_base = PNX8550_PORT_BASE, .io_resource = &pci_io_resource, .mem_resource = &pci_mem_resource, }; /* Return the total size of DRAM-memory, (RANK0 + RANK1) */ static inline unsigned long get_system_mem_size(void) { /* Read IP2031_RANK0_ADDR_LO */ unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010); /* Read IP2031_RANK1_ADDR_HI */ unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018); return dram_r1_hi - dram_r0_lo + 1; } static int __init pnx8550_pci_setup(void) { int pci_mem_code; int mem_size = get_system_mem_size() >> 20; /* Clear the Global 2 Register, PCI Inta Output Enable Registers Bit 1:Enable DAC Powerdown -> 0:DACs are enabled and are 
working normally 1:DACs are powerdown Bit 0:Enable of PCI inta output -> 0 = Disable PCI inta output 1 = Enable PCI inta output */ PNX8550_GLB2_ENAB_INTA_O = 0; /* Calc the PCI mem size code */ if (mem_size >= 128) pci_mem_code = SIZE_128M; else if (mem_size >= 64) pci_mem_code = SIZE_64M; else if (mem_size >= 32) pci_mem_code = SIZE_32M; else pci_mem_code = SIZE_16M; /* Set PCI_XIO registers */ outl(pci_mem_resource.start, PCI_BASE | PCI_BASE1_LO); outl(pci_mem_resource.end + 1, PCI_BASE | PCI_BASE1_HI); outl(pci_io_resource.start, PCI_BASE | PCI_BASE2_LO); outl(pci_io_resource.end, PCI_BASE | PCI_BASE2_HI); /* Send memory transaction via PCI_BASE2 */ outl(0x00000001, PCI_BASE | PCI_IO); /* Unlock the setup register */ outl(0xca, PCI_BASE | PCI_UNLOCKREG); /* * BAR0 of PNX8550 (pci base 10) must be zero in order for ide * to work, and in order for bus_to_baddr to work without any * hacks. */ outl(0x00000000, PCI_BASE | PCI_BASE10); /* *These two bars are set by default or the boot code. * However, it's safer to set them here so we're not boot * code dependent. */ outl(0x1be00000, PCI_BASE | PCI_BASE14); /* PNX MMIO */ outl(PNX8550_NAND_BASE_ADDR, PCI_BASE | PCI_BASE18); /* XIO */ outl(PCI_EN_TA | PCI_EN_PCI2MMI | PCI_EN_XIO | PCI_SETUP_BASE18_SIZE(SIZE_32M) | PCI_SETUP_BASE18_EN | PCI_SETUP_BASE14_EN | PCI_SETUP_BASE10_PREF | PCI_SETUP_BASE10_SIZE(pci_mem_code) | PCI_SETUP_CFGMANAGE_EN | PCI_SETUP_PCIARB_EN, PCI_BASE | PCI_SETUP); /* PCI_SETUP */ outl(0x00000000, PCI_BASE | PCI_CTRL); /* PCI_CONTROL */ register_pci_controller(&pnx8550_controller); return 0; } arch_initcall(pnx8550_pci_setup);
gpl-2.0
Red680812/X920D
arch/mips/pnx8550/common/pci.c
11822
3832
/* * * BRIEF MODULE DESCRIPTION * * Author: source@mvista.com * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <pci.h> #include <glb.h> #include <nand.h> static struct resource pci_io_resource = { .start = PNX8550_PCIIO + 0x1000, /* reserve regacy I/O space */ .end = PNX8550_PCIIO + PNX8550_PCIIO_SIZE, .name = "pci IO space", .flags = IORESOURCE_IO }; static struct resource pci_mem_resource = { .start = PNX8550_PCIMEM, .end = PNX8550_PCIMEM + PNX8550_PCIMEM_SIZE - 1, .name = "pci memory space", .flags = IORESOURCE_MEM }; extern struct pci_ops pnx8550_pci_ops; static struct pci_controller pnx8550_controller = { .pci_ops = &pnx8550_pci_ops, .io_map_base = PNX8550_PORT_BASE, .io_resource = &pci_io_resource, .mem_resource = &pci_mem_resource, }; /* Return the total size of DRAM-memory, (RANK0 + RANK1) */ static inline unsigned long get_system_mem_size(void) { /* Read IP2031_RANK0_ADDR_LO */ unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010); /* Read IP2031_RANK1_ADDR_HI */ unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018); return dram_r1_hi - dram_r0_lo + 1; } static int __init pnx8550_pci_setup(void) { int pci_mem_code; int mem_size = get_system_mem_size() >> 20; /* Clear the Global 2 Register, PCI Inta Output Enable Registers Bit 1:Enable DAC Powerdown -> 0:DACs are enabled and are 
working normally 1:DACs are powerdown Bit 0:Enable of PCI inta output -> 0 = Disable PCI inta output 1 = Enable PCI inta output */ PNX8550_GLB2_ENAB_INTA_O = 0; /* Calc the PCI mem size code */ if (mem_size >= 128) pci_mem_code = SIZE_128M; else if (mem_size >= 64) pci_mem_code = SIZE_64M; else if (mem_size >= 32) pci_mem_code = SIZE_32M; else pci_mem_code = SIZE_16M; /* Set PCI_XIO registers */ outl(pci_mem_resource.start, PCI_BASE | PCI_BASE1_LO); outl(pci_mem_resource.end + 1, PCI_BASE | PCI_BASE1_HI); outl(pci_io_resource.start, PCI_BASE | PCI_BASE2_LO); outl(pci_io_resource.end, PCI_BASE | PCI_BASE2_HI); /* Send memory transaction via PCI_BASE2 */ outl(0x00000001, PCI_BASE | PCI_IO); /* Unlock the setup register */ outl(0xca, PCI_BASE | PCI_UNLOCKREG); /* * BAR0 of PNX8550 (pci base 10) must be zero in order for ide * to work, and in order for bus_to_baddr to work without any * hacks. */ outl(0x00000000, PCI_BASE | PCI_BASE10); /* *These two bars are set by default or the boot code. * However, it's safer to set them here so we're not boot * code dependent. */ outl(0x1be00000, PCI_BASE | PCI_BASE14); /* PNX MMIO */ outl(PNX8550_NAND_BASE_ADDR, PCI_BASE | PCI_BASE18); /* XIO */ outl(PCI_EN_TA | PCI_EN_PCI2MMI | PCI_EN_XIO | PCI_SETUP_BASE18_SIZE(SIZE_32M) | PCI_SETUP_BASE18_EN | PCI_SETUP_BASE14_EN | PCI_SETUP_BASE10_PREF | PCI_SETUP_BASE10_SIZE(pci_mem_code) | PCI_SETUP_CFGMANAGE_EN | PCI_SETUP_PCIARB_EN, PCI_BASE | PCI_SETUP); /* PCI_SETUP */ outl(0x00000000, PCI_BASE | PCI_CTRL); /* PCI_CONTROL */ register_pci_controller(&pnx8550_controller); return 0; } arch_initcall(pnx8550_pci_setup);
gpl-2.0
zarboz/brick_kernel_msm8960
arch/x86/kernel/pci-iommu_table.c
12078
1789
#include <linux/dma-mapping.h>
#include <asm/iommu_table.h>
#include <linux/string.h>
#include <linux/kallsyms.h>

#define DEBUG 1

/*
 * Locate, within [start, finish), the entry whose ->detect hook is the
 * one that @q depends on (q->depend).  Returns NULL when @q is NULL or
 * when no provider exists in the table.
 */
static struct iommu_table_entry * __init
find_dependents_of(struct iommu_table_entry *start,
		   struct iommu_table_entry *finish,
		   struct iommu_table_entry *q)
{
	struct iommu_table_entry *walk;

	if (!q)
		return NULL;

	for (walk = start; walk < finish; walk++) {
		if (walk->detect == q->depend)
			return walk;
	}

	return NULL;
}

/*
 * Order the IOMMU detection table in place so that every entry appears
 * after the entry it depends on.  Uses the entries' addresses to tell
 * whether a dependency still lies ahead, swapping the pair and
 * re-checking until the current slot is settled.
 */
void __init sort_iommu_table(struct iommu_table_entry *start,
			     struct iommu_table_entry *finish)
{
	struct iommu_table_entry *cur, *dep, swap;

	for (cur = start; cur < finish; cur++) {
again:
		dep = find_dependents_of(start, finish, cur);
		/*
		 * We are bit sneaky here: compare memory addresses.  If
		 * the node we depend on sits past our position, swap the
		 * two entries and re-examine this slot, since the entry
		 * swapped in may itself have a later dependency.
		 */
		if (dep > cur) {
			swap = *cur;
			memmove(cur, dep, sizeof(*cur));
			*dep = swap;
			goto again;
		}
	}
}

#ifdef DEBUG
/*
 * Sanity pass over the (sorted) table: report mutual dependencies and
 * break them, then report any entry whose provider still comes later.
 */
void __init check_iommu_entries(struct iommu_table_entry *start,
				struct iommu_table_entry *finish)
{
	struct iommu_table_entry *cur, *dep, *back;

	/* Simple cyclic dependency checker. */
	for (cur = start; cur < finish; cur++) {
		dep = find_dependents_of(start, finish, cur);
		back = find_dependents_of(start, finish, dep);
		if (cur == back) {
			printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n",
			       cur->detect, dep->detect);
			/* Heavy handed way.. */
			back->depend = 0;
		}
	}

	/* Verify the sort: nothing ahead of us may be our provider. */
	for (cur = start; cur < finish; cur++) {
		dep = find_dependents_of(cur, finish, cur);
		if (dep && dep > cur) {
			printk(KERN_ERR "EXECUTION ORDER INVALID! %pS should be called before %pS!\n",
			       cur->detect, dep->detect);
		}
	}
}
#else
inline void check_iommu_entries(struct iommu_table_entry *start,
				struct iommu_table_entry *finish)
{
}
#endif
gpl-2.0
virtuous/kernel-7x30-froyo-v2
fs/efs/file.c
12846
1190
/*
 * file.c
 *
 * Copyright (c) 1999 Al Smith
 *
 * Portions derived from work (c) 1995,1996 Christian Vogelgsang.
 */

#include <linux/buffer_head.h>
#include "efs.h"

/*
 * efs_get_block - get_block_t callback mapping a file block to a device
 * block.
 *
 * @inode:	file whose block is being mapped
 * @iblock:	file-relative block number
 * @bh_result:	buffer head to fill in on success
 * @create:	allocation request flag; EFS is read-only, so any create
 *		request fails with -EROFS
 *
 * Returns 0 on success or when @iblock is out of range (the buffer is
 * simply left unmapped), -EROFS for allocation attempts.
 *
 * Fix: the DEBUG printk previously referenced the undeclared identifier
 * `block` (the parameter is `iblock`), which broke the build whenever
 * DEBUG was defined, and used format specifiers that did not match
 * sector_t/blkcnt_t/loff_t.
 */
int efs_get_block(struct inode *inode, sector_t iblock,
		  struct buffer_head *bh_result, int create)
{
	int error = -EROFS;
	long phys;

	/* Read-only filesystem: never allocate. */
	if (create)
		return error;
	if (iblock >= inode->i_blocks) {
#ifdef DEBUG
		/*
		 * i have no idea why this happens as often as it does
		 */
		printk(KERN_WARNING "EFS: efs_get_block(): block %lu >= %llu (filesize %lld)\n",
		       (unsigned long)iblock,
		       (unsigned long long)inode->i_blocks,
		       (long long)inode->i_size);
#endif
		return 0;
	}
	phys = efs_map_block(inode, iblock);
	if (phys)
		map_bh(bh_result, inode->i_sb, phys);
	return 0;
}

/*
 * efs_bmap - map a logical block of @inode to its on-disk block.
 *
 * Returns the mapped block number, or 0 when @block is negative or past
 * the end of the file.
 *
 * NOTE(review): the `block < 0` guard is only meaningful if efs_block_t
 * is a signed type — confirm against its definition in efs.h.
 */
int efs_bmap(struct inode *inode, efs_block_t block)
{
	if (block < 0) {
		printk(KERN_WARNING "EFS: bmap(): block < 0\n");
		return 0;
	}

	/* are we about to read past the end of a file ? */
	if (!(block < inode->i_blocks)) {
#ifdef DEBUG
		/*
		 * i have no idea why this happens as often as it does
		 */
		printk(KERN_WARNING "EFS: bmap(): block %d >= %llu (filesize %lld)\n",
		       block,
		       (unsigned long long)inode->i_blocks,
		       (long long)inode->i_size);
#endif
		return 0;
	}
	return efs_map_block(inode, block);
}
gpl-2.0
bhuman/KernelV4
drivers/net/wireless/ath/ath5k/pcu.c
47
26897
/* * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org> * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com> * Copyright (c) 2007-2008 Matthew W. S. Bell <mentor@madwifi.org> * Copyright (c) 2007-2008 Luis Rodriguez <mcgrof@winlab.rutgers.edu> * Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org> * Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ /*********************************\ * Protocol Control Unit Functions * \*********************************/ #include <asm/unaligned.h> #include "ath5k.h" #include "reg.h" #include "debug.h" #include "base.h" /*******************\ * Generic functions * \*******************/ /** * ath5k_hw_set_opmode - Set PCU operating mode * * @ah: The &struct ath5k_hw * * Initialize PCU for the various operating modes (AP/STA etc) * * NOTE: ah->ah_op_mode must be set before calling this. */ int ath5k_hw_set_opmode(struct ath5k_hw *ah) { struct ath_common *common = ath5k_hw_common(ah); u32 pcu_reg, beacon_reg, low_id, high_id; /* Preserve rest settings */ pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000; pcu_reg &= ~(AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE | (ah->ah_version == AR5K_AR5210 ? 
(AR5K_STA_ID1_PWR_SV | AR5K_STA_ID1_NO_PSPOLL) : 0)); beacon_reg = 0; ATH5K_TRACE(ah->ah_sc); switch (ah->ah_op_mode) { case NL80211_IFTYPE_ADHOC: pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE; beacon_reg |= AR5K_BCR_ADHOC; if (ah->ah_version == AR5K_AR5210) pcu_reg |= AR5K_STA_ID1_NO_PSPOLL; else AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS); break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE; beacon_reg |= AR5K_BCR_AP; if (ah->ah_version == AR5K_AR5210) pcu_reg |= AR5K_STA_ID1_NO_PSPOLL; else AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS); break; case NL80211_IFTYPE_STATION: pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE | (ah->ah_version == AR5K_AR5210 ? AR5K_STA_ID1_PWR_SV : 0); case NL80211_IFTYPE_MONITOR: pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE | (ah->ah_version == AR5K_AR5210 ? AR5K_STA_ID1_NO_PSPOLL : 0); break; default: return -EINVAL; } /* * Set PCU registers */ low_id = get_unaligned_le32(common->macaddr); high_id = get_unaligned_le16(common->macaddr + 4); ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0); ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1); /* * Set Beacon Control Register on 5210 */ if (ah->ah_version == AR5K_AR5210) ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR); return 0; } /** * ath5k_hw_update - Update mib counters (mac layer statistics) * * @ah: The &struct ath5k_hw * @stats: The &struct ieee80211_low_level_stats we use to track * statistics on the driver * * Reads MIB counters from PCU and updates sw statistics. Must be * called after a MIB interrupt. 
*/ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats) { ATH5K_TRACE(ah->ah_sc); /* Read-And-Clear */ stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL); stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL); stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK); stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL); /* XXX: Should we use this to track beacon count ? * -we read it anyway to clear the register */ ath5k_hw_reg_read(ah, AR5K_BEACON_CNT); /* Reset profile count registers on 5212*/ if (ah->ah_version == AR5K_AR5212) { ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX); ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX); ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR); ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE); } /* TODO: Handle ANI stats */ } /** * ath5k_hw_set_ack_bitrate - set bitrate for ACKs * * @ah: The &struct ath5k_hw * @high: Flag to determine if we want to use high transmition rate * for ACKs or not * * If high flag is set, we tell hw to use a set of control rates based on * the current transmition rate (check out control_rates array inside reset.c). * If not hw just uses the lowest rate available for the current modulation * scheme being used (1Mbit for CCK and 6Mbits for OFDM). 
*/ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high) { if (ah->ah_version != AR5K_AR5212) return; else { u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB; if (high) AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val); else AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val); } } /******************\ * ACK/CTS Timeouts * \******************/ /** * ath5k_hw_het_ack_timeout - Get ACK timeout from PCU in usec * * @ah: The &struct ath5k_hw */ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah) { ATH5K_TRACE(ah->ah_sc); return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo); } /** * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU * * @ah: The &struct ath5k_hw * @timeout: Timeout in usec */ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) { ATH5K_TRACE(ah->ah_sc); if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK), ah->ah_turbo) <= timeout) return -EINVAL; AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK, ath5k_hw_htoclock(timeout, ah->ah_turbo)); return 0; } /** * ath5k_hw_get_cts_timeout - Get CTS timeout from PCU in usec * * @ah: The &struct ath5k_hw */ unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah) { ATH5K_TRACE(ah->ah_sc); return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo); } /** * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU * * @ah: The &struct ath5k_hw * @timeout: Timeout in usec */ int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) { ATH5K_TRACE(ah->ah_sc); if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS), ah->ah_turbo) <= timeout) return -EINVAL; AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS, ath5k_hw_htoclock(timeout, ah->ah_turbo)); return 0; } /** * ath5k_hw_set_lladdr - Set station id * * @ah: The &struct ath5k_hw * @mac: The card's mac address * * Set station id on hw using the provided mac address */ int 
ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) { struct ath_common *common = ath5k_hw_common(ah); u32 low_id, high_id; u32 pcu_reg; ATH5K_TRACE(ah->ah_sc); /* Set new station ID */ memcpy(common->macaddr, mac, ETH_ALEN); pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000; low_id = get_unaligned_le32(mac); high_id = get_unaligned_le16(mac + 4); ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0); ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1); return 0; } /** * ath5k_hw_set_associd - Set BSSID for association * * @ah: The &struct ath5k_hw * @bssid: BSSID * @assoc_id: Assoc id * * Sets the BSSID which trigers the "SME Join" operation */ void ath5k_hw_set_associd(struct ath5k_hw *ah) { struct ath_common *common = ath5k_hw_common(ah); u16 tim_offset = 0; /* * Set simple BSSID mask on 5212 */ if (ah->ah_version == AR5K_AR5212) ath_hw_setbssidmask(common); /* * Set BSSID which triggers the "SME Join" operation */ ath5k_hw_reg_write(ah, get_unaligned_le32(common->curbssid), AR5K_BSS_ID0); ath5k_hw_reg_write(ah, get_unaligned_le16(common->curbssid + 4) | ((common->curaid & 0x3fff) << AR5K_BSS_ID1_AID_S), AR5K_BSS_ID1); if (common->curaid == 0) { ath5k_hw_disable_pspoll(ah); return; } AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM, tim_offset ? tim_offset + 4 : 0); ath5k_hw_enable_pspoll(ah, NULL, 0); } void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask) { struct ath_common *common = ath5k_hw_common(ah); ATH5K_TRACE(ah->ah_sc); /* Cache bssid mask so that we can restore it * on reset */ memcpy(common->bssidmask, mask, ETH_ALEN); if (ah->ah_version == AR5K_AR5212) ath_hw_setbssidmask(common); } /************\ * RX Control * \************/ /** * ath5k_hw_start_rx_pcu - Start RX engine * * @ah: The &struct ath5k_hw * * Starts RX engine on PCU so that hw can process RXed frames * (ACK etc). 
* * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma * TODO: Init ANI here */ void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah) { ATH5K_TRACE(ah->ah_sc); AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); } /** * at5k_hw_stop_rx_pcu - Stop RX engine * * @ah: The &struct ath5k_hw * * Stops RX engine on PCU * * TODO: Detach ANI here */ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah) { ATH5K_TRACE(ah->ah_sc); AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); } /* * Set multicast filter */ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1) { ATH5K_TRACE(ah->ah_sc); /* Set the multicat filter */ ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0); ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1); } /* * Set multicast filter by index */ int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index) { ATH5K_TRACE(ah->ah_sc); if (index >= 64) return -EINVAL; else if (index >= 32) AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1, (1 << (index - 32))); else AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index)); return 0; } /* * Clear Multicast filter by index */ int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index) { ATH5K_TRACE(ah->ah_sc); if (index >= 64) return -EINVAL; else if (index >= 32) AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1, (1 << (index - 32))); else AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index)); return 0; } /** * ath5k_hw_get_rx_filter - Get current rx filter * * @ah: The &struct ath5k_hw * * Returns the RX filter by reading rx filter and * phy error filter registers. RX filter is used * to set the allowed frame types that PCU will accept * and pass to the driver. For a list of frame types * check out reg.h. 
*/ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah) { u32 data, filter = 0; ATH5K_TRACE(ah->ah_sc); filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER); /*Radar detection for 5212*/ if (ah->ah_version == AR5K_AR5212) { data = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL); if (data & AR5K_PHY_ERR_FIL_RADAR) filter |= AR5K_RX_FILTER_RADARERR; if (data & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK)) filter |= AR5K_RX_FILTER_PHYERR; } return filter; } /** * ath5k_hw_set_rx_filter - Set rx filter * * @ah: The &struct ath5k_hw * @filter: RX filter mask (see reg.h) * * Sets RX filter register and also handles PHY error filter * register on 5212 and newer chips so that we have proper PHY * error reporting. */ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter) { u32 data = 0; ATH5K_TRACE(ah->ah_sc); /* Set PHY error filter register on 5212*/ if (ah->ah_version == AR5K_AR5212) { if (filter & AR5K_RX_FILTER_RADARERR) data |= AR5K_PHY_ERR_FIL_RADAR; if (filter & AR5K_RX_FILTER_PHYERR) data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK; } /* * The AR5210 uses promiscous mode to detect radar activity */ if (ah->ah_version == AR5K_AR5210 && (filter & AR5K_RX_FILTER_RADARERR)) { filter &= ~AR5K_RX_FILTER_RADARERR; filter |= AR5K_RX_FILTER_PROM; } /*Zero length DMA (phy error reporting) */ if (data) AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA); else AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA); /*Write RX Filter register*/ ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER); /*Write PHY error filter register on 5212*/ if (ah->ah_version == AR5K_AR5212) ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL); } /****************\ * Beacon control * \****************/ /** * ath5k_hw_get_tsf32 - Get a 32bit TSF * * @ah: The &struct ath5k_hw * * Returns lower 32 bits of current TSF */ u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah) { ATH5K_TRACE(ah->ah_sc); return ath5k_hw_reg_read(ah, AR5K_TSF_L32); } /** * ath5k_hw_get_tsf64 - Get the full 64bit TSF * * @ah: The &struct 
ath5k_hw * * Returns the current TSF */ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah) { u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32); ATH5K_TRACE(ah->ah_sc); return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32); } /** * ath5k_hw_set_tsf64 - Set a new 64bit TSF * * @ah: The &struct ath5k_hw * @tsf64: The new 64bit TSF * * Sets the new TSF */ void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64) { ATH5K_TRACE(ah->ah_sc); ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32); ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32); } /** * ath5k_hw_reset_tsf - Force a TSF reset * * @ah: The &struct ath5k_hw * * Forces a TSF reset on PCU */ void ath5k_hw_reset_tsf(struct ath5k_hw *ah) { u32 val; ATH5K_TRACE(ah->ah_sc); val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF; /* * Each write to the RESET_TSF bit toggles a hardware internal * signal to reset TSF, but if left high it will cause a TSF reset * on the next chip reset as well. Thus we always write the value * twice to clear the signal. */ ath5k_hw_reg_write(ah, val, AR5K_BEACON); ath5k_hw_reg_write(ah, val, AR5K_BEACON); } /* * Initialize beacon timers */ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval) { u32 timer1, timer2, timer3; ATH5K_TRACE(ah->ah_sc); /* * Set the additional timers by mode */ switch (ah->ah_op_mode) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_STATION: /* In STA mode timer1 is used as next wakeup * timer and timer2 as next CFP duration start * timer. Both in 1/8TUs. 
*/ /* TODO: PCF handling */ if (ah->ah_version == AR5K_AR5210) { timer1 = 0xffffffff; timer2 = 0xffffffff; } else { timer1 = 0x0000ffff; timer2 = 0x0007ffff; } /* Mark associated AP as PCF incapable for now */ AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PCF); break; case NL80211_IFTYPE_ADHOC: AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM); default: /* On non-STA modes timer1 is used as next DMA * beacon alert (DBA) timer and timer2 as next * software beacon alert. Both in 1/8TUs. */ timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3; timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3; break; } /* Timer3 marks the end of our ATIM window * a zero length window is not allowed because * we 'll get no beacons */ timer3 = next_beacon + (ah->ah_atim_window ? ah->ah_atim_window : 1); /* * Set the beacon register and enable all timers. */ /* When in AP or Mesh Point mode zero timer0 to start TSF */ if (ah->ah_op_mode == NL80211_IFTYPE_AP || ah->ah_op_mode == NL80211_IFTYPE_MESH_POINT) ath5k_hw_reg_write(ah, 0, AR5K_TIMER0); ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0); ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1); ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2); ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3); /* Force a TSF reset if requested and enable beacons */ if (interval & AR5K_BEACON_RESET_TSF) ath5k_hw_reset_tsf(ah); ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD | AR5K_BEACON_ENABLE), AR5K_BEACON); /* Flush any pending BMISS interrupts on ISR by * performing a clear-on-write operation on PISR * register for the BMISS bit (writing a bit on * ISR togles a reset for that bit and leaves * the rest bits intact) */ if (ah->ah_version == AR5K_AR5210) ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_ISR); else ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_PISR); /* TODO: Set enchanced sleep registers on AR5212 * based on vif->bss_conf params, until then * disable power save reporting.*/ AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, 
AR5K_STA_ID1_PWR_SV); } #if 0 /* * Set beacon timers */ int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah, const struct ath5k_beacon_state *state) { u32 cfp_period, next_cfp, dtim, interval, next_beacon; /* * TODO: should be changed through *state * review struct ath5k_beacon_state struct * * XXX: These are used for cfp period bellow, are they * ok ? Is it O.K. for tsf here to be 0 or should we use * get_tsf ? */ u32 dtim_count = 0; /* XXX */ u32 cfp_count = 0; /* XXX */ u32 tsf = 0; /* XXX */ ATH5K_TRACE(ah->ah_sc); /* Return on an invalid beacon state */ if (state->bs_interval < 1) return -EINVAL; interval = state->bs_interval; dtim = state->bs_dtim_period; /* * PCF support? */ if (state->bs_cfp_period > 0) { /* * Enable PCF mode and set the CFP * (Contention Free Period) and timer registers */ cfp_period = state->bs_cfp_period * state->bs_dtim_period * state->bs_interval; next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) * state->bs_interval; AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF); ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD); ath5k_hw_reg_write(ah, state->bs_cfp_max_duration, AR5K_CFP_DUR); ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period : next_cfp)) << 3, AR5K_TIMER2); } else { /* Disable PCF mode */ AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF); } /* * Enable the beacon timer register */ ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0); /* * Start the beacon timers */ ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) & ~(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) | AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0, AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval, AR5K_BEACON_PERIOD), AR5K_BEACON); /* * Write new beacon miss threshold, if it appears to be valid * XXX: Figure out right values for min <= bs_bmiss_threshold <= max * and return if its not in range. 
We can test this by reading value and * setting value to a largest value and seeing which values register. */ AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS, state->bs_bmiss_threshold); /* * Set sleep control register * XXX: Didn't find this in 5210 code but since this register * exists also in ar5k's 5210 headers i leave it as common code. */ AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR, (state->bs_sleep_duration - 3) << 3); /* * Set enhanced sleep registers on 5212 */ if (ah->ah_version == AR5K_AR5212) { if (state->bs_sleep_duration > state->bs_interval && roundup(state->bs_sleep_duration, interval) == state->bs_sleep_duration) interval = state->bs_sleep_duration; if (state->bs_sleep_duration > dtim && (dtim == 0 || roundup(state->bs_sleep_duration, dtim) == state->bs_sleep_duration)) dtim = state->bs_sleep_duration; if (interval > dtim) return -EINVAL; next_beacon = interval == dtim ? state->bs_next_dtim : state->bs_next_beacon; ath5k_hw_reg_write(ah, AR5K_REG_SM((state->bs_next_dtim - 3) << 3, AR5K_SLEEP0_NEXT_DTIM) | AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) | AR5K_SLEEP0_ENH_SLEEP_EN | AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0); ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3, AR5K_SLEEP1_NEXT_TIM) | AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1); ath5k_hw_reg_write(ah, AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) | AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2); } return 0; } /* * Reset beacon timers */ void ath5k_hw_reset_beacon(struct ath5k_hw *ah) { ATH5K_TRACE(ah->ah_sc); /* * Disable beacon timer */ ath5k_hw_reg_write(ah, 0, AR5K_TIMER0); /* * Disable some beacon register values */ AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF); ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON); } /* * Wait for beacon queue to finish */ int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr) { unsigned int i; int ret; ATH5K_TRACE(ah->ah_sc); /* 5210 doesn't have QCU*/ 
if (ah->ah_version == AR5K_AR5210) { /* * Wait for beaconn queue to finish by checking * Control Register and Beacon Status Register. */ for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) { if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F) || !(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F)) break; udelay(10); } /* Timeout... */ if (i <= 0) { /* * Re-schedule the beacon queue */ ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1); ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE, AR5K_BCR); return -EIO; } ret = 0; } else { /*5211/5212*/ ret = ath5k_hw_register_timeout(ah, AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON), AR5K_QCU_STS_FRMPENDCNT, 0, false); if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON)) return -EIO; } return ret; } #endif /*********************\ * Key table functions * \*********************/ /* * Reset a key entry on the table */ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry) { unsigned int i, type; u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET; ATH5K_TRACE(ah->ah_sc); AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE); type = ath5k_hw_reg_read(ah, AR5K_KEYTABLE_TYPE(entry)); for (i = 0; i < AR5K_KEYCACHE_SIZE; i++) ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i)); /* Reset associated MIC entry if TKIP * is enabled located at offset (entry + 64) */ if (type == AR5K_KEYTABLE_TYPE_TKIP) { AR5K_ASSERT_ENTRY(micentry, AR5K_KEYTABLE_SIZE); for (i = 0; i < AR5K_KEYCACHE_SIZE / 2 ; i++) ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(micentry, i)); } /* * Set NULL encryption on AR5212+ * * Note: AR5K_KEYTABLE_TYPE -> AR5K_KEYTABLE_OFF(entry, 5) * AR5K_KEYTABLE_TYPE_NULL -> 0x00000007 * * Note2: Windows driver (ndiswrapper) sets this to * 0x00000714 instead of 0x00000007 */ if (ah->ah_version >= AR5K_AR5211) { ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL, AR5K_KEYTABLE_TYPE(entry)); if (type == AR5K_KEYTABLE_TYPE_TKIP) { ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL, AR5K_KEYTABLE_TYPE(micentry)); } } return 0; } /* * 
Check if a table entry is valid */
int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
{
	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);

	/* Check the validation flag at the end of the entry */
	return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
		AR5K_KEYTABLE_VALID;
}

/*
 * Map a mac80211 key algorithm to the hardware keycache type.
 *
 * Returns one of the AR5K_KEYTABLE_TYPE_* constants, or -EINVAL for
 * algorithms the hardware cannot offload (including WEP keys whose
 * length is neither 40 nor 104 bits).
 */
static int ath5k_keycache_type(const struct ieee80211_key_conf *key)
{
	switch (key->alg) {
	case ALG_TKIP:
		return AR5K_KEYTABLE_TYPE_TKIP;
	case ALG_CCMP:
		return AR5K_KEYTABLE_TYPE_CCM;
	case ALG_WEP:
		if (key->keylen == WLAN_KEY_LEN_WEP40)
			return AR5K_KEYTABLE_TYPE_40;
		else if (key->keylen == WLAN_KEY_LEN_WEP104)
			return AR5K_KEYTABLE_TYPE_104;
		return -EINVAL;
	default:
		return -EINVAL;
	}
	/* Unreachable: every switch arm returns above. */
	return -EINVAL;
}

/*
 * Set a key entry on the table
 *
 * Writes the key material (and, for TKIP, the rx/tx MIC words at
 * entry + AR5K_KEYTABLE_MIC_OFFSET), sets the keycache type register
 * and finally programs/validates the MAC address via
 * ath5k_hw_set_key_lladdr().  Returns 0 on success, -EOPNOTSUPP for
 * out-of-range entries or oversized keys, or a negative keycache-type
 * error from ath5k_keycache_type().
 */
int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
		const struct ieee80211_key_conf *key, const u8 *mac)
{
	unsigned int i;
	int keylen;
	__le32 key_v[5] = {};
	__le32 key0 = 0, key1 = 0;
	__le32 *rxmic, *txmic;
	int keytype;
	u16 micentry = entry + AR5K_KEYTABLE_MIC_OFFSET;
	bool is_tkip;
	const u8 *key_ptr;

	ATH5K_TRACE(ah->ah_sc);

	is_tkip = (key->alg == ALG_TKIP);

	/*
	 * key->keylen comes in from mac80211 in bytes.
	 * TKIP is 128 bit + 128 bit mic
	 */
	keylen = (is_tkip) ? (128 / 8) : key->keylen;

	/*
	 * NOTE(review): the comparisons below use '>' while
	 * AR5K_ASSERT_ENTRY elsewhere guards with the table size as an
	 * exclusive bound — confirm whether '>=' was intended here.
	 */
	if (entry > AR5K_KEYTABLE_SIZE ||
		(is_tkip && micentry > AR5K_KEYTABLE_SIZE))
		return -EOPNOTSUPP;

	if (unlikely(keylen > 16))
		return -EOPNOTSUPP;

	keytype = ath5k_keycache_type(key);
	if (keytype < 0)
		return keytype;

	/*
	 * each key block is 6 bytes wide, written as pairs of
	 * alternating 32 and 16 bit le values.
	 */
	key_ptr = key->key;
	for (i = 0; keylen >= 6; keylen -= 6) {
		memcpy(&key_v[i], key_ptr, 6);
		i += 2;
		key_ptr += 6;
	}
	if (keylen)
		memcpy(&key_v[i], key_ptr, keylen);

	/* intentionally corrupt key until mic is installed */
	if (is_tkip) {
		key0 = key_v[0] = ~key_v[0];
		key1 = key_v[1] = ~key_v[1];
	}

	for (i = 0; i < ARRAY_SIZE(key_v); i++)
		ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
				AR5K_KEYTABLE_OFF(entry, i));

	ath5k_hw_reg_write(ah, keytype, AR5K_KEYTABLE_TYPE(entry));

	if (is_tkip) {
		/* Install rx/tx MIC */
		rxmic = (__le32 *) &key->key[16];
		txmic = (__le32 *) &key->key[24];

		if (ah->ah_combined_mic) {
			/* Hardware with a combined MIC entry interleaves
			 * the tx MIC halves between the rx MIC words. */
			key_v[0] = rxmic[0];
			key_v[1] = cpu_to_le32(le32_to_cpu(txmic[0]) >> 16);
			key_v[2] = rxmic[1];
			key_v[3] = cpu_to_le32(le32_to_cpu(txmic[0]) & 0xffff);
			key_v[4] = txmic[1];
		} else {
			key_v[0] = rxmic[0];
			key_v[1] = 0;
			key_v[2] = rxmic[1];
			key_v[3] = 0;
			key_v[4] = 0;
		}
		for (i = 0; i < ARRAY_SIZE(key_v); i++)
			ath5k_hw_reg_write(ah, le32_to_cpu(key_v[i]),
				AR5K_KEYTABLE_OFF(micentry, i));

		ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
			AR5K_KEYTABLE_TYPE(micentry));
		ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_MAC0(micentry));
		ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_MAC1(micentry));

		/* restore first 2 words of key */
		ath5k_hw_reg_write(ah, le32_to_cpu(~key0),
			AR5K_KEYTABLE_OFF(entry, 0));
		ath5k_hw_reg_write(ah, le32_to_cpu(~key1),
			AR5K_KEYTABLE_OFF(entry, 1));
	}

	return ath5k_hw_set_key_lladdr(ah, entry, mac);
}

/*
 * Program the MAC address for a keycache entry and mark it valid.
 *
 * A NULL @mac denotes the broadcast key: the all-ones address is
 * written directly without going through get_unaligned_le32/le16.
 * Always returns 0 (AR5K_ASSERT_ENTRY may return early on an
 * out-of-range entry).
 */
int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
{
	u32 low_id, high_id;

	ATH5K_TRACE(ah->ah_sc);
	/* Invalid entry (key table overflow) */
	AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);

	/*
	 * MAC may be NULL if it's a broadcast key. In this case no need to
	 * to compute get_unaligned_le32 and get_unaligned_le16 as we
	 * already know it.
	 */
	if (!mac) {
		low_id = 0xffffffff;
		high_id = 0xffff | AR5K_KEYTABLE_VALID;
	} else {
		low_id = get_unaligned_le32(mac);
		high_id = get_unaligned_le16(mac + 4) | AR5K_KEYTABLE_VALID;
	}

	ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry));
	ath5k_hw_reg_write(ah, high_id, AR5K_KEYTABLE_MAC1(entry));

	return 0;
}
gpl-2.0
sigma-random/gcc
libgfortran/generated/_acos_r10.F90
47
1479
! Copyright (C) 2002-2015 Free Software Foundation, Inc.
! Contributed by Paul Brook <paul@nowt.org>
!
!This file is part of the GNU Fortran 95 runtime library (libgfortran).
!
!GNU libgfortran is free software; you can redistribute it and/or
!modify it under the terms of the GNU General Public
!License as published by the Free Software Foundation; either
!version 3 of the License, or (at your option) any later version.

!GNU libgfortran is distributed in the hope that it will be useful,
!but WITHOUT ANY WARRANTY; without even the implied warranty of
!MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!GNU General Public License for more details.
!
!Under Section 7 of GPL version 3, you are granted additional
!permissions described in the GCC Runtime Library Exception, version
!3.1, as published by the Free Software Foundation.
!
!You should have received a copy of the GNU General Public License and
!a copy of the GCC Runtime Library Exception along with this program;
!see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
!<http://www.gnu.org/licenses/>.
!
!This file is machine generated.

#include "config.h"
#include "kinds.inc"
#include "c99_protos.inc"

! Only built when the target provides a REAL(10) kind and the
! corresponding C99 acosl() is available.
#if defined (HAVE_GFC_REAL_10)
#ifdef HAVE_ACOSL

! Specific (non-generic) wrapper for the ACOS intrinsic on
! REAL(kind=10) arguments; simply forwards to the generic acos().
elemental function _gfortran_specific__acos_r10 (parm)
   real (kind=10), intent (in) :: parm
   real (kind=10) :: _gfortran_specific__acos_r10

   _gfortran_specific__acos_r10 = acos (parm)
end function

#endif
#endif
gpl-2.0
jmartinc/video_visstrim
lib/list_debug.c
47
2656
/*
 * Copyright 2006, Red Hat, Inc., Dave Jones
 * Released under the General Public License (GPL).
 *
 * This file contains the linked list implementations for
 * DEBUG_LIST.
 */

#include <linux/export.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rculist.h>

/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 *
 * Debug variant: each WARN below checks one invariant of a healthy
 * list (prev/next cross-links consistent, no double add) before the
 * insertion is performed; the insertion proceeds even if a warning
 * fires.
 */
void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	WARN(next->prev != prev,
		"list_add corruption. next->prev should be "
		"prev (%p), but was %p. (next=%p).\n",
		prev, next->prev, next);
	WARN(prev->next != next,
		"list_add corruption. prev->next should be "
		"next (%p), but was %p. (prev=%p).\n",
		next, prev->next, prev);
	WARN(new == prev || new == next,
	     "list_add double add: new=%p, prev=%p, next=%p.\n",
	     new, prev, next);
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
EXPORT_SYMBOL(__list_add);

/*
 * Unlink @entry from its list after verifying it has not already been
 * deleted (poison values) and that its neighbours still point at it.
 * On any corruption a warning is emitted and the unlink is skipped.
 */
void __list_del_entry(struct list_head *entry)
{
	struct list_head *prev, *next;

	prev = entry->prev;
	next = entry->next;

	if (WARN(next == LIST_POISON1,
		"list_del corruption, %p->next is LIST_POISON1 (%p)\n",
		entry, LIST_POISON1) ||
	    WARN(prev == LIST_POISON2,
		"list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
		entry, LIST_POISON2) ||
	    WARN(prev->next != entry,
		"list_del corruption. prev->next should be %p, "
		"but was %p\n", entry, prev->next) ||
	    WARN(next->prev != entry,
		"list_del corruption. next->prev should be %p, "
		"but was %p\n", entry, next->prev))
		return;

	__list_del(prev, next);
}
EXPORT_SYMBOL(__list_del_entry);

/**
 * list_del - deletes entry from list.
 * @entry: the element to delete from the list.
 * Note: list_empty on entry does not return true after this, the entry is
 * in an undefined state.
 */
void list_del(struct list_head *entry)
{
	__list_del_entry(entry);
	/* Poison the links so a later use-after-delete trips the
	 * corruption checks above instead of silently corrupting. */
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}
EXPORT_SYMBOL(list_del);

/*
 * RCU variants.
 */
void __list_add_rcu(struct list_head *new,
		    struct list_head *prev, struct list_head *next)
{
	WARN(next->prev != prev,
		"list_add_rcu corruption. next->prev should be "
		"prev (%p), but was %p. (next=%p).\n",
		prev, next->prev, next);
	WARN(prev->next != next,
		"list_add_rcu corruption. prev->next should be "
		"next (%p), but was %p. (prev=%p).\n",
		next, prev->next, prev);
	/* Initialize the new node fully before publishing it:
	 * rcu_assign_pointer() provides the release ordering that lets
	 * lockless readers traversing prev->next see a complete node. */
	new->next = next;
	new->prev = prev;
	rcu_assign_pointer(list_next_rcu(prev), new);
	next->prev = new;
}
EXPORT_SYMBOL(__list_add_rcu);
gpl-2.0
cm-3470/android_kernel_samsung_gardalte
drivers/media/video/exynos/gsc/gsc-m2m.c
47
21659
/* linux/drivers/media/video/exynos/gsc/gsc-m2m.c * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * * Samsung EXYNOS5 SoC series G-scaler driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 2 of the License, * or (at your option) any later version. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/version.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/bug.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/list.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/clk.h> #include <media/v4l2-ioctl.h> #include "gsc-core.h" static int gsc_ctx_stop_req(struct gsc_ctx *ctx) { struct gsc_ctx *curr_ctx; struct gsc_dev *gsc = ctx->gsc_dev; int ret = 0; unsigned long flags; curr_ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev); if (!gsc_m2m_run(gsc) || (curr_ctx != ctx)) return 0; spin_lock_irqsave(&ctx->slock, flags); ctx->state |= GSC_CTX_STOP_REQ; spin_unlock_irqrestore(&ctx->slock, flags); ret = wait_event_timeout(gsc->irq_queue, !gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx), GSC_SHUTDOWN_TIMEOUT); if (!ret) ret = -EBUSY; return ret; } static int gsc_m2m_stop_streaming(struct vb2_queue *q) { struct gsc_ctx *ctx = q->drv_priv; struct gsc_dev *gsc = ctx->gsc_dev; int ret; vb2_wait_for_all_buffers(q); ret = gsc_ctx_stop_req(ctx); /* FIXME: need to add v4l2_m2m_job_finish(fail) if ret is timeout */ if (ret < 0) dev_err(&gsc->pdev->dev, "wait timeout : %s\n", __func__); return 0; } static void gsc_m2m_job_abort(void *priv) { struct gsc_ctx *ctx = priv; struct gsc_dev *gsc = ctx->gsc_dev; int ret; vb2_wait_for_all_buffers(v4l2_m2m_get_src_vq(ctx->m2m_ctx)); vb2_wait_for_all_buffers(v4l2_m2m_get_dst_vq(ctx->m2m_ctx)); ret = gsc_ctx_stop_req(ctx); /* FIXME: need to add v4l2_m2m_job_finish(fail) if 
ret is timeout */ if (ret < 0) dev_err(&gsc->pdev->dev, "wait timeout : %s\n", __func__); } int gsc_fill_addr(struct gsc_ctx *ctx) { struct gsc_frame *s_frame, *d_frame; struct vb2_buffer *vb = NULL; int ret = 0; s_frame = &ctx->s_frame; d_frame = &ctx->d_frame; vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx); if (vb->num_planes != s_frame->fmt->num_planes) { gsc_err("gsc(%s): vb(%p) planes=%d s_frame(%p) planes=%d\n", v4l2_m2m_get_src_vq(ctx->m2m_ctx)->name, vb, vb->num_planes, s_frame, s_frame->fmt->num_planes); return -EINVAL; } ret = gsc_prepare_addr(ctx, vb, s_frame, &s_frame->addr); if (ret) return ret; vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); if (vb->num_planes != d_frame->fmt->num_planes) { gsc_err("gsc(%s): vb(%p) planes=%d d_frame(%p) planes=%d\n", v4l2_m2m_get_dst_vq(ctx->m2m_ctx)->name, vb, vb->num_planes, d_frame, d_frame->fmt->num_planes); return -EINVAL; } ret = gsc_prepare_addr(ctx, vb, d_frame, &d_frame->addr); return ret; } void gsc_op_timer_handler(unsigned long arg) { struct gsc_ctx *ctx = (struct gsc_ctx *)arg; struct gsc_dev *gsc = ctx->gsc_dev; struct vb2_buffer *src_vb, *dst_vb; gsc_dump_registers(gsc); clear_bit(ST_M2M_RUN, &gsc->state); pm_runtime_put(&gsc->pdev->dev); src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); if (src_vb && dst_vb) { v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR); v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR); } gsc_err("GSCALER[%d] interrupt hasn't been triggered", gsc->id); gsc_err("erro ctx: %p, ctx->state: 0x%x", ctx, ctx->state); } static void gsc_m2m_device_run(void *priv) { struct gsc_ctx *ctx = priv; struct gsc_dev *gsc; unsigned long flags; int ret; bool is_set = false; if (WARN(!ctx, "null hardware context\n")) return; gsc = ctx->gsc_dev; if (in_irq()) pm_runtime_get(&gsc->pdev->dev); else pm_runtime_get_sync(&gsc->pdev->dev); spin_lock_irqsave(&ctx->slock, flags); /* Reconfigure hardware if the context has changed. 
*/ if (gsc->m2m.ctx != ctx) { gsc_dbg("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p", gsc->m2m.ctx, ctx); ctx->state |= GSC_PARAMS; gsc->m2m.ctx = ctx; } is_set = (ctx->state & GSC_CTX_STOP_REQ) ? 1 : 0; ctx->state &= ~GSC_CTX_STOP_REQ; if (is_set) { wake_up(&gsc->irq_queue); goto put_device; } ret = gsc_fill_addr(ctx); if (ret) { gsc_err("Wrong address"); goto put_device; } if (!gsc->protected_content) { struct gsc_frame *frame = &ctx->s_frame; exynos_sysmmu_set_pbuf(&gsc->pdev->dev, frame->fmt->nr_comp, ctx->prebuf); } if (ctx->state & GSC_PARAMS) { gsc_hw_set_sw_reset(gsc); ret = gsc_wait_reset(gsc); if (ret < 0) { gsc_err("gscaler s/w reset timeout"); goto put_device; } gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false); gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false); gsc_hw_set_frm_done_irq_mask(gsc, false); gsc_hw_set_gsc_irq_enable(gsc, true); gsc_hw_set_one_frm_mode(gsc, true); gsc_hw_set_freerun_clock_mode(gsc, false); if (gsc_set_scaler_info(ctx)) { gsc_err("Scaler setup error"); goto put_device; } gsc_hw_set_input_path(ctx); gsc_hw_set_in_size(ctx); gsc_hw_set_in_image_format(ctx); gsc_hw_set_output_path(ctx); gsc_hw_set_out_size(ctx); gsc_hw_set_out_image_format(ctx); gsc_hw_set_prescaler(ctx); gsc_hw_set_mainscaler(ctx); gsc_hw_set_h_coef(ctx); gsc_hw_set_v_coef(ctx); gsc_hw_set_rotation(ctx); gsc_hw_set_global_alpha(ctx); } gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM); gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM); ctx->state &= ~GSC_PARAMS; if (!test_and_set_bit(ST_M2M_RUN, &gsc->state)) { /* One frame mode sequence GSCALER_ON on -> GSCALER_OP_STATUS is operating -> GSCALER_ON off */ gsc_hw_enable_control(gsc, true); #ifdef GSC_PERF gsc->start_time = sched_clock(); #endif ret = gsc_wait_operating(gsc); if (ret < 0) { gsc_err("gscaler wait operating timeout"); goto put_device; } } ctx->op_timer.expires = (jiffies + 2 * HZ); add_timer(&ctx->op_timer); spin_unlock_irqrestore(&ctx->slock, flags); 
return; put_device: ctx->state &= ~GSC_PARAMS; spin_unlock_irqrestore(&ctx->slock, flags); pm_runtime_put_sync(&gsc->pdev->dev); } static int gsc_m2m_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *allocators[]) { struct gsc_ctx *ctx = vb2_get_drv_priv(vq); struct gsc_frame *frame; int i; frame = ctx_get_frame(ctx, vq->type); if (IS_ERR(frame)) return PTR_ERR(frame); if (!frame->fmt) return -EINVAL; *num_planes = frame->fmt->num_planes; for (i = 0; i < frame->fmt->num_planes; i++) { sizes[i] = get_plane_size(frame, i); allocators[i] = ctx->gsc_dev->alloc_ctx; } return 0; } static int gsc_m2m_buf_prepare(struct vb2_buffer *vb) { struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct gsc_frame *frame; int i; frame = ctx_get_frame(ctx, vb->vb2_queue->type); if (IS_ERR(frame)) return PTR_ERR(frame); if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { for (i = 0; i < frame->fmt->num_planes; i++) vb2_set_plane_payload(vb, i, frame->payload[i]); } vb2_ion_buf_prepare(vb); return 0; } static void gsc_m2m_fence_work(struct work_struct *work) { struct gsc_ctx *ctx = container_of(work, struct gsc_ctx, fence_work); struct v4l2_m2m_buffer *buffer; struct sync_fence *fence; unsigned long flags; int ret; spin_lock_irqsave(&ctx->slock, flags); while (!list_empty(&ctx->fence_wait_list)) { buffer = list_first_entry(&ctx->fence_wait_list, struct v4l2_m2m_buffer, wait); list_del(&buffer->wait); spin_unlock_irqrestore(&ctx->slock, flags); fence = buffer->vb.acquire_fence; if (fence) { buffer->vb.acquire_fence = NULL; ret = sync_fence_wait(fence, 1000); if (ret == -ETIME) { gsc_warn("sync_fence_wait() timeout"); ret = sync_fence_wait(fence, 10 * MSEC_PER_SEC); } if (ret) gsc_warn("sync_fence_wait() error"); sync_fence_put(fence); } if (ctx->m2m_ctx) { v4l2_m2m_buf_queue(ctx->m2m_ctx, &buffer->vb); v4l2_m2m_try_schedule(ctx->m2m_ctx); } spin_lock_irqsave(&ctx->slock, flags); } 
spin_unlock_irqrestore(&ctx->slock, flags); } static void gsc_m2m_buf_queue(struct vb2_buffer *vb) { struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb); unsigned long flags; struct sync_fence *fence; gsc_dbg("ctx: %p, ctx->state: 0x%x", ctx, ctx->state); fence = vb->acquire_fence; if (fence) { spin_lock_irqsave(&ctx->slock, flags); list_add_tail(&b->wait, &ctx->fence_wait_list); spin_unlock_irqrestore(&ctx->slock, flags); queue_work(ctx->gsc_dev->irq_workqueue, &ctx->fence_work); } else { if (ctx->m2m_ctx) v4l2_m2m_buf_queue(ctx->m2m_ctx, vb); } } struct vb2_ops gsc_m2m_qops = { .queue_setup = gsc_m2m_queue_setup, .buf_prepare = gsc_m2m_buf_prepare, .buf_finish = vb2_ion_buf_finish, .buf_queue = gsc_m2m_buf_queue, .wait_prepare = gsc_unlock, .wait_finish = gsc_lock, .stop_streaming = gsc_m2m_stop_streaming, }; static int gsc_m2m_querycap(struct file *file, void *fh, struct v4l2_capability *cap) { struct gsc_ctx *ctx = fh_to_ctx(fh); struct gsc_dev *gsc = ctx->gsc_dev; strncpy(cap->driver, gsc->pdev->name, sizeof(cap->driver) - 1); strncpy(cap->card, gsc->pdev->name, sizeof(cap->card) - 1); cap->bus_info[0] = 0; cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE; return 0; } static int gsc_m2m_enum_fmt_mplane(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return gsc_enum_fmt_mplane(f); } static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh, struct v4l2_format *f) { struct gsc_ctx *ctx = fh_to_ctx(fh); if ((f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) && (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) return -EINVAL; return gsc_g_fmt_mplane(ctx, f); } static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh, struct v4l2_format *f) { struct gsc_ctx *ctx = fh_to_ctx(fh); if ((f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) && (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) 
return -EINVAL; return gsc_try_fmt_mplane(ctx, f); } static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh, struct v4l2_format *f) { struct gsc_ctx *ctx = fh_to_ctx(fh); struct vb2_queue *vq; struct gsc_frame *frame; struct v4l2_pix_format_mplane *pix; int i, ret = 0; ret = gsc_m2m_try_fmt_mplane(file, fh, f); if (ret) return ret; vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); if (vb2_is_streaming(vq)) { gsc_err("queue (%d) busy", f->type); return -EBUSY; } if (V4L2_TYPE_IS_OUTPUT(f->type)) { frame = &ctx->s_frame; } else { frame = &ctx->d_frame; } pix = &f->fmt.pix_mp; frame->fmt = find_format(&pix->pixelformat, NULL, 0); if (!frame->fmt) return -EINVAL; for (i = 0; i < frame->fmt->num_planes; i++) frame->payload[i] = pix->plane_fmt[i].sizeimage; gsc_set_frame_size(frame, pix->width, pix->height); if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) gsc_ctx_state_lock_set(GSC_PARAMS | GSC_DST_FMT, ctx); else gsc_ctx_state_lock_set(GSC_PARAMS | GSC_SRC_FMT, ctx); gsc_dbg("f_w: %d, f_h: %d", frame->f_width, frame->f_height); return 0; } static int gsc_m2m_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *reqbufs) { struct gsc_ctx *ctx = fh_to_ctx(fh); struct gsc_dev *gsc = ctx->gsc_dev; struct gsc_frame *frame; u32 max_cnt; max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ? 
gsc->variant->in_buf_cnt : gsc->variant->out_buf_cnt; if (reqbufs->count > max_cnt) return -EINVAL; else if (reqbufs->count == 0) { if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) gsc_ctx_state_lock_clear(GSC_SRC_FMT, ctx); else gsc_ctx_state_lock_clear(GSC_DST_FMT, ctx); } gsc_set_protected_content(gsc, ctx->gsc_ctrls.drm_en->cur.val); frame = ctx_get_frame(ctx, reqbufs->type); frame->cacheable = ctx->gsc_ctrls.cacheable->val; gsc->vb2->set_cacheable(gsc->alloc_ctx, frame->cacheable); return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); } static int gsc_m2m_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf) { struct gsc_ctx *ctx = fh_to_ctx(fh); return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); } static int gsc_m2m_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) { struct gsc_ctx *ctx = fh_to_ctx(fh); return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); } static int gsc_m2m_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) { struct gsc_ctx *ctx = fh_to_ctx(fh); return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); } static int gsc_m2m_streamon(struct file *file, void *fh, enum v4l2_buf_type type) { struct gsc_ctx *ctx = fh_to_ctx(fh); struct gsc_dev *gsc = ctx->gsc_dev; struct exynos_platform_gscaler *pdata = gsc->pdata; /* The source and target color format need to be set */ if (V4L2_TYPE_IS_OUTPUT(type)) { if (!gsc_ctx_state_is_set(GSC_SRC_FMT, ctx)) return -EINVAL; } else if (!gsc_ctx_state_is_set(GSC_DST_FMT, ctx)) { return -EINVAL; } gsc_pm_qos_ctrl(gsc, GSC_QOS_ON, pdata->mif_min, pdata->int_min); return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); } static int gsc_m2m_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) { struct gsc_ctx *ctx = fh_to_ctx(fh); struct gsc_dev *gsc = ctx->gsc_dev; gsc_pm_qos_ctrl(gsc, GSC_QOS_OFF, 0, 0); return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type); } static int gsc_m2m_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cr) { struct gsc_frame *frame; struct gsc_ctx *ctx = 
fh_to_ctx(fh); frame = ctx_get_frame(ctx, cr->type); if (IS_ERR(frame)) return PTR_ERR(frame); cr->bounds.left = 0; cr->bounds.top = 0; cr->bounds.width = frame->f_width; cr->bounds.height = frame->f_height; cr->defrect = cr->bounds; return 0; } static int gsc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr) { struct gsc_ctx *ctx = fh_to_ctx(fh); return gsc_g_crop(ctx, cr); } static int gsc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr) { struct gsc_ctx *ctx = fh_to_ctx(fh); struct gsc_variant *variant = ctx->gsc_dev->variant; struct gsc_frame *f; int ret; ret = gsc_try_crop(ctx, cr); if (ret) return ret; f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ? &ctx->s_frame : &ctx->d_frame; /* Check to see if scaling ratio is within supported range */ if (gsc_ctx_state_is_set(GSC_DST_FMT | GSC_SRC_FMT, ctx)) { if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { ret = gsc_check_scaler_ratio(variant, cr->c.width, cr->c.height, ctx->d_frame.crop.width, ctx->d_frame.crop.height, ctx->gsc_ctrls.rotate->val, ctx->out_path); } else { ret = gsc_check_scaler_ratio(variant, ctx->s_frame.crop.width, ctx->s_frame.crop.height, cr->c.width, cr->c.height, ctx->gsc_ctrls.rotate->val, ctx->out_path); } if (ret) { gsc_err("Out of scaler range"); return -EINVAL; } } f->crop.left = cr->c.left; f->crop.top = cr->c.top; f->crop.width = cr->c.width; f->crop.height = cr->c.height; gsc_ctx_state_lock_set(GSC_PARAMS, ctx); return 0; } static const struct v4l2_ioctl_ops gsc_m2m_ioctl_ops = { .vidioc_querycap = gsc_m2m_querycap, .vidioc_enum_fmt_vid_cap_mplane = gsc_m2m_enum_fmt_mplane, .vidioc_enum_fmt_vid_out_mplane = gsc_m2m_enum_fmt_mplane, .vidioc_g_fmt_vid_cap_mplane = gsc_m2m_g_fmt_mplane, .vidioc_g_fmt_vid_out_mplane = gsc_m2m_g_fmt_mplane, .vidioc_try_fmt_vid_cap_mplane = gsc_m2m_try_fmt_mplane, .vidioc_try_fmt_vid_out_mplane = gsc_m2m_try_fmt_mplane, .vidioc_s_fmt_vid_cap_mplane = gsc_m2m_s_fmt_mplane, .vidioc_s_fmt_vid_out_mplane = gsc_m2m_s_fmt_mplane, 
.vidioc_reqbufs = gsc_m2m_reqbufs, .vidioc_querybuf = gsc_m2m_querybuf, .vidioc_qbuf = gsc_m2m_qbuf, .vidioc_dqbuf = gsc_m2m_dqbuf, .vidioc_streamon = gsc_m2m_streamon, .vidioc_streamoff = gsc_m2m_streamoff, .vidioc_g_crop = gsc_m2m_g_crop, .vidioc_s_crop = gsc_m2m_s_crop, .vidioc_cropcap = gsc_m2m_cropcap }; static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { struct gsc_ctx *ctx = priv; int ret; memset(src_vq, 0, sizeof(*src_vq)); src_vq->name = kasprintf(GFP_KERNEL, "%s-src", dev_name(&ctx->gsc_dev->pdev->dev)); src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; src_vq->drv_priv = ctx; src_vq->ops = &gsc_m2m_qops; src_vq->mem_ops = ctx->gsc_dev->vb2->ops; src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); ret = vb2_queue_init(src_vq); if (ret) return ret; memset(dst_vq, 0, sizeof(*dst_vq)); dst_vq->name = kasprintf(GFP_KERNEL, "%s-dst", dev_name(&ctx->gsc_dev->pdev->dev)); dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; dst_vq->drv_priv = ctx; dst_vq->ops = &gsc_m2m_qops; dst_vq->mem_ops = ctx->gsc_dev->vb2->ops; dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); return vb2_queue_init(dst_vq); } static int gsc_m2m_open(struct file *file) { struct gsc_dev *gsc = video_drvdata(file); struct gsc_ctx *ctx = NULL; int ret; gsc_dbg("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state); if (gsc_out_opened(gsc) || gsc_cap_opened(gsc)) return -EBUSY; ctx = kzalloc(sizeof *ctx, GFP_KERNEL); if (!ctx) return -ENOMEM; v4l2_fh_init(&ctx->fh, gsc->m2m.vfd); ret = gsc_ctrls_create(ctx); if (ret) goto error_fh; /* Use separate control handler per file handle */ ctx->fh.ctrl_handler = &ctx->ctrl_handler; file->private_data = &ctx->fh; v4l2_fh_add(&ctx->fh); ctx->gsc_dev = gsc; /* Default color format */ ctx->s_frame.fmt = get_format(0); ctx->d_frame.fmt = get_format(0); /* Setup the device context for mem2mem 
mode. */ ctx->state |= GSC_CTX_M2M; ctx->flags = 0; ctx->in_path = GSC_DMA; ctx->out_path = GSC_DMA; spin_lock_init(&ctx->slock); init_timer(&ctx->op_timer); ctx->op_timer.data = (unsigned long)ctx; ctx->op_timer.function = gsc_op_timer_handler; INIT_LIST_HEAD(&ctx->fence_wait_list); INIT_WORK(&ctx->fence_work, gsc_m2m_fence_work); ctx->m2m_ctx = v4l2_m2m_ctx_init(gsc->m2m.m2m_dev, ctx, queue_init); if (IS_ERR(ctx->m2m_ctx)) { gsc_err("Failed to initialize m2m context"); ret = PTR_ERR(ctx->m2m_ctx); goto error_fh; } if (gsc->m2m.refcnt++ == 0) set_bit(ST_M2M_OPEN, &gsc->state); gsc_dbg("gsc m2m driver is opened, ctx(0x%p)", ctx); return 0; error_fh: v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); kfree(ctx); return ret; } static int gsc_m2m_release(struct file *file) { struct gsc_ctx *ctx = fh_to_ctx(file->private_data); struct gsc_dev *gsc = ctx->gsc_dev; gsc_dbg("pid: %d, state: 0x%lx, refcnt= %d", task_pid_nr(current), gsc->state, gsc->m2m.refcnt); /* if we didn't properly sequence with the secure side to turn off * content protection, we may be left in a very bad state and the * only way to recover this reliably is to reboot. 
*/ BUG_ON(gsc->protected_content); kfree(ctx->m2m_ctx->cap_q_ctx.q.name); kfree(ctx->m2m_ctx->out_q_ctx.q.name); v4l2_m2m_ctx_release(ctx->m2m_ctx); gsc_ctrls_delete(ctx); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); if (--gsc->m2m.refcnt <= 0) clear_bit(ST_M2M_OPEN, &gsc->state); kfree(ctx); return 0; } static unsigned int gsc_m2m_poll(struct file *file, struct poll_table_struct *wait) { struct gsc_ctx *ctx = fh_to_ctx(file->private_data); return v4l2_m2m_poll(file, ctx->m2m_ctx, wait); } static int gsc_m2m_mmap(struct file *file, struct vm_area_struct *vma) { struct gsc_ctx *ctx = fh_to_ctx(file->private_data); return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma); } static const struct v4l2_file_operations gsc_m2m_fops = { .owner = THIS_MODULE, .open = gsc_m2m_open, .release = gsc_m2m_release, .poll = gsc_m2m_poll, .unlocked_ioctl = video_ioctl2, .mmap = gsc_m2m_mmap, }; static struct v4l2_m2m_ops gsc_m2m_ops = { .device_run = gsc_m2m_device_run, .job_abort = gsc_m2m_job_abort, }; int gsc_register_m2m_device(struct gsc_dev *gsc) { struct video_device *vfd; struct platform_device *pdev; int ret = 0; if (!gsc) return -ENODEV; pdev = gsc->pdev; vfd = video_device_alloc(); if (!vfd) { dev_err(&pdev->dev, "Failed to allocate video device\n"); return -ENOMEM; } vfd->fops = &gsc_m2m_fops; vfd->ioctl_ops = &gsc_m2m_ioctl_ops; vfd->release = video_device_release; vfd->lock = &gsc->lock; snprintf(vfd->name, sizeof(vfd->name), "%s:m2m", dev_name(&pdev->dev)); video_set_drvdata(vfd, gsc); gsc->m2m.vfd = vfd; gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops); if (IS_ERR(gsc->m2m.m2m_dev)) { dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n"); ret = PTR_ERR(gsc->m2m.m2m_dev); goto err_m2m_r1; } ret = video_register_device(vfd, VFL_TYPE_GRABBER, EXYNOS_VIDEONODE_GSC_M2M(gsc->id)); if (ret) { dev_err(&pdev->dev, "%s(): failed to register video device\n", __func__); goto err_m2m_r2; } gsc_dbg("gsc m2m driver registered as /dev/video%d", vfd->num); return 0; err_m2m_r2: 
v4l2_m2m_release(gsc->m2m.m2m_dev); err_m2m_r1: video_device_release(gsc->m2m.vfd); return ret; } void gsc_unregister_m2m_device(struct gsc_dev *gsc) { if (gsc) v4l2_m2m_release(gsc->m2m.m2m_dev); }
gpl-2.0
smksyj/linux_modified_mlock
drivers/net/ethernet/emulex/benet/be_main.c
47
138512
/* * Copyright (C) 2005 - 2014 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * * Contact Information: * linux-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ #include <linux/prefetch.h> #include <linux/module.h> #include "be.h" #include "be_cmds.h" #include <asm/div64.h> #include <linux/aer.h> #include <linux/if_bridge.h> #include <net/busy_poll.h> #include <net/vxlan.h> MODULE_VERSION(DRV_VER); MODULE_DEVICE_TABLE(pci, be_dev_ids); MODULE_DESCRIPTION(DRV_DESC " " DRV_VER); MODULE_AUTHOR("Emulex Corporation"); MODULE_LICENSE("GPL"); static unsigned int num_vfs; module_param(num_vfs, uint, S_IRUGO); MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize"); static ushort rx_frag_size = 2048; module_param(rx_frag_size, ushort, S_IRUGO); MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); static const struct pci_device_id be_dev_ids[] = { { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)}, { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)}, { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)}, { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)}, { 0 } }; MODULE_DEVICE_TABLE(pci, be_dev_ids); /* UE Status Low CSR */ static const char * const ue_status_low_desc[] = { "CEV", "CTX", "DBUF", "ERX", "Host", "MPU", "NDMA", "PTC ", "RDMA ", "RXF ", "RXIPS ", "RXULP0 ", "RXULP1 ", "RXULP2 ", "TIM ", "TPOST ", "TPRE ", "TXIPS ", "TXULP0 ", "TXULP1 ", "UC ", "WDMA ", "TXULP2 ", "HOST1 ", "P0_OB_LINK ", "P1_OB_LINK ", "HOST_GPIO ", "MBOX ", "ERX2 ", "SPARE ", "JTAG ", "MPU_INTPEND " }; /* UE Status High 
CSR */ static const char * const ue_status_hi_desc[] = { "LPCMEMHOST", "MGMT_MAC", "PCS0ONLINE", "MPU_IRAM", "PCS1ONLINE", "PCTL0", "PCTL1", "PMEM", "RR", "TXPB", "RXPP", "XAUI", "TXP", "ARM", "IPC", "HOST2", "HOST3", "HOST4", "HOST5", "HOST6", "HOST7", "ECRC", "Poison TLP", "NETC", "PERIPH", "LLTXULP", "D2P", "RCON", "LDMA", "LLTXP", "LLTXPB", "Unknown" }; static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) { struct be_dma_mem *mem = &q->dma_mem; if (mem->va) { dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, mem->dma); mem->va = NULL; } } static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q, u16 len, u16 entry_size) { struct be_dma_mem *mem = &q->dma_mem; memset(q, 0, sizeof(*q)); q->len = len; q->entry_size = entry_size; mem->size = len * entry_size; mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, GFP_KERNEL); if (!mem->va) return -ENOMEM; return 0; } static void be_reg_intr_set(struct be_adapter *adapter, bool enable) { u32 reg, enabled; pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, &reg); enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; if (!enabled && enable) reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; else if (enabled && !enable) reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; else return; pci_write_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); } static void be_intr_set(struct be_adapter *adapter, bool enable) { int status = 0; /* On lancer interrupts can't be controlled via this register */ if (lancer_chip(adapter)) return; if (adapter->eeh_error) return; status = be_cmd_intr_set(adapter, enable); if (status) be_reg_intr_set(adapter, enable); } static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted) { u32 val = 0; val |= qid & DB_RQ_RING_ID_MASK; val |= posted << DB_RQ_NUM_POSTED_SHIFT; wmb(); iowrite32(val, adapter->db + DB_RQ_OFFSET); } static void be_txq_notify(struct be_adapter *adapter, struct 
be_tx_obj *txo, u16 posted)
{
	u32 val = 0;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* Order WRB writes before the doorbell write. */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

/* Ring the event-queue doorbell: ack @num_popped events on EQ @qid, and
 * optionally re-arm the EQ and/or clear the interrupt. Skipped entirely
 * while an EEH (PCI error recovery) condition is flagged.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

/* Ring the completion-queue doorbell: ack @num_popped completions on CQ
 * @qid and optionally re-arm it. No-op during EEH error recovery.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
		  u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) << DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

/* ndo_set_mac_address handler: program a new primary MAC via the FW and
 * verify with the FW which MAC is actually active before committing it to
 * netdev->dev_addr. Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC.
This call may fail if the * old MAC was already deleted by the PF driver. */ if (adapter->pmac_id[0] != old_pmac_id) be_cmd_pmac_del(adapter, adapter->if_handle, old_pmac_id, 0); } /* Decide if the new MAC is successfully activated only after * querying the FW */ status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac, adapter->if_handle, true, 0); if (status) goto err; /* The MAC change did not happen, either due to lack of privilege * or PF didn't pre-provision. */ if (!ether_addr_equal(addr->sa_data, mac)) { status = -EPERM; goto err; } memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); dev_info(dev, "MAC address changed to %pM\n", mac); return 0; err: dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); return status; } /* BE2 supports only v0 cmd */ static void *hw_stats_from_cmd(struct be_adapter *adapter) { if (BE2_chip(adapter)) { struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va; return &cmd->hw_stats; } else if (BE3_chip(adapter)) { struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va; return &cmd->hw_stats; } else { struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va; return &cmd->hw_stats; } } /* BE2 supports only v0 cmd */ static void *be_erx_stats_from_cmd(struct be_adapter *adapter) { if (BE2_chip(adapter)) { struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter); return &hw_stats->erx; } else if (BE3_chip(adapter)) { struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter); return &hw_stats->erx; } else { struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter); return &hw_stats->erx; } } static void populate_be_v0_stats(struct be_adapter *adapter) { struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter); struct be_pmem_stats *pmem_sts = &hw_stats->pmem; struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf; struct be_port_rxf_stats_v0 *port_stats = &rxf_stats->port[adapter->port_num]; struct be_drv_stats *drvs = &adapter->drv_stats; be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats)); 
drvs->rx_pause_frames = port_stats->rx_pause_frames; drvs->rx_crc_errors = port_stats->rx_crc_errors; drvs->rx_control_frames = port_stats->rx_control_frames; drvs->rx_in_range_errors = port_stats->rx_in_range_errors; drvs->rx_frame_too_long = port_stats->rx_frame_too_long; drvs->rx_dropped_runt = port_stats->rx_dropped_runt; drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow; drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small; drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short; drvs->rx_out_range_errors = port_stats->rx_out_range_errors; drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow; drvs->rx_dropped_header_too_small = port_stats->rx_dropped_header_too_small; drvs->rx_address_filtered = port_stats->rx_address_filtered + port_stats->rx_vlan_filtered; drvs->rx_alignment_symbol_errors = port_stats->rx_alignment_symbol_errors; drvs->tx_pauseframes = port_stats->tx_pauseframes; drvs->tx_controlframes = port_stats->tx_controlframes; if (adapter->port_num) drvs->jabber_events = rxf_stats->port1_jabber_events; else drvs->jabber_events = rxf_stats->port0_jabber_events; drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; drvs->forwarded_packets = rxf_stats->forwarded_packets; drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; } static void populate_be_v1_stats(struct be_adapter *adapter) { struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter); struct be_pmem_stats *pmem_sts = &hw_stats->pmem; struct 
be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf; struct be_port_rxf_stats_v1 *port_stats = &rxf_stats->port[adapter->port_num]; struct be_drv_stats *drvs = &adapter->drv_stats; be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats)); drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop; drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames; drvs->rx_pause_frames = port_stats->rx_pause_frames; drvs->rx_crc_errors = port_stats->rx_crc_errors; drvs->rx_control_frames = port_stats->rx_control_frames; drvs->rx_in_range_errors = port_stats->rx_in_range_errors; drvs->rx_frame_too_long = port_stats->rx_frame_too_long; drvs->rx_dropped_runt = port_stats->rx_dropped_runt; drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small; drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short; drvs->rx_out_range_errors = port_stats->rx_out_range_errors; drvs->rx_dropped_header_too_small = port_stats->rx_dropped_header_too_small; drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow_drop; drvs->rx_address_filtered = port_stats->rx_address_filtered; drvs->rx_alignment_symbol_errors = port_stats->rx_alignment_symbol_errors; drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop; drvs->tx_pauseframes = port_stats->tx_pauseframes; drvs->tx_controlframes = port_stats->tx_controlframes; drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes; drvs->jabber_events = port_stats->jabber_events; drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; drvs->forwarded_packets = rxf_stats->forwarded_packets; drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; 
drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; } static void populate_be_v2_stats(struct be_adapter *adapter) { struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter); struct be_pmem_stats *pmem_sts = &hw_stats->pmem; struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf; struct be_port_rxf_stats_v2 *port_stats = &rxf_stats->port[adapter->port_num]; struct be_drv_stats *drvs = &adapter->drv_stats; be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats)); drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop; drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames; drvs->rx_pause_frames = port_stats->rx_pause_frames; drvs->rx_crc_errors = port_stats->rx_crc_errors; drvs->rx_control_frames = port_stats->rx_control_frames; drvs->rx_in_range_errors = port_stats->rx_in_range_errors; drvs->rx_frame_too_long = port_stats->rx_frame_too_long; drvs->rx_dropped_runt = port_stats->rx_dropped_runt; drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs; drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs; drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs; drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length; drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small; drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short; drvs->rx_out_range_errors = port_stats->rx_out_range_errors; drvs->rx_dropped_header_too_small = port_stats->rx_dropped_header_too_small; drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow_drop; drvs->rx_address_filtered = port_stats->rx_address_filtered; drvs->rx_alignment_symbol_errors = port_stats->rx_alignment_symbol_errors; drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop; drvs->tx_pauseframes = port_stats->tx_pauseframes; drvs->tx_controlframes = port_stats->tx_controlframes; drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes; drvs->jabber_events = 
port_stats->jabber_events; drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf; drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr; drvs->forwarded_packets = rxf_stats->forwarded_packets; drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu; drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr; drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags; adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops; if (be_roce_supported(adapter)) { drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd; drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd; drvs->rx_roce_frames = port_stats->roce_frames_received; drvs->roce_drops_crc = port_stats->roce_drops_crc; drvs->roce_drops_payload_len = port_stats->roce_drops_payload_len; } } static void populate_lancer_stats(struct be_adapter *adapter) { struct be_drv_stats *drvs = &adapter->drv_stats; struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter); be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats)); drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo; drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo; drvs->rx_control_frames = pport_stats->rx_control_frames_lo; drvs->rx_in_range_errors = pport_stats->rx_in_range_errors; drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo; drvs->rx_dropped_runt = pport_stats->rx_dropped_runt; drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors; drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors; drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors; drvs->rx_dropped_tcp_length = pport_stats->rx_dropped_invalid_tcp_length; drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small; drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short; drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors; drvs->rx_dropped_header_too_small = pport_stats->rx_dropped_header_too_small; drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow; drvs->rx_address_filtered 
= pport_stats->rx_address_filtered +
					  pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

/* Fold a 16-bit HW counter reading @val into the 32-bit software
 * accumulator @*acc, compensating for the counter wrapping at 65535.
 * The low half of *acc mirrors the last HW value; the high half counts
 * wraps. ACCESS_ONCE makes the final store a single write for lockless
 * readers.
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

/* Record the per-RX-queue "drops due to no fragments" counter. On chips
 * newer than BEx the HW value is used directly; on BEx the 16-bit HW
 * counter is accumulated into a 32-bit software value.
 */
static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

/* Parse the raw FW stats response into adapter->drv_stats, dispatching to
 * the chip-specific populate routine (Lancer, or stats cmd v0/v1/v2).
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1.
use v2 for v0, v1 access */ for_all_rx_queues(adapter, rxo, i) { erx_stat = erx->rx_drops_no_fragments[rxo->q.id]; populate_erx_stats(adapter, rxo, erx_stat); } } } static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct be_adapter *adapter = netdev_priv(netdev); struct be_drv_stats *drvs = &adapter->drv_stats; struct be_rx_obj *rxo; struct be_tx_obj *txo; u64 pkts, bytes; unsigned int start; int i; for_all_rx_queues(adapter, rxo, i) { const struct be_rx_stats *rx_stats = rx_stats(rxo); do { start = u64_stats_fetch_begin_irq(&rx_stats->sync); pkts = rx_stats(rxo)->rx_pkts; bytes = rx_stats(rxo)->rx_bytes; } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start)); stats->rx_packets += pkts; stats->rx_bytes += bytes; stats->multicast += rx_stats(rxo)->rx_mcast_pkts; stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs + rx_stats(rxo)->rx_drops_no_frags; } for_all_tx_queues(adapter, txo, i) { const struct be_tx_stats *tx_stats = tx_stats(txo); do { start = u64_stats_fetch_begin_irq(&tx_stats->sync); pkts = tx_stats(txo)->tx_pkts; bytes = tx_stats(txo)->tx_bytes; } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start)); stats->tx_packets += pkts; stats->tx_bytes += bytes; } /* bad pkts received */ stats->rx_errors = drvs->rx_crc_errors + drvs->rx_alignment_symbol_errors + drvs->rx_in_range_errors + drvs->rx_out_range_errors + drvs->rx_frame_too_long + drvs->rx_dropped_too_small + drvs->rx_dropped_too_short + drvs->rx_dropped_header_too_small + drvs->rx_dropped_tcp_length + drvs->rx_dropped_runt; /* detailed rx errors */ stats->rx_length_errors = drvs->rx_in_range_errors + drvs->rx_out_range_errors + drvs->rx_frame_too_long; stats->rx_crc_errors = drvs->rx_crc_errors; /* frame alignment errors */ stats->rx_frame_errors = drvs->rx_alignment_symbol_errors; /* receiver fifo overrun */ /* drops_no_pbuf is no per i/f, it's per BE card */ stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop + 
drvs->rx_input_fifo_overflow_drop + drvs->rx_drops_no_pbuf;
	return stats;
}

/* Propagate @link_status to the net stack via netif_carrier_{on,off}.
 * The first call also forces carrier off so the initial state is known.
 */
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

/* Account one transmit request in the per-queue TX stats, inside a
 * u64_stats sequence so 64-bit readers see consistent values.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb:
 * one per linear part + one per frag + one header WRB, padded to an
 * even count with a dummy WRB on non-Lancer chips (*dummy reports this).
 */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

/* Fill one TX WRB with the DMA address/length of a fragment. */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

/* Return the skb's VLAN tag, with its priority bits replaced by the
 * adapter's recommended priority when the OS-chosen priority is not in
 * the available-priority bitmap.
 */
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr; } static u16 skb_ip_proto(struct sk_buff *skb) { return (ip_hdr(skb)->version == 4) ? ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; } static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr, struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan) { u16 vlan_tag, proto; memset(hdr, 0, sizeof(*hdr)); SET_TX_WRB_HDR_BITS(crc, hdr, 1); if (skb_is_gso(skb)) { SET_TX_WRB_HDR_BITS(lso, hdr, 1); SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size); if (skb_is_gso_v6(skb) && !lancer_chip(adapter)) SET_TX_WRB_HDR_BITS(lso6, hdr, 1); } else if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->encapsulation) { SET_TX_WRB_HDR_BITS(ipcs, hdr, 1); proto = skb_inner_ip_proto(skb); } else { proto = skb_ip_proto(skb); } if (proto == IPPROTO_TCP) SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1); else if (proto == IPPROTO_UDP) SET_TX_WRB_HDR_BITS(udpcs, hdr, 1); } if (vlan_tx_tag_present(skb)) { SET_TX_WRB_HDR_BITS(vlan, hdr, 1); vlan_tag = be_get_tx_vlan_tag(adapter, skb); SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag); } /* To skip HW VLAN tagging: evt = 1, compl = 0 */ SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan); SET_TX_WRB_HDR_BITS(event, hdr, 1); SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt); SET_TX_WRB_HDR_BITS(len, hdr, len); } static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, bool unmap_single) { dma_addr_t dma; be_dws_le_to_cpu(wrb, sizeof(*wrb)); dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo; if (wrb->frag_len) { if (unmap_single) dma_unmap_single(dev, dma, wrb->frag_len, DMA_TO_DEVICE); else dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE); } } static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq, struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb, bool skip_hw_vlan) { dma_addr_t busaddr; int i, copied = 0; struct device *dev = &adapter->pdev->dev; struct sk_buff *first_skb = skb; struct be_eth_wrb *wrb; struct be_eth_hdr_wrb *hdr; 
bool map_single = false; u16 map_head; hdr = queue_head_node(txq); queue_head_inc(txq); map_head = txq->head; if (skb->len > skb->data_len) { int len = skb_headlen(skb); busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) goto dma_err; map_single = true; wrb = queue_head_node(txq); wrb_fill(wrb, busaddr, len); be_dws_cpu_to_le(wrb, sizeof(*wrb)); queue_head_inc(txq); copied += len; } for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; busaddr = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (dma_mapping_error(dev, busaddr)) goto dma_err; wrb = queue_head_node(txq); wrb_fill(wrb, busaddr, skb_frag_size(frag)); be_dws_cpu_to_le(wrb, sizeof(*wrb)); queue_head_inc(txq); copied += skb_frag_size(frag); } if (dummy_wrb) { wrb = queue_head_node(txq); wrb_fill(wrb, 0, 0); be_dws_cpu_to_le(wrb, sizeof(*wrb)); queue_head_inc(txq); } wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan); be_dws_cpu_to_le(hdr, sizeof(*hdr)); return copied; dma_err: txq->head = map_head; while (copied) { wrb = queue_head_node(txq); unmap_tx_frag(dev, wrb, map_single); map_single = false; copied -= wrb->frag_len; adapter->drv_stats.dma_map_errors++; queue_head_inc(txq); } return 0; } static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, struct sk_buff *skb, bool *skip_hw_vlan) { u16 vlan_tag = 0; skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) return skb; if (vlan_tx_tag_present(skb)) vlan_tag = be_get_tx_vlan_tag(adapter, skb); if (qnq_async_evt_rcvd(adapter) && adapter->pvid) { if (!vlan_tag) vlan_tag = adapter->pvid; /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to * skip VLAN insertion */ if (skip_hw_vlan) *skip_hw_vlan = true; } if (vlan_tag) { skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), vlan_tag); if (unlikely(!skb)) return skb; skb->vlan_tci = 0; } /* Insert the outer VLAN, if any */ if 
(adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

/* Return true for the problematic IPv6-with-extension-header packets that
 * can lock up BE3 HW VLAN tagging: an IPv6 packet whose next header is
 * neither TCP nor UDP and whose option header has hdrlen == 0xff.
 */
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);

		/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

/* True if this skb would get a VLAN tag on TX (explicit tag, pvid or
 * qnq outer VLAN).
 */
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

/* True if TX of this skb could trigger the BE3 IPv6 ext-header stall. */
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

/* Apply BEx/Lancer TX HW-bug workarounds to @skb. May return a new skb,
 * the original, or NULL if the packet had to be dropped/lost.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
*/ if (skb->ip_summed != CHECKSUM_PARTIAL && vlan_tx_tag_present(skb)) { skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); if (unlikely(!skb)) goto err; } /* HW may lockup when VLAN HW tagging is requested on * certain ipv6 packets. Drop such pkts if the HW workaround to * skip HW tagging is not enabled by FW. */ if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) && (adapter->pvid || adapter->qnq_vid) && !qnq_async_evt_rcvd(adapter))) goto tx_drop; /* Manual VLAN tag insertion to prevent: * ASIC lockup when the ASIC inserts VLAN tag into * certain ipv6 packets. Insert VLAN tags in driver, * and set event, completion, vlan bits accordingly * in the Tx WRB. */ if (be_ipv6_tx_stall_chk(adapter, skb) && be_vlan_tag_tx_chk(adapter, skb)) { skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); if (unlikely(!skb)) goto err; } return skb; tx_drop: dev_kfree_skb_any(skb); err: return NULL; } static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, struct sk_buff *skb, bool *skip_hw_vlan) { /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or * less may cause a transmit stall on that port. So the work-around is * to pad short packets (<= 32 bytes) to a 36-byte length. 
*/
	if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
		if (!skb)
			return NULL;
	}

	return skb;
}

/* ndo_start_xmit handler: apply HW workarounds, build the WRBs for the
 * skb, stop the subqueue if it is about to fill, then ring the TX
 * doorbell. Always returns NETDEV_TX_OK (drops are counted, not
 * requeued).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

/* ndo_change_mtu handler: validate the requested MTU against the
 * adapter's supported range and commit it to the netdev.
 */
static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;

	if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
		dev_info(dev, "MTU must be between %d and %d bytes\n",
			 BE_MIN_MTU, BE_MAX_MTU);
		return -EINVAL;
	}

	dev_info(dev, "MTU changed from %d to %d bytes\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
return 0; } /* * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE. * If the user configures more, place BE in vlan promiscuous mode. */ static int be_vid_config(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; u16 vids[BE_NUM_VLANS_SUPPORTED]; u16 num = 0, i = 0; int status = 0; /* No need to further configure vids if in promiscuous mode */ if (adapter->promiscuous) return 0; if (adapter->vlans_added > be_max_vlans(adapter)) goto set_vlan_promisc; /* Construct VLAN Table to give to HW */ for_each_set_bit(i, adapter->vids, VLAN_N_VID) vids[num++] = cpu_to_le16(i); status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); if (status) { /* Set to VLAN promisc mode as setting VLAN filter failed */ if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES) goto set_vlan_promisc; dev_err(dev, "Setting HW VLAN filtering failed\n"); } else { if (adapter->flags & BE_FLAGS_VLAN_PROMISC) { /* hw VLAN filtering re-enabled. */ status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, OFF); if (!status) { dev_info(dev, "Disabling VLAN Promiscuous mode\n"); adapter->flags &= ~BE_FLAGS_VLAN_PROMISC; } } } return status; set_vlan_promisc: if (adapter->flags & BE_FLAGS_VLAN_PROMISC) return 0; status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON); if (!status) { dev_info(dev, "Enable VLAN Promiscuous mode\n"); adapter->flags |= BE_FLAGS_VLAN_PROMISC; } else dev_err(dev, "Failed to enable VLAN Promiscuous mode\n"); return status; } static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct be_adapter *adapter = netdev_priv(netdev); int status = 0; /* Packets with VID 0 are always received by Lancer by default */ if (lancer_chip(adapter) && vid == 0) return status; if (test_bit(vid, adapter->vids)) return status; set_bit(vid, adapter->vids); adapter->vlans_added++; status = be_vid_config(adapter); if (status) { adapter->vlans_added--; clear_bit(vid, adapter->vids); } return status; } static 
int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}

/* Take the adapter out of promiscuous mode and clear the related flags */
static void be_clear_promisc(struct be_adapter *adapter)
{
	adapter->promiscuous = false;
	adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);

	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

/* ndo_set_rx_mode handler: program promiscuous, unicast and multicast
 * filters to reflect the current netdev flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* Delete all previously programmed secondary unicast MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* More unicast addresses than HW slots: fall back to
		 * promiscuous mode.
		 */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs],
					0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if
setting MULTICAST address fails
 * or if num configured exceeds what we support
 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}

/* ndo_set_vf_mac handler: program a new MAC address for the given VF */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BE2/BE3: replace the VF's pmac entry (delete + add) */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"MAC %pM set on VF %d Failed: %#x", mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the active MAC so repeated requests can be short-circuited */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}

/* ndo_get_vf_config handler: report the cached per-VF settings */
static int be_get_vf_config(struct net_device *netdev, int vf,
			    struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->max_tx_rate = vf_cfg->tx_rate;
	vi->min_tx_rate = 0;
	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;

	return 0;
}

/* ndo_set_vf_vlan handler: configure (or reset) transparent VLAN
 * tagging for a VF via the hidden switch config.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		if
(vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;

	return 0;
}

/* ndo_set_vf_rate handler: cap a VF's TX rate. Only max_tx_rate is
 * supported; a non-zero min_tx_rate is rejected.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* max_tx_rate == 0 removes the cap; skip the link checks */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}

/* ndo_set_vf_link_state handler: force a VF's link state (auto/up/down) */
static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status =
be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	/* Cache the requested state for be_get_vf_config() reporting */
	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}

/* Snapshot the pkt counters and timestamp used by adaptive interrupt
 * coalescing (AIC) as the baseline for the next EQ-delay calculation.
 */
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}

/* Re-compute the EQ delay (interrupt coalescing) for every event queue
 * from the rx/tx packet rates seen since the last run, and push all
 * changed delays to the HW in a single command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: use the statically configured delay */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Read the 64-bit counters consistently via the stats sync */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		delta = jiffies_to_msecs(now - aic->jiffies);
		/* packets-per-second over the interval, rx + tx combined */
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		if (eqd < 8)
			eqd = 0;
		/* Clamp to the per-queue configured min/max delay */
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Only queue a HW update when the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}

static
void be_rx_stats_update(struct be_rx_obj *rxo, struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	/* Update the 64-bit counters under the stats sync so readers
	 * see consistent values.
	 */
	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}

/* Returns true when the HW-verified checksum can be trusted for this
 * completion and the skb may be marked CHECKSUM_UNNECESSARY.
 */
static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts
	 */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
}

/* Pop the RX buffer fragment at the queue tail and make it CPU-accessible.
 * The fragment marked last_frag owns the DMA mapping for the whole big
 * page and is fully unmapped; other fragments are only synced for CPU.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	/* Release every fragment of the completion back to the page pool */
	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
*/ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, struct be_rx_compl_info *rxcp) { struct be_rx_page_info *page_info; u16 i, j; u16 hdr_len, curr_frag_len, remaining; u8 *start; page_info = get_rx_page_info(rxo); start = page_address(page_info->page) + page_info->page_offset; prefetch(start); /* Copy data in the first descriptor of this completion */ curr_frag_len = min(rxcp->pkt_size, rx_frag_size); skb->len = curr_frag_len; if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */ memcpy(skb->data, start, curr_frag_len); /* Complete packet has now been moved to data */ put_page(page_info->page); skb->data_len = 0; skb->tail += curr_frag_len; } else { hdr_len = ETH_HLEN; memcpy(skb->data, start, hdr_len); skb_shinfo(skb)->nr_frags = 1; skb_frag_set_page(skb, 0, page_info->page); skb_shinfo(skb)->frags[0].page_offset = page_info->page_offset + hdr_len; skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len); skb->data_len = curr_frag_len - hdr_len; skb->truesize += rx_frag_size; skb->tail += hdr_len; } page_info->page = NULL; if (rxcp->pkt_size <= rx_frag_size) { BUG_ON(rxcp->num_rcvd != 1); return; } /* More frags present for this completion */ remaining = rxcp->pkt_size - curr_frag_len; for (i = 1, j = 0; i < rxcp->num_rcvd; i++) { page_info = get_rx_page_info(rxo); curr_frag_len = min(remaining, rx_frag_size); /* Coalesce all frags from the same physical page in one slot */ if (page_info->page_offset == 0) { /* Fresh page */ j++; skb_frag_set_page(skb, j, page_info->page); skb_shinfo(skb)->frags[j].page_offset = page_info->page_offset; skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0); skb_shinfo(skb)->nr_frags++; } else { put_page(page_info->page); } skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len); skb->len += curr_frag_len; skb->data_len += curr_frag_len; skb->truesize += rx_frag_size; remaining -= curr_frag_len; page_info->page = NULL; } BUG_ON(j > MAX_SKB_FRAGS); } /* Process the RX completion indicated by rxcp 
when GRO is disabled */ static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi, struct be_rx_compl_info *rxcp) { struct be_adapter *adapter = rxo->adapter; struct net_device *netdev = adapter->netdev; struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE); if (unlikely(!skb)) { rx_stats(rxo)->rx_drops_no_skbs++; be_rx_compl_discard(rxo, rxcp); return; } skb_fill_rx_data(rxo, skb, rxcp); if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp))) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, netdev); skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); if (netdev->features & NETIF_F_RXHASH) skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); skb->csum_level = rxcp->tunneled; skb_mark_napi_id(skb, napi); if (rxcp->vlanf) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag); netif_receive_skb(skb); } /* Process the RX completion indicated by rxcp when GRO is enabled */ static void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi, struct be_rx_compl_info *rxcp) { struct be_adapter *adapter = rxo->adapter; struct be_rx_page_info *page_info; struct sk_buff *skb = NULL; u16 remaining, curr_frag_len; u16 i, j; skb = napi_get_frags(napi); if (!skb) { be_rx_compl_discard(rxo, rxcp); return; } remaining = rxcp->pkt_size; for (i = 0, j = -1; i < rxcp->num_rcvd; i++) { page_info = get_rx_page_info(rxo); curr_frag_len = min(remaining, rx_frag_size); /* Coalesce all frags from the same physical page in one slot */ if (i == 0 || page_info->page_offset == 0) { /* First frag or Fresh page */ j++; skb_frag_set_page(skb, j, page_info->page); skb_shinfo(skb)->frags[j].page_offset = page_info->page_offset; skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0); } else { put_page(page_info->page); } skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len); skb->truesize += rx_frag_size; remaining -= curr_frag_len; 
memset(page_info, 0, sizeof(*page_info)); } BUG_ON(j > MAX_SKB_FRAGS); skb_shinfo(skb)->nr_frags = j + 1; skb->len = rxcp->pkt_size; skb->data_len = rxcp->pkt_size; skb->ip_summed = CHECKSUM_UNNECESSARY; skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]); if (adapter->netdev->features & NETIF_F_RXHASH) skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3); skb->csum_level = rxcp->tunneled; skb_mark_napi_id(skb, napi); if (rxcp->vlanf) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag); napi_gro_frags(napi); } static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl, struct be_rx_compl_info *rxcp) { rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl); rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl); rxcp->err = GET_RX_COMPL_V1_BITS(err, compl); rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl); rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl); rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl); rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl); rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl); rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl); rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl); rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl); if (rxcp->vlanf) { rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl); rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl); } rxcp->port = GET_RX_COMPL_V1_BITS(port, compl); rxcp->tunneled = GET_RX_COMPL_V1_BITS(tunneled, compl); } static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl, struct be_rx_compl_info *rxcp) { rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl); rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl); rxcp->err = GET_RX_COMPL_V0_BITS(err, compl); rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl); rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl); rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl); rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl); rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl); rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl); rxcp->pkt_type 
= GET_RX_COMPL_V0_BITS(cast_enc, compl); rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl); if (rxcp->vlanf) { rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl); rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl); } rxcp->port = GET_RX_COMPL_V0_BITS(port, compl); rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl); } static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) { struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq); struct be_rx_compl_info *rxcp = &rxo->rxcp; struct be_adapter *adapter = rxo->adapter; /* For checking the valid bit it is Ok to use either definition as the * valid bit is at the same position in both v0 and v1 Rx compl */ if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0) return NULL; rmb(); be_dws_le_to_cpu(compl, sizeof(*compl)); if (adapter->be3_native) be_parse_rx_compl_v1(compl, rxcp); else be_parse_rx_compl_v0(compl, rxcp); if (rxcp->ip_frag) rxcp->l4_csum = 0; if (rxcp->vlanf) { /* In QNQ modes, if qnq bit is not set, then the packet was * tagged only with the transparent outer vlan-tag and must * not be treated as a vlan packet by host */ if (be_is_qnq_mode(adapter) && !rxcp->qnq) rxcp->vlanf = 0; if (!lancer_chip(adapter)) rxcp->vlan_tag = swab16(rxcp->vlan_tag); if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) && !test_bit(rxcp->vlan_tag, adapter->vids)) rxcp->vlanf = 0; } /* As the compl has been parsed, reset it; we wont touch it again */ compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0; queue_tail_inc(&rxo->cq); return rxcp; } static inline struct page *be_alloc_pages(u32 size, gfp_t gfp) { u32 order = get_order(size); if (order > 0) gfp |= __GFP_COMP; return alloc_pages(gfp, order); } /* * Allocate a page, split it to fragments of size rx_frag_size and post as * receive buffers to BE */ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed) { struct be_adapter *adapter = rxo->adapter; struct be_rx_page_info *page_info = NULL, 
*prev_page_info = NULL; struct be_queue_info *rxq = &rxo->q; struct page *pagep = NULL; struct device *dev = &adapter->pdev->dev; struct be_eth_rx_d *rxd; u64 page_dmaaddr = 0, frag_dmaaddr; u32 posted, page_offset = 0, notify = 0; page_info = &rxo->page_info_tbl[rxq->head]; for (posted = 0; posted < frags_needed && !page_info->page; posted++) { if (!pagep) { pagep = be_alloc_pages(adapter->big_page_size, gfp); if (unlikely(!pagep)) { rx_stats(rxo)->rx_post_fail++; break; } page_dmaaddr = dma_map_page(dev, pagep, 0, adapter->big_page_size, DMA_FROM_DEVICE); if (dma_mapping_error(dev, page_dmaaddr)) { put_page(pagep); pagep = NULL; adapter->drv_stats.dma_map_errors++; break; } page_offset = 0; } else { get_page(pagep); page_offset += rx_frag_size; } page_info->page_offset = page_offset; page_info->page = pagep; rxd = queue_head_node(rxq); frag_dmaaddr = page_dmaaddr + page_info->page_offset; rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); /* Any space left in the current big page for another frag? 
*/ if ((page_offset + rx_frag_size + rx_frag_size) > adapter->big_page_size) { pagep = NULL; page_info->last_frag = true; dma_unmap_addr_set(page_info, bus, page_dmaaddr); } else { dma_unmap_addr_set(page_info, bus, frag_dmaaddr); } prev_page_info = page_info; queue_head_inc(rxq); page_info = &rxo->page_info_tbl[rxq->head]; } /* Mark the last frag of a page when we break out of the above loop * with no more slots available in the RXQ */ if (pagep) { prev_page_info->last_frag = true; dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr); } if (posted) { atomic_add(posted, &rxq->used); if (rxo->rx_post_starved) rxo->rx_post_starved = false; do { notify = min(256u, posted); be_rxq_notify(adapter, rxq->id, notify); posted -= notify; } while (posted); } else if (atomic_read(&rxq->used) == 0) { /* Let be_worker replenish when memory is available */ rxo->rx_post_starved = true; } } static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) { struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq); if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0) return NULL; rmb(); be_dws_le_to_cpu(txcp, sizeof(*txcp)); txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0; queue_tail_inc(tx_cq); return txcp; } static u16 be_tx_compl_process(struct be_adapter *adapter, struct be_tx_obj *txo, u16 last_index) { struct be_queue_info *txq = &txo->q; struct be_eth_wrb *wrb; struct sk_buff **sent_skbs = txo->sent_skb_list; struct sk_buff *sent_skb; u16 cur_index, num_wrbs = 1; /* account for hdr wrb */ bool unmap_skb_hdr = true; sent_skb = sent_skbs[txq->tail]; BUG_ON(!sent_skb); sent_skbs[txq->tail] = NULL; /* skip header wrb */ queue_tail_inc(txq); do { cur_index = txq->tail; wrb = queue_tail_node(txq); unmap_tx_frag(&adapter->pdev->dev, wrb, (unmap_skb_hdr && skb_headlen(sent_skb))); unmap_skb_hdr = false; num_wrbs++; queue_tail_inc(txq); } while (cur_index != last_index); dev_consume_skb_any(sent_skb); return num_wrbs; } /* Return the number of events 
in the event queue */ static inline int events_get(struct be_eq_obj *eqo) { struct be_eq_entry *eqe; int num = 0; do { eqe = queue_tail_node(&eqo->q); if (eqe->evt == 0) break; rmb(); eqe->evt = 0; num++; queue_tail_inc(&eqo->q); } while (true); return num; } /* Leaves the EQ is disarmed state */ static void be_eq_clean(struct be_eq_obj *eqo) { int num = events_get(eqo); be_eq_notify(eqo->adapter, eqo->q.id, false, true, num); } static void be_rx_cq_clean(struct be_rx_obj *rxo) { struct be_rx_page_info *page_info; struct be_queue_info *rxq = &rxo->q; struct be_queue_info *rx_cq = &rxo->cq; struct be_rx_compl_info *rxcp; struct be_adapter *adapter = rxo->adapter; int flush_wait = 0; /* Consume pending rx completions. * Wait for the flush completion (identified by zero num_rcvd) * to arrive. Notify CQ even when there are no more CQ entries * for HW to flush partially coalesced CQ entries. * In Lancer, there is no need to wait for flush compl. */ for (;;) { rxcp = be_rx_compl_get(rxo); if (!rxcp) { if (lancer_chip(adapter)) break; if (flush_wait++ > 10 || be_hw_error(adapter)) { dev_warn(&adapter->pdev->dev, "did not receive flush compl\n"); break; } be_cq_notify(adapter, rx_cq->id, true, 0); mdelay(1); } else { be_rx_compl_discard(rxo, rxcp); be_cq_notify(adapter, rx_cq->id, false, 1); if (rxcp->num_rcvd == 0) break; } } /* After cleanup, leave the CQ in unarmed state */ be_cq_notify(adapter, rx_cq->id, false, 0); /* Then free posted rx buffers that were not used */ while (atomic_read(&rxq->used) > 0) { page_info = get_rx_page_info(rxo); put_page(page_info->page); memset(page_info, 0, sizeof(*page_info)); } BUG_ON(atomic_read(&rxq->used)); rxq->tail = 0; rxq->head = 0; } static void be_tx_compl_clean(struct be_adapter *adapter) { struct be_tx_obj *txo; struct be_queue_info *txq; struct be_eth_tx_compl *txcp; u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0; struct sk_buff *sent_skb; bool dummy_wrb; int i, pending_txqs; /* Stop polling for compls when HW has been silent 
for 10ms */ do { pending_txqs = adapter->num_tx_qs; for_all_tx_queues(adapter, txo, i) { cmpl = 0; num_wrbs = 0; txq = &txo->q; while ((txcp = be_tx_compl_get(&txo->cq))) { end_idx = GET_TX_COMPL_BITS(wrb_index, txcp); num_wrbs += be_tx_compl_process(adapter, txo, end_idx); cmpl++; } if (cmpl) { be_cq_notify(adapter, txo->cq.id, false, cmpl); atomic_sub(num_wrbs, &txq->used); timeo = 0; } if (atomic_read(&txq->used) == 0) pending_txqs--; } if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter)) break; mdelay(1); } while (true); for_all_tx_queues(adapter, txo, i) { txq = &txo->q; if (atomic_read(&txq->used)) dev_err(&adapter->pdev->dev, "%d pending tx-compls\n", atomic_read(&txq->used)); /* free posted tx for which compls will never arrive */ while (atomic_read(&txq->used)) { sent_skb = txo->sent_skb_list[txq->tail]; end_idx = txq->tail; num_wrbs = wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb); index_adv(&end_idx, num_wrbs - 1, txq->len); num_wrbs = be_tx_compl_process(adapter, txo, end_idx); atomic_sub(num_wrbs, &txq->used); } } } static void be_evt_queues_destroy(struct be_adapter *adapter) { struct be_eq_obj *eqo; int i; for_all_evt_queues(adapter, eqo, i) { if (eqo->q.created) { be_eq_clean(eqo); be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); napi_hash_del(&eqo->napi); netif_napi_del(&eqo->napi); } be_queue_free(adapter, &eqo->q); } } static int be_evt_queues_create(struct be_adapter *adapter) { struct be_queue_info *eq; struct be_eq_obj *eqo; struct be_aic_obj *aic; int i, rc; adapter->num_evt_qs = min_t(u16, num_irqs(adapter), adapter->cfg_num_qs); for_all_evt_queues(adapter, eqo, i) { netif_napi_add(adapter->netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT); napi_hash_add(&eqo->napi); aic = &adapter->aic_obj[i]; eqo->adapter = adapter; eqo->idx = i; aic->max_eqd = BE_MAX_EQD; aic->enable = true; eq = &eqo->q; rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)); if (rc) return rc; rc = be_cmd_eq_create(adapter, eqo); if (rc) return rc; } 
return 0; } static void be_mcc_queues_destroy(struct be_adapter *adapter) { struct be_queue_info *q; q = &adapter->mcc_obj.q; if (q->created) be_cmd_q_destroy(adapter, q, QTYPE_MCCQ); be_queue_free(adapter, q); q = &adapter->mcc_obj.cq; if (q->created) be_cmd_q_destroy(adapter, q, QTYPE_CQ); be_queue_free(adapter, q); } /* Must be called only after TX qs are created as MCC shares TX EQ */ static int be_mcc_queues_create(struct be_adapter *adapter) { struct be_queue_info *q, *cq; cq = &adapter->mcc_obj.cq; if (be_queue_alloc(adapter, cq, MCC_CQ_LEN, sizeof(struct be_mcc_compl))) goto err; /* Use the default EQ for MCC completions */ if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0)) goto mcc_cq_free; q = &adapter->mcc_obj.q; if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) goto mcc_cq_destroy; if (be_cmd_mccq_create(adapter, q, cq)) goto mcc_q_free; return 0; mcc_q_free: be_queue_free(adapter, q); mcc_cq_destroy: be_cmd_q_destroy(adapter, cq, QTYPE_CQ); mcc_cq_free: be_queue_free(adapter, cq); err: return -1; } static void be_tx_queues_destroy(struct be_adapter *adapter) { struct be_queue_info *q; struct be_tx_obj *txo; u8 i; for_all_tx_queues(adapter, txo, i) { q = &txo->q; if (q->created) be_cmd_q_destroy(adapter, q, QTYPE_TXQ); be_queue_free(adapter, q); q = &txo->cq; if (q->created) be_cmd_q_destroy(adapter, q, QTYPE_CQ); be_queue_free(adapter, q); } } static int be_tx_qs_create(struct be_adapter *adapter) { struct be_queue_info *cq, *eq; struct be_tx_obj *txo; int status, i; adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter)); for_all_tx_queues(adapter, txo, i) { cq = &txo->cq; status = be_queue_alloc(adapter, cq, TX_CQ_LEN, sizeof(struct be_eth_tx_compl)); if (status) return status; u64_stats_init(&txo->stats.sync); u64_stats_init(&txo->stats.sync_compl); /* If num_evt_qs is less than num_tx_qs, then more than * one txq share an eq */ eq = &adapter->eq_obj[i % adapter->num_evt_qs].q; status = 
be_cmd_cq_create(adapter, cq, eq, false, 3); if (status) return status; status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN, sizeof(struct be_eth_wrb)); if (status) return status; status = be_cmd_txq_create(adapter, txo); if (status) return status; } dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n", adapter->num_tx_qs); return 0; } static void be_rx_cqs_destroy(struct be_adapter *adapter) { struct be_queue_info *q; struct be_rx_obj *rxo; int i; for_all_rx_queues(adapter, rxo, i) { q = &rxo->cq; if (q->created) be_cmd_q_destroy(adapter, q, QTYPE_CQ); be_queue_free(adapter, q); } } static int be_rx_cqs_create(struct be_adapter *adapter) { struct be_queue_info *eq, *cq; struct be_rx_obj *rxo; int rc, i; /* We can create as many RSS rings as there are EQs. */ adapter->num_rx_qs = adapter->num_evt_qs; /* We'll use RSS only if atleast 2 RSS rings are supported. * When RSS is used, we'll need a default RXQ for non-IP traffic. */ if (adapter->num_rx_qs > 1) adapter->num_rx_qs++; adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; for_all_rx_queues(adapter, rxo, i) { rxo->adapter = adapter; cq = &rxo->cq; rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, sizeof(struct be_eth_rx_compl)); if (rc) return rc; u64_stats_init(&rxo->stats.sync); eq = &adapter->eq_obj[i % adapter->num_evt_qs].q; rc = be_cmd_cq_create(adapter, cq, eq, false, 3); if (rc) return rc; } dev_info(&adapter->pdev->dev, "created %d RSS queue(s) and 1 default RX queue\n", adapter->num_rx_qs - 1); return 0; } static irqreturn_t be_intx(int irq, void *dev) { struct be_eq_obj *eqo = dev; struct be_adapter *adapter = eqo->adapter; int num_evts = 0; /* IRQ is not expected when NAPI is scheduled as the EQ * will not be armed. * But, this can happen on Lancer INTx where it takes * a while to de-assert INTx or in BE2 where occasionaly * an interrupt may be raised even when EQ is unarmed. * If NAPI is already scheduled, then counting & notifying * events will orphan them. 
*/ if (napi_schedule_prep(&eqo->napi)) { num_evts = events_get(eqo); __napi_schedule(&eqo->napi); if (num_evts) eqo->spurious_intr = 0; } be_eq_notify(adapter, eqo->q.id, false, true, num_evts); /* Return IRQ_HANDLED only for the the first spurious intr * after a valid intr to stop the kernel from branding * this irq as a bad one! */ if (num_evts || eqo->spurious_intr++ == 0) return IRQ_HANDLED; else return IRQ_NONE; } static irqreturn_t be_msix(int irq, void *dev) { struct be_eq_obj *eqo = dev; be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0); napi_schedule(&eqo->napi); return IRQ_HANDLED; } static inline bool do_gro(struct be_rx_compl_info *rxcp) { return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false; } static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi, int budget, int polling) { struct be_adapter *adapter = rxo->adapter; struct be_queue_info *rx_cq = &rxo->cq; struct be_rx_compl_info *rxcp; u32 work_done; u32 frags_consumed = 0; for (work_done = 0; work_done < budget; work_done++) { rxcp = be_rx_compl_get(rxo); if (!rxcp) break; /* Is it a flush compl that has no data */ if (unlikely(rxcp->num_rcvd == 0)) goto loop_continue; /* Discard compl with partial DMA Lancer B0 */ if (unlikely(!rxcp->pkt_size)) { be_rx_compl_discard(rxo, rxcp); goto loop_continue; } /* On BE drop pkts that arrive due to imperfect filtering in * promiscuous mode on some skews */ if (unlikely(rxcp->port != adapter->port_num && !lancer_chip(adapter))) { be_rx_compl_discard(rxo, rxcp); goto loop_continue; } /* Don't do gro when we're busy_polling */ if (do_gro(rxcp) && polling != BUSY_POLLING) be_rx_compl_process_gro(rxo, napi, rxcp); else be_rx_compl_process(rxo, napi, rxcp); loop_continue: frags_consumed += rxcp->num_rcvd; be_rx_stats_update(rxo, rxcp); } if (work_done) { be_cq_notify(adapter, rx_cq->id, true, work_done); /* When an rx-obj gets into post_starved state, just * let be_worker do the posting. 
*/ if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM && !rxo->rx_post_starved) be_post_rx_frags(rxo, GFP_ATOMIC, max_t(u32, MAX_RX_POST, frags_consumed)); } return work_done; } static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status) { switch (status) { case BE_TX_COMP_HDR_PARSE_ERR: tx_stats(txo)->tx_hdr_parse_err++; break; case BE_TX_COMP_NDMA_ERR: tx_stats(txo)->tx_dma_err++; break; case BE_TX_COMP_ACL_ERR: tx_stats(txo)->tx_spoof_check_err++; break; } } static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status) { switch (status) { case LANCER_TX_COMP_LSO_ERR: tx_stats(txo)->tx_tso_err++; break; case LANCER_TX_COMP_HSW_DROP_MAC_ERR: case LANCER_TX_COMP_HSW_DROP_VLAN_ERR: tx_stats(txo)->tx_spoof_check_err++; break; case LANCER_TX_COMP_QINQ_ERR: tx_stats(txo)->tx_qinq_err++; break; case LANCER_TX_COMP_PARITY_ERR: tx_stats(txo)->tx_internal_parity_err++; break; case LANCER_TX_COMP_DMA_ERR: tx_stats(txo)->tx_dma_err++; break; } } static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo, int idx) { struct be_eth_tx_compl *txcp; int num_wrbs = 0, work_done = 0; u32 compl_status; u16 last_idx; while ((txcp = be_tx_compl_get(&txo->cq))) { last_idx = GET_TX_COMPL_BITS(wrb_index, txcp); num_wrbs += be_tx_compl_process(adapter, txo, last_idx); work_done++; compl_status = GET_TX_COMPL_BITS(status, txcp); if (compl_status) { if (lancer_chip(adapter)) lancer_update_tx_err(txo, compl_status); else be_update_tx_err(txo, compl_status); } } if (work_done) { be_cq_notify(adapter, txo->cq.id, true, work_done); atomic_sub(num_wrbs, &txo->q.used); /* As Tx wrbs have been freed up, wake up netdev queue * if it was stopped due to lack of tx wrbs. 
*/
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}

/* NAPI poll handler: drains TX and RX completions for all queues sharing
 * this EQ, then re-arms the EQ if the budget was not exhausted
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) RX path: reap a few compls without NAPI */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif

/* Detect unrecoverable HW/FW errors (SLIPORT status on Lancer, UE bits
 * on BE) and take the link down when one is found
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}

/* Release all MSI-x vectors owned by this function */
static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
		adapter->num_msix_roce_vec = 0;
	}
}

/* Allocate MSI-x vectors for NIC (and, if supported, RoCE) use */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}

/* Map an EQ object to the MSI-x vector assigned to it */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}

/* Request an irq for every event queue's MSI-x vector */
static int
be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free only the vectors that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}

/* Register irq(s): prefer MSI-x; fall back to INTx on the PF only */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

/* Undo be_irq_register(): free the INTx or per-EQ MSI-x irqs */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);

done:
	adapter->isr_registered = false;
}

/* Destroy all RX queues and drain/free the buffers posted on them */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}

/* ndo_stop handler: quiesce NAPI, TX and RX, then tear down irqs */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}

/* Create the RX queues, program the RSS indirection table/key and post
 * the initial receive buffers
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			rss->rss_flags |=
RSS_ENABLE_UDP_IPV4 |
					RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}

/* ndo_open handler: create RX queues, register irqs, enable NAPI and
 * start the data path
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

/* Enable/disable magic-packet Wake-on-LAN in the FW and PCI core */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size,
					  cmd.va, cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
						 adapter->netdev->dev_addr,
						 &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac,
						vf_cfg->if_handle, vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

/* Read back the MAC address the FW actually programmed for each VF */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}

/* Tear down SR-IOV state: VF MACs, VF interfaces and the PCI VF devices */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}

/* Destroy all h/w queues owned by this function */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}

/* Stop the periodic worker if it has been scheduled */
static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}

/* Delete the primary MAC and the uc-mac list programmed on the i/f */
static void be_mac_clear(struct be_adapter *adapter)
{
	int i;

	if (adapter->pmac_id) {
		for (i = 0; i < (adapter->uc_macs + 1); i++)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		adapter->uc_macs = 0;

		kfree(adapter->pmac_id);
		adapter->pmac_id = NULL;
	}
}

#ifdef CONFIG_BE2NET_VXLAN
/* Turn off VxLAN offloads and clear the related netdev feature flags */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif

/* Undo be_setup(): release VFs, MACs, the i/f object and all queues */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
*/
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}

/* Create an i/f object in the FW for each VF */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	struct be_vf_cfg *vf_cfg;
	u32 cap_flags, en_flags, vf;
	int status = 0;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   vf + 1);
			if (!status)
				cap_flags = res.if_cap_flags;
		}

		/* If a FW profile exists, then cap_flags are updated */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			goto err;
	}
err:
	return status;
}

/* Allocate and initialize the per-VF config array */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}

/* Bring up SR-IOV: create/query VF i/fs and MACs, grant filter-mgmt
 * privileges and enable the PCI VF devices
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges,
						  vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev,
					 "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}

/* Converting function_mode bits on BE3 to SH mc_type enums */
static u8 be_convert_mc_type(u32 function_mode)
{
	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
		return vNIC1;
	else if (function_mode & QNQ_MODE)
		return FLEX10;
	else if (function_mode & VNIC_MODE)
		return vNIC2;
	else if (function_mode & UMC_ENABLED)
		return UMC;
	else
		return MC_NONE;
}

/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
					BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}

/* Reset the driver's soft-state to pre-setup defaults */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;
	adapter->if_handle = -1;
	adapter->be3_native = false;
	adapter->promiscuous = false;
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}

/* Query the SR-IOV limits from FW and validate the num_vfs module param */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	/* Some old versions of BE3 FW don't report max_vfs value */
	be_cmd_get_profile_config(adapter, &res, 0);

	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	if (!be_max_vfs(adapter)) {
		if (num_vfs)
			dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
		adapter->num_vfs = 0;
		return 0;
	}

	pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* validate num_vfs module param */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(dev, "%d VFs are already enabled\n", old_vfs);
		if (old_vfs != num_vfs)
			dev_warn(dev, "Ignoring num_vfs=%d setting\n",
				 num_vfs);
		adapter->num_vfs = old_vfs;
	} else {
		if (num_vfs > be_max_vfs(adapter)) {
			dev_info(dev,
				 "Resources unavailable to init %d VFs\n",
				 num_vfs);
			dev_info(dev, "Limiting to %d VFs\n",
				 be_max_vfs(adapter));
		}
		adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
	}

	return 0;
}

/* Discover this function's resource limits and cache them in the adapter */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}

/* Redistribute PF-pool resources across the requested number of VFs */
static void be_sriov_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_get_sriov_config(adapter);
	if (status) {
		dev_err(dev, "Failed to query SR-IOV configuration\n");
		dev_err(dev, "SR-IOV cannot be enabled\n");
		return;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are equally distributed across the max-number of
	 * VFs. The user may request only a subset of the max-vfs to be
	 * enabled. Based on num_vfs, redistribute the resources across
	 * num_vfs so that each VF will have access to more number of
	 * resources. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
		status = be_cmd_set_sriov_config(adapter,
						 adapter->pool_res,
						 adapter->num_vfs);
		if (status)
			dev_err(dev, "Failed to optimize SR-IOV resources\n");
	}
}

/* Query FW config/profile, size SR-IOV and allocate the pmac-id table */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}

/* Read/derive the primary MAC and program it on the interface */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	} else {
		/* Maybe the HW was reset; dev_addr must be re-programmed */
		memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
	}

	/* For BE3-R VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter)))
		be_cmd_pmac_add(adapter, mac, adapter->if_handle,
				&adapter->pmac_id[0], 0);
	return 0;
}

/* Kick the periodic worker and remember that it is running */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}

/* Create all h/w queues and size the netdev's real queue counts */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if
(status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}

/* Re-create all queues, e.g. after a set-channels request */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}

/* One-time (per probe/reset) adapter bring-up: FW config, irq vectors,
 * i/f creation, queues, MAC programming and optional SR-IOV setup
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: poke every EQ as an interrupt would */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

/* Marker that prefixes the flash-section directory in a UFI image */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

/* Only the TN_8022 BASE-T 10Gb PHY carries flashable PHY FW */
static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}

/* Check whether a component of the given type exists in the UFI image */
static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}

/* Locate the flash-section directory (by its cookie) in the FW image */
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}

/* Compare the CRC of the on-flash region with the image's; sets
 * *crc_match so the caller can skip flashing identical regions
 */
static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
			      u32 img_offset, u32 img_size, int hdr_size,
			      u16 img_optype, bool *crc_match)
{
	u32 crc_offset;
	int status;
	u8 crc[4];

	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
	if (status)
		return status;

	crc_offset = hdr_size + img_offset + img_size - 4;

	/* Skip flashing, if crc of flashed region matches */
	if (!memcmp(crc, p + crc_offset, 4))
		*crc_match = true;
	else
		*crc_match = false;

	return status;
}

/* Write one image component to flash in 32KB chunks */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}

/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;
	struct flash_comp
gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* BE2 (gen2) flash layout */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs a FW version new enough to accept it */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT,
						    &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}

/* Map a flash-section entry to the optype used when flashing it; falls
 * back to a type-based mapping for images that predate the optype field
 */
static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
{
	u32 img_type = le32_to_cpu(fsec_entry.type);
	u16 img_optype = le16_to_cpu(fsec_entry.optype);

	if (img_optype != 0xFFFF)
		return img_optype;

	switch (img_type) {
	case IMAGE_FIRMWARE_iSCSI:
		img_optype = OPTYPE_ISCSI_ACTIVE;
		break;
	case IMAGE_BOOT_CODE:
		img_optype = OPTYPE_REDBOOT;
		break;
	case IMAGE_OPTION_ROM_ISCSI:
		img_optype = OPTYPE_BIOS;
		break;
	case IMAGE_OPTION_ROM_PXE:
		img_optype = OPTYPE_PXE_BIOS;
		break;
	case IMAGE_OPTION_ROM_FCoE:
		img_optype = OPTYPE_FCOE_BIOS;
		break;
	case IMAGE_FIRMWARE_BACKUP_iSCSI:
		img_optype = OPTYPE_ISCSI_BACKUP;
		break;
	case IMAGE_NCSI:
		img_optype = OPTYPE_NCSI_FW;
break; case IMAGE_FLASHISM_JUMPVECTOR: img_optype = OPTYPE_FLASHISM_JUMPVECTOR; break; case IMAGE_FIRMWARE_PHY: img_optype = OPTYPE_SH_PHY_FW; break; case IMAGE_REDBOOT_DIR: img_optype = OPTYPE_REDBOOT_DIR; break; case IMAGE_REDBOOT_CONFIG: img_optype = OPTYPE_REDBOOT_CONFIG; break; case IMAGE_UFI_DIR: img_optype = OPTYPE_UFI_DIR; break; default: break; } return img_optype; } static int be_flash_skyhawk(struct be_adapter *adapter, const struct firmware *fw, struct be_dma_mem *flash_cmd, int num_of_images) { int img_hdrs_size = num_of_images * sizeof(struct image_hdr); struct device *dev = &adapter->pdev->dev; struct flash_section_info *fsec = NULL; u32 img_offset, img_size, img_type; int status, i, filehdr_size; bool crc_match, old_fw_img; u16 img_optype; const u8 *p; filehdr_size = sizeof(struct flash_file_hdr_g3); fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); if (!fsec) { dev_err(dev, "Invalid Cookie. FW image may be corrupted\n"); return -EINVAL; } for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) { img_offset = le32_to_cpu(fsec->fsec_entry[i].offset); img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size); img_type = le32_to_cpu(fsec->fsec_entry[i].type); img_optype = be_get_img_optype(fsec->fsec_entry[i]); old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF; if (img_optype == 0xFFFF) continue; /* Don't bother verifying CRC if an old FW image is being * flashed */ if (old_fw_img) goto flash; status = be_check_flash_crc(adapter, fw->data, img_offset, img_size, filehdr_size + img_hdrs_size, img_optype, &crc_match); /* The current FW image on the card does not recognize the new * FLASH op_type. The FW download is partially complete. * Reboot the server now to enable FW image to recognize the * new FLASH op_type. To complete the remaining process, * download the same FW again after the reboot. */ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST || base_status(status) == MCC_STATUS_ILLEGAL_FIELD) { dev_err(dev, "Flash incomplete. 
Reset the server\n"); dev_err(dev, "Download FW image again after reset\n"); return -EAGAIN; } else if (status) { dev_err(dev, "Could not get CRC for 0x%x region\n", img_optype); return -EFAULT; } if (crc_match) continue; flash: p = fw->data + filehdr_size + img_offset + img_hdrs_size; if (p + img_size > fw->data + fw->size) return -1; status = be_flash(adapter, p, flash_cmd, img_optype, img_size); /* For old FW images ignore ILLEGAL_FIELD error or errors on * UFI_DIR region */ if (old_fw_img && (base_status(status) == MCC_STATUS_ILLEGAL_FIELD || (img_optype == OPTYPE_UFI_DIR && base_status(status) == MCC_STATUS_FAILED))) { continue; } else if (status) { dev_err(dev, "Flashing section type 0x%x failed\n", img_type); return -EFAULT; } } return 0; } static int lancer_fw_download(struct be_adapter *adapter, const struct firmware *fw) { #define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024) #define LANCER_FW_DOWNLOAD_LOCATION "/prg" struct device *dev = &adapter->pdev->dev; struct be_dma_mem flash_cmd; const u8 *data_ptr = NULL; u8 *dest_image_ptr = NULL; size_t image_size = 0; u32 chunk_size = 0; u32 data_written = 0; u32 offset = 0; int status = 0; u8 add_status = 0; u8 change_status; if (!IS_ALIGNED(fw->size, sizeof(u32))) { dev_err(dev, "FW image size should be multiple of 4\n"); return -EINVAL; } flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) + LANCER_FW_DOWNLOAD_CHUNK; flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, GFP_KERNEL); if (!flash_cmd.va) return -ENOMEM; dest_image_ptr = flash_cmd.va + sizeof(struct lancer_cmd_req_write_object); image_size = fw->size; data_ptr = fw->data; while (image_size) { chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK); /* Copy the image chunk content. 
*/ memcpy(dest_image_ptr, data_ptr, chunk_size); status = lancer_cmd_write_object(adapter, &flash_cmd, chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION, &data_written, &change_status, &add_status); if (status) break; offset += data_written; data_ptr += data_written; image_size -= data_written; } if (!status) { /* Commit the FW written */ status = lancer_cmd_write_object(adapter, &flash_cmd, 0, offset, LANCER_FW_DOWNLOAD_LOCATION, &data_written, &change_status, &add_status); } dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); if (status) { dev_err(dev, "Firmware load error\n"); return be_cmd_status(status); } dev_info(dev, "Firmware flashed successfully\n"); if (change_status == LANCER_FW_RESET_NEEDED) { dev_info(dev, "Resetting adapter to activate new FW\n"); status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK); if (status) { dev_err(dev, "Adapter busy, could not reset FW\n"); dev_err(dev, "Reboot server to activate new FW\n"); } } else if (change_status != LANCER_NO_RESET_NEEDED) { dev_info(dev, "Reboot server to activate new FW\n"); } return 0; } #define UFI_TYPE2 2 #define UFI_TYPE3 3 #define UFI_TYPE3R 10 #define UFI_TYPE4 4 static int be_get_ufi_type(struct be_adapter *adapter, struct flash_file_hdr_g3 *fhdr) { if (!fhdr) goto be_get_ufi_exit; if (skyhawk_chip(adapter) && fhdr->build[0] == '4') return UFI_TYPE4; else if (BE3_chip(adapter) && fhdr->build[0] == '3') { if (fhdr->asic_type_rev == 0x10) return UFI_TYPE3R; else return UFI_TYPE3; } else if (BE2_chip(adapter) && fhdr->build[0] == '2') return UFI_TYPE2; be_get_ufi_exit: dev_err(&adapter->pdev->dev, "UFI and Interface are not compatible for flashing\n"); return -1; } static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) { struct flash_file_hdr_g3 *fhdr3; struct image_hdr *img_hdr_ptr = NULL; struct be_dma_mem flash_cmd; const u8 *p; int status = 0, i = 0, num_imgs = 0, ufi_type = 0; flash_cmd.size = sizeof(struct be_cmd_write_flashrom); 
flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, &flash_cmd.dma, GFP_KERNEL); if (!flash_cmd.va) { status = -ENOMEM; goto be_fw_exit; } p = fw->data; fhdr3 = (struct flash_file_hdr_g3 *)p; ufi_type = be_get_ufi_type(adapter, fhdr3); num_imgs = le32_to_cpu(fhdr3->num_imgs); for (i = 0; i < num_imgs; i++) { img_hdr_ptr = (struct image_hdr *)(fw->data + (sizeof(struct flash_file_hdr_g3) + i * sizeof(struct image_hdr))); if (le32_to_cpu(img_hdr_ptr->imageid) == 1) { switch (ufi_type) { case UFI_TYPE4: status = be_flash_skyhawk(adapter, fw, &flash_cmd, num_imgs); break; case UFI_TYPE3R: status = be_flash_BEx(adapter, fw, &flash_cmd, num_imgs); break; case UFI_TYPE3: /* Do not flash this ufi on BE3-R cards */ if (adapter->asic_rev < 0x10) status = be_flash_BEx(adapter, fw, &flash_cmd, num_imgs); else { status = -EINVAL; dev_err(&adapter->pdev->dev, "Can't load BE3 UFI on BE3R\n"); } } } } if (ufi_type == UFI_TYPE2) status = be_flash_BEx(adapter, fw, &flash_cmd, 0); else if (ufi_type == -1) status = -EINVAL; dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); if (status) { dev_err(&adapter->pdev->dev, "Firmware load error\n"); goto be_fw_exit; } dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n"); be_fw_exit: return status; } int be_load_fw(struct be_adapter *adapter, u8 *fw_file) { const struct firmware *fw; int status; if (!netif_running(adapter->netdev)) { dev_err(&adapter->pdev->dev, "Firmware load not allowed (interface is down)\n"); return -ENETDOWN; } status = request_firmware(&fw, fw_file, &adapter->pdev->dev); if (status) goto fw_exit; dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file); if (lancer_chip(adapter)) status = lancer_fw_download(adapter, fw); else status = be_fw_download(adapter, fw); if (!status) be_cmd_get_fw_ver(adapter); fw_exit: release_firmware(fw); return status; } static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh) { struct 
be_adapter *adapter = netdev_priv(dev); struct nlattr *attr, *br_spec; int rem; int status = 0; u16 mode = 0; if (!sriov_enabled(adapter)) return -EOPNOTSUPP; br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (!br_spec) return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; if (nla_len(attr) < sizeof(mode)) return -EINVAL; mode = nla_get_u16(attr); if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) return -EINVAL; status = be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle, mode == BRIDGE_MODE_VEPA ? PORT_FWD_TYPE_VEPA : PORT_FWD_TYPE_VEB); if (status) goto err; dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n", mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); return status; } err: dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n", mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); return status; } static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask) { struct be_adapter *adapter = netdev_priv(dev); int status = 0; u8 hsw_mode; if (!sriov_enabled(adapter)) return 0; /* BE and Lancer chips support VEB mode only */ if (BEx_chip(adapter) || lancer_chip(adapter)) { hsw_mode = PORT_FWD_TYPE_VEB; } else { status = be_cmd_get_hsw_config(adapter, NULL, 0, adapter->if_handle, &hsw_mode); if (status) return 0; } return ndo_dflt_bridge_getlink(skb, pid, seq, dev, hsw_mode == PORT_FWD_TYPE_VEPA ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB, 0, 0); } #ifdef CONFIG_BE2NET_VXLAN /* VxLAN offload Notes: * * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload * is expected to work across all types of IP tunnels once exported. Skyhawk * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN * offloads in hw_enc_features only when a VxLAN port is added. 
If other (non * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for * those other tunnels are unexported on the fly through ndo_features_check(). * * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack * adds more than one port, disable offloads and don't re-enable them again * until after all the tunnels are removed. */ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { struct be_adapter *adapter = netdev_priv(netdev); struct device *dev = &adapter->pdev->dev; int status; if (lancer_chip(adapter) || BEx_chip(adapter)) return; if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { dev_info(dev, "Only one UDP port supported for VxLAN offloads\n"); dev_info(dev, "Disabling VxLAN offloads\n"); adapter->vxlan_port_count++; goto err; } if (adapter->vxlan_port_count++ >= 1) return; status = be_cmd_manage_iface(adapter, adapter->if_handle, OP_CONVERT_NORMAL_TO_TUNNEL); if (status) { dev_warn(dev, "Failed to convert normal interface to tunnel\n"); goto err; } status = be_cmd_set_vxlan_port(adapter, port); if (status) { dev_warn(dev, "Failed to add VxLAN port\n"); goto err; } adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS; adapter->vxlan_port = port; netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL; netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; netdev->features |= NETIF_F_GSO_UDP_TUNNEL; dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", be16_to_cpu(port)); return; err: be_disable_vxlan_offloads(adapter); } static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { struct be_adapter *adapter = netdev_priv(netdev); if (lancer_chip(adapter) || BEx_chip(adapter)) return; if (adapter->vxlan_port != port) goto done; be_disable_vxlan_offloads(adapter); dev_info(&adapter->pdev->dev, "Disabled VxLAN offloads for UDP port %d\n", be16_to_cpu(port)); done: adapter->vxlan_port_count--; } static 
netdev_features_t be_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { struct be_adapter *adapter = netdev_priv(dev); u8 l4_hdr = 0; /* The code below restricts offload features for some tunneled packets. * Offload features for normal (non tunnel) packets are unchanged. */ if (!skb->encapsulation || !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)) return features; /* It's an encapsulated packet and VxLAN offloads are enabled. We * should disable tunnel offload features if it's not a VxLAN packet, * as tunnel offloads have been enabled only for VxLAN. This is done to * allow other tunneled traffic like GRE work fine while VxLAN * offloads are configured in Skyhawk-R. */ switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): l4_hdr = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6): l4_hdr = ipv6_hdr(skb)->nexthdr; break; default: return features; } if (l4_hdr != IPPROTO_UDP || skb->inner_protocol_type != ENCAP_TYPE_ETHER || skb->inner_protocol != htons(ETH_P_TEB) || skb_inner_mac_header(skb) - skb_transport_header(skb) != sizeof(struct udphdr) + sizeof(struct vxlanhdr)) return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); return features; } #endif static const struct net_device_ops be_netdev_ops = { .ndo_open = be_open, .ndo_stop = be_close, .ndo_start_xmit = be_xmit, .ndo_set_rx_mode = be_set_rx_mode, .ndo_set_mac_address = be_mac_addr_set, .ndo_change_mtu = be_change_mtu, .ndo_get_stats64 = be_get_stats64, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = be_vlan_add_vid, .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, .ndo_set_vf_mac = be_set_vf_mac, .ndo_set_vf_vlan = be_set_vf_vlan, .ndo_set_vf_rate = be_set_vf_tx_rate, .ndo_get_vf_config = be_get_vf_config, .ndo_set_vf_link_state = be_set_vf_link_state, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = be_netpoll, #endif .ndo_bridge_setlink = be_ndo_bridge_setlink, .ndo_bridge_getlink = be_ndo_bridge_getlink, #ifdef CONFIG_NET_RX_BUSY_POLL 
.ndo_busy_poll = be_busy_poll, #endif #ifdef CONFIG_BE2NET_VXLAN .ndo_add_vxlan_port = be_add_vxlan_port, .ndo_del_vxlan_port = be_del_vxlan_port, .ndo_features_check = be_features_check, #endif }; static void be_netdev_init(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX; if (be_multi_rxq(adapter)) netdev->hw_features |= NETIF_F_RXHASH; netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->flags |= IFF_MULTICAST; netif_set_gso_max_size(netdev, 65535 - ETH_HLEN); netdev->netdev_ops = &be_netdev_ops; netdev->ethtool_ops = &be_ethtool_ops; } static void be_unmap_pci_bars(struct be_adapter *adapter) { if (adapter->csr) pci_iounmap(adapter->pdev, adapter->csr); if (adapter->db) pci_iounmap(adapter->pdev, adapter->db); } static int db_bar(struct be_adapter *adapter) { if (lancer_chip(adapter) || !be_physfn(adapter)) return 0; else return 4; } static int be_roce_map_pci_bars(struct be_adapter *adapter) { if (skyhawk_chip(adapter)) { adapter->roce_db.size = 4096; adapter->roce_db.io_addr = pci_resource_start(adapter->pdev, db_bar(adapter)); adapter->roce_db.total_size = pci_resource_len(adapter->pdev, db_bar(adapter)); } return 0; } static int be_map_pci_bars(struct be_adapter *adapter) { u8 __iomem *addr; if (BEx_chip(adapter) && be_physfn(adapter)) { adapter->csr = pci_iomap(adapter->pdev, 2, 0); if (!adapter->csr) return -ENOMEM; } addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); if (!addr) goto pci_map_err; adapter->db = addr; be_roce_map_pci_bars(adapter); return 0; pci_map_err: dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); be_unmap_pci_bars(adapter); return -ENOMEM; } static void 
be_ctrl_cleanup(struct be_adapter *adapter) { struct be_dma_mem *mem = &adapter->mbox_mem_alloced; be_unmap_pci_bars(adapter); if (mem->va) dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, mem->dma); mem = &adapter->rx_filter; if (mem->va) dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, mem->dma); } static int be_ctrl_init(struct be_adapter *adapter) { struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced; struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem; struct be_dma_mem *rx_filter = &adapter->rx_filter; u32 sli_intf; int status; pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf); adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >> SLI_INTF_FAMILY_SHIFT; adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0; status = be_map_pci_bars(adapter); if (status) goto done; mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev, mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL); if (!mbox_mem_alloc->va) { status = -ENOMEM; goto unmap_pci_bars; } mbox_mem_align->size = sizeof(struct be_mcc_mailbox); mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); rx_filter->size = sizeof(struct be_cmd_req_rx_filter); rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev, rx_filter->size, &rx_filter->dma, GFP_KERNEL); if (!rx_filter->va) { status = -ENOMEM; goto free_mbox; } mutex_init(&adapter->mbox_lock); spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); init_completion(&adapter->et_cmd_compl); pci_save_state(adapter->pdev); return 0; free_mbox: dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size, mbox_mem_alloc->va, mbox_mem_alloc->dma); unmap_pci_bars: be_unmap_pci_bars(adapter); done: return status; } static void be_stats_cleanup(struct be_adapter *adapter) { struct be_dma_mem *cmd = 
&adapter->stats_cmd; if (cmd->va) dma_free_coherent(&adapter->pdev->dev, cmd->size, cmd->va, cmd->dma); } static int be_stats_init(struct be_adapter *adapter) { struct be_dma_mem *cmd = &adapter->stats_cmd; if (lancer_chip(adapter)) cmd->size = sizeof(struct lancer_cmd_req_pport_stats); else if (BE2_chip(adapter)) cmd->size = sizeof(struct be_cmd_req_get_stats_v0); else if (BE3_chip(adapter)) cmd->size = sizeof(struct be_cmd_req_get_stats_v1); else /* ALL non-BE ASICs */ cmd->size = sizeof(struct be_cmd_req_get_stats_v2); cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, GFP_KERNEL); if (!cmd->va) return -ENOMEM; return 0; } static void be_remove(struct pci_dev *pdev) { struct be_adapter *adapter = pci_get_drvdata(pdev); if (!adapter) return; be_roce_dev_remove(adapter); be_intr_set(adapter, false); cancel_delayed_work_sync(&adapter->func_recovery_work); unregister_netdev(adapter->netdev); be_clear(adapter); /* tell fw we're done with firing cmds */ be_cmd_fw_clean(adapter); be_stats_cleanup(adapter); be_ctrl_cleanup(adapter); pci_disable_pcie_error_reporting(pdev); pci_release_regions(pdev); pci_disable_device(pdev); free_netdev(adapter->netdev); } static int be_get_initial_config(struct be_adapter *adapter) { int status, level; status = be_cmd_get_cntl_attributes(adapter); if (status) return status; /* Must be a power of 2 or else MODULO will BUG_ON */ adapter->be_get_temp_freq = 64; if (BEx_chip(adapter)) { level = be_cmd_get_fw_log_level(adapter); adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? 
NETIF_MSG_HW : 0; } adapter->cfg_num_qs = netif_get_num_default_rss_queues(); return 0; } static int lancer_recover_func(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; int status; status = lancer_test_and_set_rdy_state(adapter); if (status) goto err; if (netif_running(adapter->netdev)) be_close(adapter->netdev); be_clear(adapter); be_clear_all_error(adapter); status = be_setup(adapter); if (status) goto err; if (netif_running(adapter->netdev)) { status = be_open(adapter->netdev); if (status) goto err; } dev_err(dev, "Adapter recovery successful\n"); return 0; err: if (status == -EAGAIN) dev_err(dev, "Waiting for resource provisioning\n"); else dev_err(dev, "Adapter recovery failed\n"); return status; } static void be_func_recovery_task(struct work_struct *work) { struct be_adapter *adapter = container_of(work, struct be_adapter, func_recovery_work.work); int status = 0; be_detect_error(adapter); if (adapter->hw_error && lancer_chip(adapter)) { rtnl_lock(); netif_device_detach(adapter->netdev); rtnl_unlock(); status = lancer_recover_func(adapter); if (!status) netif_device_attach(adapter->netdev); } /* In Lancer, for all errors other than provisioning error (-EAGAIN), * no need to attempt further recovery. 
*/ if (!status || status == -EAGAIN) schedule_delayed_work(&adapter->func_recovery_work, msecs_to_jiffies(1000)); } static void be_worker(struct work_struct *work) { struct be_adapter *adapter = container_of(work, struct be_adapter, work.work); struct be_rx_obj *rxo; int i; /* when interrupts are not yet enabled, just reap any pending * mcc completions */ if (!netif_running(adapter->netdev)) { local_bh_disable(); be_process_mcc(adapter); local_bh_enable(); goto reschedule; } if (!adapter->stats_cmd_sent) { if (lancer_chip(adapter)) lancer_cmd_get_pport_stats(adapter, &adapter->stats_cmd); else be_cmd_get_stats(adapter, &adapter->stats_cmd); } if (be_physfn(adapter) && MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) be_cmd_get_die_temperature(adapter); for_all_rx_queues(adapter, rxo, i) { /* Replenish RX-queues starved due to memory * allocation failures. */ if (rxo->rx_post_starved) be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST); } be_eqd_update(adapter); reschedule: adapter->work_counter++; schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); } /* If any VFs are already enabled don't FLR the PF */ static bool be_reset_required(struct be_adapter *adapter) { return pci_num_vf(adapter->pdev) ? false : true; } static char *mc_name(struct be_adapter *adapter) { char *str = ""; /* default */ switch (adapter->mc_type) { case UMC: str = "UMC"; break; case FLEX10: str = "FLEX10"; break; case vNIC1: str = "vNIC-1"; break; case nPAR: str = "nPAR"; break; case UFP: str = "UFP"; break; case vNIC2: str = "vNIC-2"; break; default: str = ""; } return str; } static inline char *func_name(struct be_adapter *adapter) { return be_physfn(adapter) ? 
"PF" : "VF"; } static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) { int status = 0; struct be_adapter *adapter; struct net_device *netdev; char port_name; dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER); status = pci_enable_device(pdev); if (status) goto do_none; status = pci_request_regions(pdev, DRV_NAME); if (status) goto disable_dev; pci_set_master(pdev); netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS); if (!netdev) { status = -ENOMEM; goto rel_reg; } adapter = netdev_priv(netdev); adapter->pdev = pdev; pci_set_drvdata(pdev, adapter); adapter->netdev = netdev; SET_NETDEV_DEV(netdev, &pdev->dev); status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (!status) { netdev->features |= NETIF_F_HIGHDMA; } else { status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (status) { dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); goto free_netdev; } } status = pci_enable_pcie_error_reporting(pdev); if (!status) dev_info(&pdev->dev, "PCIe error reporting enabled\n"); status = be_ctrl_init(adapter); if (status) goto free_netdev; /* sync up with fw's ready state */ if (be_physfn(adapter)) { status = be_fw_wait_ready(adapter); if (status) goto ctrl_clean; } if (be_reset_required(adapter)) { status = be_cmd_reset_function(adapter); if (status) goto ctrl_clean; /* Wait for interrupts to quiesce after an FLR */ msleep(100); } /* Allow interrupts for other ULPs running on NIC function */ be_intr_set(adapter, true); /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) goto ctrl_clean; status = be_stats_init(adapter); if (status) goto ctrl_clean; status = be_get_initial_config(adapter); if (status) goto stats_clean; INIT_DELAYED_WORK(&adapter->work, be_worker); INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task); adapter->rx_fc = true; adapter->tx_fc = true; status = be_setup(adapter); if (status) goto stats_clean; be_netdev_init(netdev); 
status = register_netdev(netdev); if (status != 0) goto unsetup; be_roce_dev_add(adapter); schedule_delayed_work(&adapter->func_recovery_work, msecs_to_jiffies(1000)); be_cmd_query_port_name(adapter, &port_name); dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev), func_name(adapter), mc_name(adapter), port_name); return 0; unsetup: be_clear(adapter); stats_clean: be_stats_cleanup(adapter); ctrl_clean: be_ctrl_cleanup(adapter); free_netdev: free_netdev(netdev); rel_reg: pci_release_regions(pdev); disable_dev: pci_disable_device(pdev); do_none: dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev)); return status; } static int be_suspend(struct pci_dev *pdev, pm_message_t state) { struct be_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; if (adapter->wol_en) be_setup_wol(adapter, true); be_intr_set(adapter, false); cancel_delayed_work_sync(&adapter->func_recovery_work); netif_device_detach(netdev); if (netif_running(netdev)) { rtnl_lock(); be_close(netdev); rtnl_unlock(); } be_clear(adapter); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } static int be_resume(struct pci_dev *pdev) { int status = 0; struct be_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); status = pci_enable_device(pdev); if (status) return status; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); status = be_fw_wait_ready(adapter); if (status) return status; be_intr_set(adapter, true); /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) return status; be_setup(adapter); if (netif_running(netdev)) { rtnl_lock(); be_open(netdev); rtnl_unlock(); } schedule_delayed_work(&adapter->func_recovery_work, msecs_to_jiffies(1000)); netif_device_attach(netdev); if (adapter->wol_en) be_setup_wol(adapter, false); return 0; } /* * An FLR will stop BE from DMAing any data. 
*/ static void be_shutdown(struct pci_dev *pdev) { struct be_adapter *adapter = pci_get_drvdata(pdev); if (!adapter) return; be_roce_dev_shutdown(adapter); cancel_delayed_work_sync(&adapter->work); cancel_delayed_work_sync(&adapter->func_recovery_work); netif_device_detach(adapter->netdev); be_cmd_reset_function(adapter); pci_disable_device(pdev); } static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct be_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; dev_err(&adapter->pdev->dev, "EEH error detected\n"); if (!adapter->eeh_error) { adapter->eeh_error = true; cancel_delayed_work_sync(&adapter->func_recovery_work); rtnl_lock(); netif_device_detach(netdev); if (netif_running(netdev)) be_close(netdev); rtnl_unlock(); be_clear(adapter); } if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; pci_disable_device(pdev); /* The error could cause the FW to trigger a flash debug dump. * Resetting the card while flash dump is in progress * can cause it not to recover; wait for it to finish. * Wait only for first function as it is needed only once per * adapter. 
*/ if (pdev->devfn == 0) ssleep(30); return PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) { struct be_adapter *adapter = pci_get_drvdata(pdev); int status; dev_info(&adapter->pdev->dev, "EEH reset\n"); status = pci_enable_device(pdev); if (status) return PCI_ERS_RESULT_DISCONNECT; pci_set_master(pdev); pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); /* Check if card is ok and fw is ready */ dev_info(&adapter->pdev->dev, "Waiting for FW to be ready after EEH reset\n"); status = be_fw_wait_ready(adapter); if (status) return PCI_ERS_RESULT_DISCONNECT; pci_cleanup_aer_uncorrect_error_status(pdev); be_clear_all_error(adapter); return PCI_ERS_RESULT_RECOVERED; } static void be_eeh_resume(struct pci_dev *pdev) { int status = 0; struct be_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; dev_info(&adapter->pdev->dev, "EEH resume\n"); pci_save_state(pdev); status = be_cmd_reset_function(adapter); if (status) goto err; /* On some BE3 FW versions, after a HW reset, * interrupts will remain disabled for each function. 
* So, explicitly enable interrupts */ be_intr_set(adapter, true); /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) goto err; status = be_setup(adapter); if (status) goto err; if (netif_running(netdev)) { status = be_open(netdev); if (status) goto err; } schedule_delayed_work(&adapter->func_recovery_work, msecs_to_jiffies(1000)); netif_device_attach(netdev); return; err: dev_err(&adapter->pdev->dev, "EEH resume failed\n"); } static const struct pci_error_handlers be_eeh_handlers = { .error_detected = be_eeh_err_detected, .slot_reset = be_eeh_reset, .resume = be_eeh_resume, }; static struct pci_driver be_driver = { .name = DRV_NAME, .id_table = be_dev_ids, .probe = be_probe, .remove = be_remove, .suspend = be_suspend, .resume = be_resume, .shutdown = be_shutdown, .err_handler = &be_eeh_handlers }; static int __init be_init_module(void) { if (rx_frag_size != 8192 && rx_frag_size != 4096 && rx_frag_size != 2048) { printk(KERN_WARNING DRV_NAME " : Module param rx_frag_size must be 2048/4096/8192." " Using 2048\n"); rx_frag_size = 2048; } return pci_register_driver(&be_driver); } module_init(be_init_module); static void __exit be_exit_module(void) { pci_unregister_driver(&be_driver); } module_exit(be_exit_module);
gpl-2.0
gobzateloon/Gobza_Sprout-LP
drivers/tty/n_tty.c
47
62631
/* * n_tty.c --- implements the N_TTY line discipline. * * This code used to be in tty_io.c, but things are getting hairy * enough that it made sense to split things off. (The N_TTY * processing has changed so much that it's hardly recognizable, * anyway...) * * Note that the open routine for N_TTY is guaranteed never to return * an error. This is because Linux will fall back to setting a line * to N_TTY if it can not switch to any other line discipline. * * Written by Theodore Ts'o, Copyright 1994. * * This file also contains code originally written by Linus Torvalds, * Copyright 1991, 1992, 1993, and by Julian Cowley, Copyright 1994. * * This file may be redistributed under the terms of the GNU General Public * License. * * Reduced memory usage for older ARM systems - Russell King. * * 2000/01/20 Fixed SMP locking on put_tty_queue using bits of * the patch by Andrew J. Kroll <ag784@freenet.buffalo.edu> * who actually finally proved there really was a race. * * 2002/03/18 Implemented n_tty_wakeup to send SIGIO POLL_OUTs to * waiting writing processes-Sapan Bhatia <sapan@corewars.org>. * Also fixed a bug in BLOCKING mode where n_tty_write returns * EAGAIN */ #include <linux/types.h> #include <linux/major.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/fcntl.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/timer.h> #include <linux/ctype.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/bitops.h> #include <linux/audit.h> #include <linux/file.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> /* number of characters left in xmit buffer before select has we have room */ #define WAKEUP_CHARS 256 /* * This defines the low- and high-watermarks for throttling and * unthrottling the TTY driver. These watermarks are used for * controlling the space in the read buffer. 
*/ #define TTY_THRESHOLD_THROTTLE 128 /* now based on remaining room */ #define TTY_THRESHOLD_UNTHROTTLE 128 /* * Special byte codes used in the echo buffer to represent operations * or special handling of characters. Bytes in the echo buffer that * are not part of such special blocks are treated as normal character * codes. */ #define ECHO_OP_START 0xff #define ECHO_OP_MOVE_BACK_COL 0x80 #define ECHO_OP_SET_CANON_COL 0x81 #define ECHO_OP_ERASE_TAB 0x82 #define ECHO_COMMIT_WATERMARK 256 #define ECHO_BLOCK 256 #define ECHO_DISCARD_WATERMARK N_TTY_BUF_SIZE - (ECHO_BLOCK + 32) #undef N_TTY_TRACE #ifdef N_TTY_TRACE # define n_tty_trace(f, args...) trace_printk(f, ##args) #else # define n_tty_trace(f, args...) #endif struct n_tty_data { /* producer-published */ size_t read_head; size_t canon_head; size_t echo_head; size_t echo_commit; size_t echo_mark; DECLARE_BITMAP(char_map, 256); /* private to n_tty_receive_overrun (single-threaded) */ unsigned long overrun_time; int num_overrun; /* non-atomic */ bool no_room; /* must hold exclusive termios_rwsem to reset these */ unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1; /* shared by producer and consumer */ char read_buf[N_TTY_BUF_SIZE]; DECLARE_BITMAP(read_flags, N_TTY_BUF_SIZE); unsigned char echo_buf[N_TTY_BUF_SIZE]; int minimum_to_wake; /* consumer-published */ size_t read_tail; size_t line_start; /* protected by output lock */ unsigned int column; unsigned int canon_column; size_t echo_tail; struct mutex atomic_read_lock; struct mutex output_lock; }; static inline size_t read_cnt(struct n_tty_data *ldata) { return ldata->read_head - ldata->read_tail; } static inline unsigned char read_buf(struct n_tty_data *ldata, size_t i) { return ldata->read_buf[i & (N_TTY_BUF_SIZE - 1)]; } static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i) { return &ldata->read_buf[i & (N_TTY_BUF_SIZE - 1)]; } static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i) { return ldata->echo_buf[i 
& (N_TTY_BUF_SIZE - 1)]; } static inline unsigned char *echo_buf_addr(struct n_tty_data *ldata, size_t i) { return &ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)]; } static inline int tty_put_user(struct tty_struct *tty, unsigned char x, unsigned char __user *ptr) { struct n_tty_data *ldata = tty->disc_data; tty_audit_add_data(tty, &x, 1, ldata->icanon); return put_user(x, ptr); } static int receive_room(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; int left; if (I_PARMRK(tty)) { /* Multiply read_cnt by 3, since each byte might take up to * three times as many spaces when PARMRK is set (depending on * its flags, e.g. parity error). */ left = N_TTY_BUF_SIZE - read_cnt(ldata) * 3 - 1; } else left = N_TTY_BUF_SIZE - read_cnt(ldata) - 1; /* * If we are doing input canonicalization, and there are no * pending newlines, let characters through without limit, so * that erase characters will be handled. Other excess * characters will be beeped. */ if (left <= 0) left = ldata->icanon && ldata->canon_head == ldata->read_tail; return left; } /** * n_tty_set_room - receive space * @tty: terminal * * Re-schedules the flip buffer work if space just became available. * * Caller holds exclusive termios_rwsem * or * n_tty_read()/consumer path: * holds non-exclusive termios_rwsem */ static void n_tty_set_room(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; /* Did this open up the receive buffer? 
We may need to flip */ if (unlikely(ldata->no_room) && receive_room(tty)) { ldata->no_room = 0; WARN_RATELIMIT(tty->port->itty == NULL, "scheduling with invalid itty\n"); /* see if ldisc has been killed - if so, this means that * even though the ldisc has been halted and ->buf.work * cancelled, ->buf.work is about to be rescheduled */ WARN_RATELIMIT(test_bit(TTY_LDISC_HALTED, &tty->flags), "scheduling buffer work for halted ldisc\n"); queue_work(system_unbound_wq, &tty->port->buf.work); } } static ssize_t chars_in_buffer(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; ssize_t n = 0; if (!ldata->icanon) n = read_cnt(ldata); else n = ldata->canon_head - ldata->read_tail; return n; } /** * n_tty_write_wakeup - asynchronous I/O notifier * @tty: tty device * * Required for the ptys, serial driver etc. since processes * that attach themselves to the master and rely on ASYNC * IO must be woken up */ static void n_tty_write_wakeup(struct tty_struct *tty) { if (tty->fasync && test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) kill_fasync(&tty->fasync, SIGIO, POLL_OUT); } static void n_tty_check_throttle(struct tty_struct *tty) { if (tty->driver->type == TTY_DRIVER_TYPE_PTY) return; /* * Check the remaining room for the input canonicalization * mode. We don't want to throttle the driver if we're in * canonical mode and don't have a newline yet! 
*/ while (1) { int throttled; tty_set_flow_change(tty, TTY_THROTTLE_SAFE); if (receive_room(tty) >= TTY_THRESHOLD_THROTTLE) break; throttled = tty_throttle_safe(tty); if (!throttled) break; } __tty_set_flow_change(tty, 0); } static void n_tty_check_unthrottle(struct tty_struct *tty) { if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->link->ldisc->ops->write_wakeup == n_tty_write_wakeup) { if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE) return; if (!tty->count) return; n_tty_set_room(tty); n_tty_write_wakeup(tty->link); wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT); return; } /* If there is enough space in the read buffer now, let the * low-level driver know. We use chars_in_buffer() to * check the buffer, as it now knows about canonical mode. * Otherwise, if the driver is throttled and the line is * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode, * we won't get any more characters. */ while (1) { int unthrottled; tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE); if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE) break; if (!tty->count) break; n_tty_set_room(tty); unthrottled = tty_unthrottle_safe(tty); if (!unthrottled) break; } __tty_set_flow_change(tty, 0); } /** * put_tty_queue - add character to tty * @c: character * @ldata: n_tty data * * Add a character to the tty read_buf queue. * * n_tty_receive_buf()/producer path: * caller holds non-exclusive termios_rwsem * modifies read_head * * read_head is only considered 'published' if canonical mode is * not active. */ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata) { *read_buf_addr(ldata, ldata->read_head++) = c; } /** * reset_buffer_flags - reset buffer state * @tty: terminal to reset * * Reset the read buffer counters and clear the flags. * Called from n_tty_open() and n_tty_flush_buffer(). 
* * Locking: caller holds exclusive termios_rwsem * (or locking is not required) */ static void reset_buffer_flags(struct n_tty_data *ldata) { ldata->read_head = ldata->canon_head = ldata->read_tail = 0; ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; ldata->echo_mark = 0; ldata->line_start = 0; ldata->erasing = 0; bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); } static void n_tty_packet_mode_flush(struct tty_struct *tty) { unsigned long flags; spin_lock_irqsave(&tty->ctrl_lock, flags); if (tty->link->packet) { tty->ctrl_status |= TIOCPKT_FLUSHREAD; wake_up_interruptible(&tty->link->read_wait); } spin_unlock_irqrestore(&tty->ctrl_lock, flags); } /** * n_tty_flush_buffer - clean input queue * @tty: terminal device * * Flush the input buffer. Called when the tty layer wants the * buffer flushed (eg at hangup) or when the N_TTY line discipline * internally has to clean the pending queue (for example some signals). * * Holds termios_rwsem to exclude producer/consumer while * buffer indices are reset. * * Locking: ctrl_lock, exclusive termios_rwsem */ static void n_tty_flush_buffer(struct tty_struct *tty) { down_write(&tty->termios_rwsem); reset_buffer_flags(tty->disc_data); n_tty_set_room(tty); if (tty->link) n_tty_packet_mode_flush(tty); up_write(&tty->termios_rwsem); } /** * n_tty_chars_in_buffer - report available bytes * @tty: tty device * * Report the number of characters buffered to be delivered to user * at this instant in time. * * Locking: exclusive termios_rwsem */ static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty) { ssize_t n; WARN_ONCE(1, "%s is deprecated and scheduled for removal.", __func__); down_write(&tty->termios_rwsem); n = chars_in_buffer(tty); up_write(&tty->termios_rwsem); return n; } /** * is_utf8_continuation - utf8 multibyte check * @c: byte to check * * Returns true if the utf8 character 'c' is a multibyte continuation * character. 
We use this to correctly compute the on screen size
 * of the character when printing
 */

static inline int is_utf8_continuation(unsigned char c)
{
	/* UTF-8 continuation bytes have the bit pattern 10xxxxxx */
	return (c & 0xc0) == 0x80;
}

/**
 * is_continuation - multibyte check
 * @c: byte to check
 *
 * Returns true if the utf8 character 'c' is a multibyte continuation
 * character and the terminal is in unicode mode.
 */

static inline int is_continuation(unsigned char c, struct tty_struct *tty)
{
	return I_IUTF8(tty) && is_utf8_continuation(c);
}

/**
 * do_output_char - output one character
 * @c: character (or partial unicode symbol)
 * @tty: terminal device
 * @space: space available in tty driver write buffer
 *
 * This is a helper function that handles one output character
 * (including special characters like TAB, CR, LF, etc.),
 * doing OPOST processing and putting the results in the
 * tty driver's write buffer.
 *
 * Note that Linux currently ignores TABDLY, CRDLY, VTDLY, FFDLY
 * and NLDLY. They simply aren't relevant in the world today.
 * If you ever need them, add them here.
 *
 * Returns the number of bytes of buffer space used or -1 if
 * no space left.
 *
 * Locking: should be called under the output_lock to protect
 * the column state and space left in the buffer
 */

static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
{
	struct n_tty_data *ldata = tty->disc_data;
	int	spaces;

	if (!space)
		return -1;

	switch (c) {
	case '\n':
		if (O_ONLRET(tty))
			ldata->column = 0;
		if (O_ONLCR(tty)) {
			/* NL -> CRNL expansion needs two bytes of room */
			if (space < 2)
				return -1;
			ldata->canon_column = ldata->column = 0;
			tty->ops->write(tty, "\r\n", 2);
			return 2;
		}
		ldata->canon_column = ldata->column;
		break;
	case '\r':
		/* ONOCR: suppress CR when already at column 0 */
		if (O_ONOCR(tty) && ldata->column == 0)
			return 0;
		if (O_OCRNL(tty)) {
			c = '\n';
			if (O_ONLRET(tty))
				ldata->canon_column = ldata->column = 0;
			break;
		}
		ldata->canon_column = ldata->column = 0;
		break;
	case '\t':
		/* distance to the next 8-column tab stop */
		spaces = 8 - (ldata->column & 7);
		if (O_TABDLY(tty) == XTABS) {
			if (space < spaces)
				return -1;
			ldata->column += spaces;
			/* NOTE(review): upstream writes a literal run of 8
			 * spaces here; this copy shows a single space, which
			 * looks like whitespace collapse during extraction --
			 * verify the literal before relying on this path. */
			tty->ops->write(tty, " ", spaces);
			return spaces;
		}
		ldata->column += spaces;
		break;
	case '\b':
		if (ldata->column > 0)
			ldata->column--;
		break;
	default:
		/* column tracking: only printable, non-continuation bytes
		 * advance the cursor */
		if (!iscntrl(c)) {
			if (O_OLCUC(tty))
				c = toupper(c);
			if (!is_continuation(c, tty))
				ldata->column++;
		}
		break;
	}

	tty_put_char(tty, c);
	return 1;
}

/**
 * process_output - output post processor
 * @c: character (or partial unicode symbol)
 * @tty: terminal device
 *
 * Output one character with OPOST processing.
 * Returns -1 when the output device is full and the character
 * must be retried.
* * Locking: output_lock to protect column state and space left * (also, this is called from n_tty_write under the * tty layer write lock) */ static int process_output(unsigned char c, struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; int space, retval; mutex_lock(&ldata->output_lock); space = tty_write_room(tty); retval = do_output_char(c, tty, space); mutex_unlock(&ldata->output_lock); if (retval < 0) return -1; else return 0; } /** * process_output_block - block post processor * @tty: terminal device * @buf: character buffer * @nr: number of bytes to output * * Output a block of characters with OPOST processing. * Returns the number of characters output. * * This path is used to speed up block console writes, among other * things when processing blocks of output data. It handles only * the simple cases normally found and helps to generate blocks of * symbols for the console driver and thus improve performance. * * Locking: output_lock to protect column state and space left * (also, this is called from n_tty_write under the * tty layer write lock) */ static ssize_t process_output_block(struct tty_struct *tty, const unsigned char *buf, unsigned int nr) { struct n_tty_data *ldata = tty->disc_data; int space; int i; const unsigned char *cp; mutex_lock(&ldata->output_lock); space = tty_write_room(tty); if (!space) { mutex_unlock(&ldata->output_lock); return 0; } if (nr > space) nr = space; for (i = 0, cp = buf; i < nr; i++, cp++) { unsigned char c = *cp; switch (c) { case '\n': if (O_ONLRET(tty)) ldata->column = 0; if (O_ONLCR(tty)) goto break_out; ldata->canon_column = ldata->column; break; case '\r': if (O_ONOCR(tty) && ldata->column == 0) goto break_out; if (O_OCRNL(tty)) goto break_out; ldata->canon_column = ldata->column = 0; break; case '\t': goto break_out; case '\b': if (ldata->column > 0) ldata->column--; break; default: if (!iscntrl(c)) { if (O_OLCUC(tty)) goto break_out; if (!is_continuation(c, tty)) ldata->column++; } break; } } 
break_out: i = tty->ops->write(tty, buf, i); mutex_unlock(&ldata->output_lock); return i; } /** * process_echoes - write pending echo characters * @tty: terminal device * * Write previously buffered echo (and other ldisc-generated) * characters to the tty. * * Characters generated by the ldisc (including echoes) need to * be buffered because the driver's write buffer can fill during * heavy program output. Echoing straight to the driver will * often fail under these conditions, causing lost characters and * resulting mismatches of ldisc state information. * * Since the ldisc state must represent the characters actually sent * to the driver at the time of the write, operations like certain * changes in column state are also saved in the buffer and executed * here. * * A circular fifo buffer is used so that the most recent characters * are prioritized. Also, when control characters are echoed with a * prefixed "^", the pair is treated atomically and thus not separated. * * Locking: callers must hold output_lock */ static size_t __process_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; int space, old_space; size_t tail; unsigned char c; old_space = space = tty_write_room(tty); tail = ldata->echo_tail; while (ldata->echo_commit != tail) { c = echo_buf(ldata, tail); if (c == ECHO_OP_START) { unsigned char op; int no_space_left = 0; /* * If the buffer byte is the start of a multi-byte * operation, get the next byte, which is either the * op code or a control character value. */ op = echo_buf(ldata, tail + 1); switch (op) { unsigned int num_chars, num_bs; case ECHO_OP_ERASE_TAB: num_chars = echo_buf(ldata, tail + 2); /* * Determine how many columns to go back * in order to erase the tab. * This depends on the number of columns * used by other characters within the tab * area. If this (modulo 8) count is from * the start of input rather than from a * previous tab, we offset by canon column. * Otherwise, tab spacing is normal. 
*/ if (!(num_chars & 0x80)) num_chars += ldata->canon_column; num_bs = 8 - (num_chars & 7); if (num_bs > space) { no_space_left = 1; break; } space -= num_bs; while (num_bs--) { tty_put_char(tty, '\b'); if (ldata->column > 0) ldata->column--; } tail += 3; break; case ECHO_OP_SET_CANON_COL: ldata->canon_column = ldata->column; tail += 2; break; case ECHO_OP_MOVE_BACK_COL: if (ldata->column > 0) ldata->column--; tail += 2; break; case ECHO_OP_START: /* This is an escaped echo op start code */ if (!space) { no_space_left = 1; break; } tty_put_char(tty, ECHO_OP_START); ldata->column++; space--; tail += 2; break; default: /* * If the op is not a special byte code, * it is a ctrl char tagged to be echoed * as "^X" (where X is the letter * representing the control char). * Note that we must ensure there is * enough space for the whole ctrl pair. * */ if (space < 2) { no_space_left = 1; break; } tty_put_char(tty, '^'); tty_put_char(tty, op ^ 0100); ldata->column += 2; space -= 2; tail += 2; } if (no_space_left) break; } else { if (O_OPOST(tty)) { int retval = do_output_char(c, tty, space); if (retval < 0) break; space -= retval; } else { if (!space) break; tty_put_char(tty, c); space -= 1; } tail += 1; } } /* If the echo buffer is nearly full (so that the possibility exists * of echo overrun before the next commit), then discard enough * data at the tail to prevent a subsequent overrun */ while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { if (echo_buf(ldata, tail) == ECHO_OP_START) { if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB) tail += 3; else tail += 2; } else tail++; } ldata->echo_tail = tail; return old_space - space; } static void commit_echoes(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; size_t nr, old, echoed; size_t head; head = ldata->echo_head; ldata->echo_mark = head; old = ldata->echo_commit - ldata->echo_tail; /* Process committed echoes if the accumulated # of bytes * is over the threshold (and try again each time 
another
	 * block is accumulated) */
	nr = head - ldata->echo_tail;
	if (nr < ECHO_COMMIT_WATERMARK ||
	    (nr % ECHO_BLOCK > old % ECHO_BLOCK))
		return;

	mutex_lock(&ldata->output_lock);
	ldata->echo_commit = head;
	echoed = __process_echoes(tty);
	mutex_unlock(&ldata->output_lock);

	if (echoed && tty->ops->flush_chars)
		tty->ops->flush_chars(tty);
}

static void process_echoes(struct tty_struct *tty)
{
	struct n_tty_data *ldata = tty->disc_data;
	size_t echoed;

	/* Nothing to do when echo is off or no new echoes are pending
	 * (echo_mark tracks the most recently queued echo byte). */
	if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
	    ldata->echo_mark == ldata->echo_tail)
		return;

	mutex_lock(&ldata->output_lock);
	ldata->echo_commit = ldata->echo_mark;
	echoed = __process_echoes(tty);
	mutex_unlock(&ldata->output_lock);

	if (echoed && tty->ops->flush_chars)
		tty->ops->flush_chars(tty);
}

/* NB: echo_mark and echo_head should be equivalent here */
static void flush_echoes(struct tty_struct *tty)
{
	struct n_tty_data *ldata = tty->disc_data;

	if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
	    ldata->echo_commit == ldata->echo_head)
		return;

	mutex_lock(&ldata->output_lock);
	ldata->echo_commit = ldata->echo_head;
	__process_echoes(tty);
	mutex_unlock(&ldata->output_lock);
}

/**
 * add_echo_byte - add a byte to the echo buffer
 * @c: unicode byte to echo
 * @ldata: n_tty data
 *
 * Add a character or operation byte to the echo buffer.
 */
static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
{
	*echo_buf_addr(ldata, ldata->echo_head++) = c;
}

/**
 * echo_move_back_col - add operation to move back a column
 * @ldata: n_tty data
 *
 * Add an operation to the echo buffer to move back one column.
 */
static void echo_move_back_col(struct n_tty_data *ldata)
{
	add_echo_byte(ECHO_OP_START, ldata);
	add_echo_byte(ECHO_OP_MOVE_BACK_COL, ldata);
}

/**
 * echo_set_canon_col - add operation to set the canon column
 * @ldata: n_tty data
 *
 * Add an operation to the echo buffer to set the canon column
 * to the current column.
 */
static void echo_set_canon_col(struct n_tty_data *ldata)
{
	add_echo_byte(ECHO_OP_START, ldata);
	add_echo_byte(ECHO_OP_SET_CANON_COL, ldata);
}

/**
 * echo_erase_tab - add operation to erase a tab
 * @num_chars: number of character columns already used
 * @after_tab: true if num_chars starts after a previous tab
 * @ldata: n_tty data
 *
 * Add an operation to the echo buffer to erase a tab.
 *
 * Called by the eraser function, which knows how many character
 * columns have been used since either a previous tab or the start
 * of input. This information will be used later, along with
 * canon column (if applicable), to go back the correct number
 * of columns.
 */
static void echo_erase_tab(unsigned int num_chars, int after_tab,
			   struct n_tty_data *ldata)
{
	add_echo_byte(ECHO_OP_START, ldata);
	add_echo_byte(ECHO_OP_ERASE_TAB, ldata);

	/* We only need to know this modulo 8 (tab spacing) */
	num_chars &= 7;

	/* Set the high bit as a flag if num_chars is after a previous tab */
	if (after_tab)
		num_chars |= 0x80;

	add_echo_byte(num_chars, ldata);
}

/**
 * echo_char_raw - echo a character raw
 * @c: unicode byte to echo
 * @ldata: n_tty data
 *
 * Echo user input back onto the screen. This must be called only when
 * L_ECHO(tty) is true. Called from the driver receive_buf path.
 *
 * This variant does not treat control characters specially.
 */
static void echo_char_raw(unsigned char c, struct n_tty_data *ldata)
{
	if (c == ECHO_OP_START) {
		/* a literal ECHO_OP_START byte must be escaped by doubling */
		add_echo_byte(ECHO_OP_START, ldata);
		add_echo_byte(ECHO_OP_START, ldata);
	} else {
		add_echo_byte(c, ldata);
	}
}

/**
 * echo_char - echo a character
 * @c: unicode byte to echo
 * @tty: terminal device
 *
 * Echo user input back onto the screen. This must be called only when
 * L_ECHO(tty) is true. Called from the driver receive_buf path.
 *
 * This variant tags control characters to be echoed as "^X"
 * (where X is the letter representing the control char).
*/ static void echo_char(unsigned char c, struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; if (c == ECHO_OP_START) { add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(ECHO_OP_START, ldata); } else { if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t') add_echo_byte(ECHO_OP_START, ldata); add_echo_byte(c, ldata); } } /** * finish_erasing - complete erase * @ldata: n_tty data */ static inline void finish_erasing(struct n_tty_data *ldata) { if (ldata->erasing) { echo_char_raw('/', ldata); ldata->erasing = 0; } } /** * eraser - handle erase function * @c: character input * @tty: terminal device * * Perform erase and necessary output when an erase character is * present in the stream from the driver layer. Handles the complexities * of UTF-8 multibyte symbols. * * n_tty_receive_buf()/producer path: * caller holds non-exclusive termios_rwsem * modifies read_head * * Modifying the read_head is not considered a publish in this context * because canonical mode is active -- only canon_head publishes */ static void eraser(unsigned char c, struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; enum { ERASE, WERASE, KILL } kill_type; size_t head; size_t cnt; int seen_alnums; if (ldata->read_head == ldata->canon_head) { /* process_output('\a', tty); */ /* what do you think? */ return; } if (c == ERASE_CHAR(tty)) kill_type = ERASE; else if (c == WERASE_CHAR(tty)) kill_type = WERASE; else { if (!L_ECHO(tty)) { ldata->read_head = ldata->canon_head; return; } if (!L_ECHOK(tty) || !L_ECHOKE(tty) || !L_ECHOE(tty)) { ldata->read_head = ldata->canon_head; finish_erasing(ldata); echo_char(KILL_CHAR(tty), tty); /* Add a newline if ECHOK is on and ECHOKE is off. 
*/ if (L_ECHOK(tty)) echo_char_raw('\n', ldata); return; } kill_type = KILL; } seen_alnums = 0; while (ldata->read_head != ldata->canon_head) { head = ldata->read_head; /* erase a single possibly multibyte character */ do { head--; c = read_buf(ldata, head); } while (is_continuation(c, tty) && head != ldata->canon_head); /* do not partially erase */ if (is_continuation(c, tty)) break; if (kill_type == WERASE) { /* Equivalent to BSD's ALTWERASE. */ if (isalnum(c) || c == '_') seen_alnums++; else if (seen_alnums) break; } cnt = ldata->read_head - head; ldata->read_head = head; if (L_ECHO(tty)) { if (L_ECHOPRT(tty)) { if (!ldata->erasing) { echo_char_raw('\\', ldata); ldata->erasing = 1; } /* if cnt > 1, output a multi-byte character */ echo_char(c, tty); while (--cnt > 0) { head++; echo_char_raw(read_buf(ldata, head), ldata); echo_move_back_col(ldata); } } else if (kill_type == ERASE && !L_ECHOE(tty)) { echo_char(ERASE_CHAR(tty), tty); } else if (c == '\t') { unsigned int num_chars = 0; int after_tab = 0; size_t tail = ldata->read_head; /* * Count the columns used for characters * since the start of input or after a * previous tab. * This info is used to go back the correct * number of columns. */ while (tail != ldata->canon_head) { tail--; c = read_buf(ldata, tail); if (c == '\t') { after_tab = 1; break; } else if (iscntrl(c)) { if (L_ECHOCTL(tty)) num_chars += 2; } else if (!is_continuation(c, tty)) { num_chars++; } } echo_erase_tab(num_chars, after_tab, ldata); } else { if (iscntrl(c) && L_ECHOCTL(tty)) { echo_char_raw('\b', ldata); echo_char_raw(' ', ldata); echo_char_raw('\b', ldata); } if (!iscntrl(c) || L_ECHOCTL(tty)) { echo_char_raw('\b', ldata); echo_char_raw(' ', ldata); echo_char_raw('\b', ldata); } } } if (kill_type == ERASE) break; } if (ldata->read_head == ldata->canon_head && L_ECHO(tty)) finish_erasing(ldata); } /** * isig - handle the ISIG optio * @sig: signal * @tty: terminal * * Called when a signal is being sent due to terminal input. 
 * Called from the driver receive_buf path so serialized.
 *
 * Locking: none here -- NOTE(review): this comment previously claimed
 * "ctrl_lock", but the body below takes no lock; tty_get_pgrp/put_pid
 * manage their own reference counting. Confirm against callers.
 */

static void isig(int sig, struct tty_struct *tty)
{
	struct pid *tty_pgrp = tty_get_pgrp(tty);
	if (tty_pgrp) {
		kill_pgrp(tty_pgrp, sig, 1);
		put_pid(tty_pgrp);
	}
}

/**
 * n_tty_receive_break - handle break
 * @tty: terminal
 *
 * An RS232 break event has been hit in the incoming bitstream. This
 * can cause a variety of events depending upon the termios settings.
 *
 * n_tty_receive_buf()/producer path:
 *	caller holds non-exclusive termios_rwsem
 *	publishes read_head via put_tty_queue()
 *
 * Note: may get exclusive termios_rwsem if flushing input buffer
 */

static void n_tty_receive_break(struct tty_struct *tty)
{
	struct n_tty_data *ldata = tty->disc_data;

	if (I_IGNBRK(tty))
		return;
	if (I_BRKINT(tty)) {
		isig(SIGINT, tty);
		if (!L_NOFLSH(tty)) {
			/* flushing needs exclusive termios_rwsem */
			up_read(&tty->termios_rwsem);
			n_tty_flush_buffer(tty);
			tty_driver_flush_buffer(tty);
			down_read(&tty->termios_rwsem);
		}
		return;
	}
	if (I_PARMRK(tty)) {
		/* PARMRK: mark the break as \377 \0 \0 in the read buffer */
		put_tty_queue('\377', ldata);
		put_tty_queue('\0', ldata);
	}
	put_tty_queue('\0', ldata);
	wake_up_interruptible(&tty->read_wait);
}

/**
 * n_tty_receive_overrun - handle overrun reporting
 * @tty: terminal
 *
 * Data arrived faster than we could process it. While the tty
 * driver has flagged this the bits that were missed are gone
 * forever.
 *
 * Called from the receive_buf path so single threaded. Does not
 * need locking as num_overrun and overrun_time are function
 * private.
 */

static void n_tty_receive_overrun(struct tty_struct *tty)
{
	struct n_tty_data *ldata = tty->disc_data;
	char buf[64];

	ldata->num_overrun++;
	/* rate-limit the warning to at most once per second; the second
	 * time_after() guards against jiffies wraparound */
	if (time_after(jiffies, ldata->overrun_time + HZ) ||
			time_after(ldata->overrun_time, jiffies)) {
		printk(KERN_WARNING "%s: %d input overrun(s)\n",
			tty_name(tty, buf),
			ldata->num_overrun);
		ldata->overrun_time = jiffies;
		ldata->num_overrun = 0;
	}
}

/**
 * n_tty_receive_parity_error - error notifier
 * @tty: terminal device
 * @c: character
 *
 * Process a parity error and queue the right data to indicate
 * the error case if necessary.
 *
 * n_tty_receive_buf()/producer path:
 *	caller holds non-exclusive termios_rwsem
 *	publishes read_head via put_tty_queue()
 */
static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
{
	struct n_tty_data *ldata = tty->disc_data;

	if (I_IGNPAR(tty))
		return;
	if (I_PARMRK(tty)) {
		/* PARMRK: prefix the offending byte with \377 \0 */
		put_tty_queue('\377', ldata);
		put_tty_queue('\0', ldata);
		put_tty_queue(c, ldata);
	} else	if (I_INPCK(tty))
		put_tty_queue('\0', ldata);
	else
		put_tty_queue(c, ldata);
	wake_up_interruptible(&tty->read_wait);
}

/*
 * Common handling for INTR/QUIT/SUSP characters: optionally flush
 * pending input, restart stopped output, echo the char, then signal
 * the foreground process group via isig().
 */
static void
n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
{
	if (!L_NOFLSH(tty)) {
		/* flushing needs exclusive termios_rwsem */
		up_read(&tty->termios_rwsem);
		n_tty_flush_buffer(tty);
		tty_driver_flush_buffer(tty);
		down_read(&tty->termios_rwsem);
	}
	if (I_IXON(tty))
		start_tty(tty);
	if (L_ECHO(tty)) {
		echo_char(c, tty);
		commit_echoes(tty);
	}
	isig(signal, tty);
	return;
}

/**
 * n_tty_receive_char - perform processing
 * @tty: terminal device
 * @c: character
 *
 * Process an individual character of input received from the driver.
 * This is serialized with respect to itself by the rules for the
 * driver above.
* * n_tty_receive_buf()/producer path: * caller holds non-exclusive termios_rwsem * publishes canon_head if canonical mode is active * otherwise, publishes read_head via put_tty_queue() * * Returns 1 if LNEXT was received, else returns 0 */ static int n_tty_receive_char_special(struct tty_struct *tty, unsigned char c) { struct n_tty_data *ldata = tty->disc_data; int parmrk; if (I_IXON(tty)) { if (c == START_CHAR(tty)) { start_tty(tty); commit_echoes(tty); return 0; } if (c == STOP_CHAR(tty)) { stop_tty(tty); return 0; } } if (L_ISIG(tty)) { if (c == INTR_CHAR(tty)) { n_tty_receive_signal_char(tty, SIGINT, c); return 0; } else if (c == QUIT_CHAR(tty)) { n_tty_receive_signal_char(tty, SIGQUIT, c); return 0; } else if (c == SUSP_CHAR(tty)) { n_tty_receive_signal_char(tty, SIGTSTP, c); return 0; } } if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) { start_tty(tty); process_echoes(tty); } if (c == '\r') { if (I_IGNCR(tty)) return 0; if (I_ICRNL(tty)) c = '\n'; } else if (c == '\n' && I_INLCR(tty)) c = '\r'; if (ldata->icanon) { if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) || (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) { eraser(c, tty); commit_echoes(tty); return 0; } if (c == LNEXT_CHAR(tty) && L_IEXTEN(tty)) { ldata->lnext = 1; if (L_ECHO(tty)) { finish_erasing(ldata); if (L_ECHOCTL(tty)) { echo_char_raw('^', ldata); echo_char_raw('\b', ldata); commit_echoes(tty); } } return 1; } if (c == REPRINT_CHAR(tty) && L_ECHO(tty) && L_IEXTEN(tty)) { size_t tail = ldata->canon_head; finish_erasing(ldata); echo_char(c, tty); echo_char_raw('\n', ldata); while (tail != ldata->read_head) { echo_char(read_buf(ldata, tail), tty); tail++; } commit_echoes(tty); return 0; } if (c == '\n') { if (L_ECHO(tty) || L_ECHONL(tty)) { echo_char_raw('\n', ldata); commit_echoes(tty); } goto handle_newline; } if (c == EOF_CHAR(tty)) { c = __DISABLED_CHAR; goto handle_newline; } if ((c == EOL_CHAR(tty)) || (c == EOL2_CHAR(tty) && L_IEXTEN(tty))) { parmrk = (c == (unsigned char) 
'\377' && I_PARMRK(tty)) ? 1 : 0; /* * XXX are EOL_CHAR and EOL2_CHAR echoed?!? */ if (L_ECHO(tty)) { /* Record the column of first canon char. */ if (ldata->canon_head == ldata->read_head) echo_set_canon_col(ldata); echo_char(c, tty); commit_echoes(tty); } /* * XXX does PARMRK doubling happen for * EOL_CHAR and EOL2_CHAR? */ if (parmrk) put_tty_queue(c, ldata); handle_newline: set_bit(ldata->read_head & (N_TTY_BUF_SIZE - 1), ldata->read_flags); put_tty_queue(c, ldata); ldata->canon_head = ldata->read_head; kill_fasync(&tty->fasync, SIGIO, POLL_IN); if (waitqueue_active(&tty->read_wait)) wake_up_interruptible(&tty->read_wait); return 0; } } parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0; if (L_ECHO(tty)) { finish_erasing(ldata); if (c == '\n') echo_char_raw('\n', ldata); else { /* Record the column of first canon char. */ if (ldata->canon_head == ldata->read_head) echo_set_canon_col(ldata); echo_char(c, tty); } commit_echoes(tty); } if (parmrk) put_tty_queue(c, ldata); put_tty_queue(c, ldata); return 0; } static inline void n_tty_receive_char_inline(struct tty_struct *tty, unsigned char c) { struct n_tty_data *ldata = tty->disc_data; int parmrk; if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) { start_tty(tty); process_echoes(tty); } if (L_ECHO(tty)) { finish_erasing(ldata); /* Record the column of first canon char. */ if (ldata->canon_head == ldata->read_head) echo_set_canon_col(ldata); echo_char(c, tty); commit_echoes(tty); } parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 
1 : 0; if (parmrk) put_tty_queue(c, ldata); put_tty_queue(c, ldata); } static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c) { n_tty_receive_char_inline(tty, c); } static inline void n_tty_receive_char_fast(struct tty_struct *tty, unsigned char c) { struct n_tty_data *ldata = tty->disc_data; if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) { start_tty(tty); process_echoes(tty); } if (L_ECHO(tty)) { finish_erasing(ldata); /* Record the column of first canon char. */ if (ldata->canon_head == ldata->read_head) echo_set_canon_col(ldata); echo_char(c, tty); commit_echoes(tty); } put_tty_queue(c, ldata); } static inline void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c) { if (I_ISTRIP(tty)) c &= 0x7f; if (I_IUCLC(tty) && L_IEXTEN(tty)) c = tolower(c); if (I_IXON(tty)) { if (c == STOP_CHAR(tty)) stop_tty(tty); else if (c == START_CHAR(tty) || (tty->stopped && !tty->flow_stopped && I_IXANY(tty) && c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) && c != SUSP_CHAR(tty))) { start_tty(tty); process_echoes(tty); } } } static void n_tty_receive_char_flagged(struct tty_struct *tty, unsigned char c, char flag) { char buf[64]; switch (flag) { case TTY_BREAK: n_tty_receive_break(tty); break; case TTY_PARITY: case TTY_FRAME: n_tty_receive_parity_error(tty, c); break; case TTY_OVERRUN: n_tty_receive_overrun(tty); break; default: printk(KERN_ERR "%s: unknown flag %d\n", tty_name(tty, buf), flag); break; } } static void n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag) { struct n_tty_data *ldata = tty->disc_data; ldata->lnext = 0; if (likely(flag == TTY_NORMAL)) { if (I_ISTRIP(tty)) c &= 0x7f; if (I_IUCLC(tty) && L_IEXTEN(tty)) c = tolower(c); n_tty_receive_char(tty, c); } else n_tty_receive_char_flagged(tty, c, flag); } /** * n_tty_receive_buf - data receive * @tty: terminal device * @cp: buffer * @fp: flag buffer * @count: characters * * Called by the terminal driver when a block of characters 
has * been received. This function must be called from soft contexts * not from interrupt context. The driver is responsible for making * calls one at a time and in order (or using flush_to_ldisc) * * n_tty_receive_buf()/producer path: * claims non-exclusive termios_rwsem * publishes read_head and canon_head */ static void n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct n_tty_data *ldata = tty->disc_data; size_t n, head; head = ldata->read_head & (N_TTY_BUF_SIZE - 1); n = N_TTY_BUF_SIZE - max(read_cnt(ldata), head); n = min_t(size_t, count, n); memcpy(read_buf_addr(ldata, head), cp, n); ldata->read_head += n; cp += n; count -= n; head = ldata->read_head & (N_TTY_BUF_SIZE - 1); n = N_TTY_BUF_SIZE - max(read_cnt(ldata), head); n = min_t(size_t, count, n); memcpy(read_buf_addr(ldata, head), cp, n); ldata->read_head += n; } static void n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct n_tty_data *ldata = tty->disc_data; char flag = TTY_NORMAL; while (count--) { if (fp) flag = *fp++; if (likely(flag == TTY_NORMAL)) put_tty_queue(*cp++, ldata); else n_tty_receive_char_flagged(tty, *cp++, flag); } } static void n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { char flag = TTY_NORMAL; while (count--) { if (fp) flag = *fp++; if (likely(flag == TTY_NORMAL)) n_tty_receive_char_closing(tty, *cp++); else n_tty_receive_char_flagged(tty, *cp++, flag); } } static void n_tty_receive_buf_standard(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct n_tty_data *ldata = tty->disc_data; char flag = TTY_NORMAL; while (count--) { if (fp) flag = *fp++; if (likely(flag == TTY_NORMAL)) { unsigned char c = *cp++; if (I_ISTRIP(tty)) c &= 0x7f; if (I_IUCLC(tty) && L_IEXTEN(tty)) c = tolower(c); if (L_EXTPROC(tty)) { put_tty_queue(c, ldata); continue; } if (!test_bit(c, ldata->char_map)) 
n_tty_receive_char_inline(tty, c); else if (n_tty_receive_char_special(tty, c) && count) { if (fp) flag = *fp++; n_tty_receive_char_lnext(tty, *cp++, flag); count--; } } else n_tty_receive_char_flagged(tty, *cp++, flag); } } static void n_tty_receive_buf_fast(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct n_tty_data *ldata = tty->disc_data; char flag = TTY_NORMAL; while (count--) { if (fp) flag = *fp++; if (likely(flag == TTY_NORMAL)) { unsigned char c = *cp++; if (!test_bit(c, ldata->char_map)) n_tty_receive_char_fast(tty, c); else if (n_tty_receive_char_special(tty, c) && count) { if (fp) flag = *fp++; n_tty_receive_char_lnext(tty, *cp++, flag); count--; } } else n_tty_receive_char_flagged(tty, *cp++, flag); } } static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct n_tty_data *ldata = tty->disc_data; bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty)); if (ldata->real_raw) n_tty_receive_buf_real_raw(tty, cp, fp, count); else if (ldata->raw || (L_EXTPROC(tty) && !preops)) n_tty_receive_buf_raw(tty, cp, fp, count); else if (tty->closing && !L_EXTPROC(tty)) n_tty_receive_buf_closing(tty, cp, fp, count); else { if (ldata->lnext) { char flag = TTY_NORMAL; if (fp) flag = *fp++; n_tty_receive_char_lnext(tty, *cp++, flag); count--; } if (!preops && !I_PARMRK(tty)) n_tty_receive_buf_fast(tty, cp, fp, count); else n_tty_receive_buf_standard(tty, cp, fp, count); flush_echoes(tty); if (tty->ops->flush_chars) tty->ops->flush_chars(tty); } if ((!ldata->icanon && (read_cnt(ldata) >= ldata->minimum_to_wake)) || L_EXTPROC(tty)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); if (waitqueue_active(&tty->read_wait)) wake_up_interruptible(&tty->read_wait); } } static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { int room, n; down_read(&tty->termios_rwsem); while (1) { room = receive_room(tty); n = min(count, room); if (!n) break; 
/* Tail of n_tty_receive_buf(): the loop opened above accepted n chars;
 * feed them to the ldisc, then advance the data pointer (and the
 * per-character flag pointer, when the driver supplied one). */
__receive_buf(tty, cp, fp, n);
		cp += n;
		if (fp)
			fp += n;
		count -= n;
	}

	/* publish remaining space and throttle the driver if needed */
	tty->receive_room = room;
	n_tty_check_throttle(tty);
	up_read(&tty->termios_rwsem);
}

/*
 * n_tty_receive_buf2 - receive data, reporting how much was consumed
 * @tty: terminal device
 * @cp: incoming character buffer
 * @fp: optional per-character flag buffer (may be NULL)
 * @count: number of characters offered by the driver
 *
 * Same producer path as n_tty_receive_buf(), but returns the number of
 * characters actually accepted, and sets ldata->no_room when the read
 * buffer filled so the producer knows data is still pending.
 */
static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
			      char *fp, int count)
{
	struct n_tty_data *ldata = tty->disc_data;
	int room, n, rcvd = 0;

	down_read(&tty->termios_rwsem);

	while (1) {
		room = receive_room(tty);
		n = min(count, room);
		if (!n) {
			/* we stopped for lack of buffer space, not for
			 * lack of input: remember it */
			if (!room)
				ldata->no_room = 1;
			break;
		}
		__receive_buf(tty, cp, fp, n);
		cp += n;
		if (fp)
			fp += n;
		count -= n;
		rcvd += n;
	}

	tty->receive_room = room;
	n_tty_check_throttle(tty);
	up_read(&tty->termios_rwsem);

	return rcvd;
}

/* Return non-zero if @sig is currently blocked or ignored by the calling
 * task; used below by job_control() for the SIGTTIN check. */
int is_ignored(int sig)
{
	return (sigismember(&current->blocked, sig) ||
		current->sighand->action[sig-1].sa.sa_handler == SIG_IGN);
}

/**
 * n_tty_set_termios - termios data changed
 * @tty: terminal
 * @old: previous data
 *
 * Called by the tty layer when the user changes termios flags so
 * that the line discipline can plan ahead. This function cannot sleep
 * and is protected from re-entry by the tty layer. The user is
 * guaranteed that this function will not be re-entered or in progress
 * when the ldisc is closed.
* * Locking: Caller holds tty->termios_rwsem */ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) { struct n_tty_data *ldata = tty->disc_data; int canon_change = 1; if (old) canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON; if (canon_change) { bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); ldata->line_start = ldata->canon_head = ldata->read_tail; ldata->erasing = 0; ldata->lnext = 0; } if (canon_change && !L_ICANON(tty) && read_cnt(ldata)) wake_up_interruptible(&tty->read_wait); ldata->icanon = (L_ICANON(tty) != 0); if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) || I_ICRNL(tty) || I_INLCR(tty) || L_ICANON(tty) || I_IXON(tty) || L_ISIG(tty) || L_ECHO(tty) || I_PARMRK(tty)) { bitmap_zero(ldata->char_map, 256); if (I_IGNCR(tty) || I_ICRNL(tty)) set_bit('\r', ldata->char_map); if (I_INLCR(tty)) set_bit('\n', ldata->char_map); if (L_ICANON(tty)) { set_bit(ERASE_CHAR(tty), ldata->char_map); set_bit(KILL_CHAR(tty), ldata->char_map); set_bit(EOF_CHAR(tty), ldata->char_map); set_bit('\n', ldata->char_map); set_bit(EOL_CHAR(tty), ldata->char_map); if (L_IEXTEN(tty)) { set_bit(WERASE_CHAR(tty), ldata->char_map); set_bit(LNEXT_CHAR(tty), ldata->char_map); set_bit(EOL2_CHAR(tty), ldata->char_map); if (L_ECHO(tty)) set_bit(REPRINT_CHAR(tty), ldata->char_map); } } if (I_IXON(tty)) { set_bit(START_CHAR(tty), ldata->char_map); set_bit(STOP_CHAR(tty), ldata->char_map); } if (L_ISIG(tty)) { set_bit(INTR_CHAR(tty), ldata->char_map); set_bit(QUIT_CHAR(tty), ldata->char_map); set_bit(SUSP_CHAR(tty), ldata->char_map); } clear_bit(__DISABLED_CHAR, ldata->char_map); ldata->raw = 0; ldata->real_raw = 0; } else { ldata->raw = 1; if ((I_IGNBRK(tty) || (!I_BRKINT(tty) && !I_PARMRK(tty))) && (I_IGNPAR(tty) || !I_INPCK(tty)) && (tty->driver->flags & TTY_DRIVER_REAL_RAW)) ldata->real_raw = 1; else ldata->real_raw = 0; } n_tty_set_room(tty); /* * Fix tty hang when I_IXON(tty) is cleared, but the tty * been stopped by STOP_CHAR(tty) before it. 
*/ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) { start_tty(tty); } /* The termios change make the tty ready for I/O */ wake_up_interruptible(&tty->write_wait); wake_up_interruptible(&tty->read_wait); } /** * n_tty_close - close the ldisc for this tty * @tty: device * * Called from the terminal layer when this line discipline is * being shut down, either because of a close or becsuse of a * discipline change. The function will not be called while other * ldisc methods are in progress. */ static void n_tty_close(struct tty_struct *tty) { struct n_tty_data *ldata = tty->disc_data; if (tty->link) n_tty_packet_mode_flush(tty); vfree(ldata); tty->disc_data = NULL; } /** * n_tty_open - open an ldisc * @tty: terminal to open * * Called when this line discipline is being attached to the * terminal device. Can sleep. Called serialized so that no * other events will occur in parallel. No further open will occur * until a close. */ static int n_tty_open(struct tty_struct *tty) { struct n_tty_data *ldata; /* Currently a malloc failure here can panic */ ldata = vmalloc(sizeof(*ldata)); if (!ldata) goto err; ldata->overrun_time = jiffies; mutex_init(&ldata->atomic_read_lock); mutex_init(&ldata->output_lock); tty->disc_data = ldata; reset_buffer_flags(tty->disc_data); ldata->column = 0; ldata->canon_column = 0; ldata->minimum_to_wake = 1; ldata->num_overrun = 0; ldata->no_room = 0; ldata->lnext = 0; tty->closing = 0; /* indicate buffer work may resume */ clear_bit(TTY_LDISC_HALTED, &tty->flags); n_tty_set_termios(tty, NULL); tty_unthrottle(tty); return 0; err: return -ENOMEM; } static inline int input_available_p(struct tty_struct *tty, int amt) { struct n_tty_data *ldata = tty->disc_data; if (ldata->icanon && !L_EXTPROC(tty)) { if (ldata->canon_head != ldata->read_tail) return 1; } else if (read_cnt(ldata) >= (amt ? 
amt : 1)) return 1; return 0; } /** * copy_from_read_buf - copy read data directly * @tty: terminal device * @b: user data * @nr: size of data * * Helper function to speed up n_tty_read. It is only called when * ICANON is off; it copies characters straight from the tty queue to * user space directly. It can be profitably called twice; once to * drain the space from the tail pointer to the (physical) end of the * buffer, and once to drain the space from the (physical) beginning of * the buffer to head pointer. * * Called under the ldata->atomic_read_lock sem * * n_tty_read()/consumer path: * caller holds non-exclusive termios_rwsem * read_tail published */ static int copy_from_read_buf(struct tty_struct *tty, unsigned char __user **b, size_t *nr) { struct n_tty_data *ldata = tty->disc_data; int retval; size_t n; bool is_eof; size_t tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); retval = 0; n = min(read_cnt(ldata), N_TTY_BUF_SIZE - tail); n = min(*nr, n); if (n) { retval = copy_to_user(*b, read_buf_addr(ldata, tail), n); n -= retval; is_eof = n == 1 && read_buf(ldata, tail) == EOF_CHAR(tty); tty_audit_add_data(tty, read_buf_addr(ldata, tail), n, ldata->icanon); ldata->read_tail += n; /* Turn single EOF into zero-length read */ if (L_EXTPROC(tty) && ldata->icanon && is_eof && !read_cnt(ldata)) n = 0; *b += n; *nr -= n; } return retval; } /** * canon_copy_from_read_buf - copy read data in canonical mode * @tty: terminal device * @b: user data * @nr: size of data * * Helper function for n_tty_read. It is only called when ICANON is on; * it copies one line of input up to and including the line-delimiting * character into the user-space buffer. 
* * Called under the atomic_read_lock mutex * * n_tty_read()/consumer path: * caller holds non-exclusive termios_rwsem * read_tail published */ static int canon_copy_from_read_buf(struct tty_struct *tty, unsigned char __user **b, size_t *nr) { struct n_tty_data *ldata = tty->disc_data; size_t n, size, more, c; size_t eol; size_t tail; int ret, found = 0; bool eof_push = 0; /* N.B. avoid overrun if nr == 0 */ n = min(*nr, read_cnt(ldata)); if (!n) return 0; tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); n_tty_trace("%s: nr:%zu tail:%zu n:%zu size:%zu\n", __func__, *nr, tail, n, size); eol = find_next_bit(ldata->read_flags, size, tail); more = n - (size - tail); if (eol == N_TTY_BUF_SIZE && more) { /* scan wrapped without finding set bit */ eol = find_next_bit(ldata->read_flags, more, 0); if (eol != more) found = 1; } else if (eol != size) found = 1; size = N_TTY_BUF_SIZE - tail; n = eol - tail; if (n > 4096) n += 4096; n += found; c = n; if (found && read_buf(ldata, eol) == __DISABLED_CHAR) { n--; eof_push = !n && ldata->read_tail != ldata->line_start; } n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n", __func__, eol, found, n, c, size, more); if (n > size) { ret = copy_to_user(*b, read_buf_addr(ldata, tail), size); if (ret) return -EFAULT; ret = copy_to_user(*b + size, ldata->read_buf, n - size); } else ret = copy_to_user(*b, read_buf_addr(ldata, tail), n); if (ret) return -EFAULT; *b += n; *nr -= n; if (found) clear_bit(eol, ldata->read_flags); smp_mb__after_clear_bit(); ldata->read_tail += c; if (found) { ldata->line_start = ldata->read_tail; tty_audit_push(tty); } return eof_push ? 
-EAGAIN : 0; } extern ssize_t redirected_tty_write(struct file *, const char __user *, size_t, loff_t *); /** * job_control - check job control * @tty: tty * @file: file handle * * Perform job control management checks on this file/tty descriptor * and if appropriate send any needed signals and return a negative * error code if action should be taken. * * Locking: redirected write test is safe * current->signal->tty check is safe * ctrl_lock to safely reference tty->pgrp */ static int job_control(struct tty_struct *tty, struct file *file) { /* Job control check -- must be done at start and after every sleep (POSIX.1 7.1.1.4). */ /* NOTE: not yet done after every sleep pending a thorough check of the logic of this change. -- jlc */ /* don't stop on /dev/console */ if (file->f_op->write == redirected_tty_write || current->signal->tty != tty) return 0; spin_lock_irq(&tty->ctrl_lock); if (!tty->pgrp) printk(KERN_ERR "n_tty_read: no tty->pgrp!\n"); else if (task_pgrp(current) != tty->pgrp) { spin_unlock_irq(&tty->ctrl_lock); if (is_ignored(SIGTTIN) || is_current_pgrp_orphaned()) return -EIO; kill_pgrp(task_pgrp(current), SIGTTIN, 1); set_thread_flag(TIF_SIGPENDING); return -ERESTARTSYS; } spin_unlock_irq(&tty->ctrl_lock); return 0; } /** * n_tty_read - read function for tty * @tty: tty device * @file: file object * @buf: userspace buffer pointer * @nr: size of I/O * * Perform reads for the line discipline. We are guaranteed that the * line discipline will not be closed under us but we may get multiple * parallel readers and must handle this ourselves. We may also get * a hangup. Always called in user context, may sleep. * * This code must be sure never to sleep through a hangup. 
* * n_tty_read()/consumer path: * claims non-exclusive termios_rwsem * publishes read_tail */ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, unsigned char __user *buf, size_t nr) { struct n_tty_data *ldata = tty->disc_data; unsigned char __user *b = buf; DECLARE_WAITQUEUE(wait, current); int c; int minimum, time; ssize_t retval = 0; long timeout; unsigned long flags; int packet; c = job_control(tty, file); if (c < 0) return c; /* * Internal serialization of reads. */ if (file->f_flags & O_NONBLOCK) { if (!mutex_trylock(&ldata->atomic_read_lock)) return -EAGAIN; } else { if (mutex_lock_interruptible(&ldata->atomic_read_lock)) return -ERESTARTSYS; } down_read(&tty->termios_rwsem); minimum = time = 0; timeout = MAX_SCHEDULE_TIMEOUT; if (!ldata->icanon) { minimum = MIN_CHAR(tty); if (minimum) { time = (HZ / 10) * TIME_CHAR(tty); if (time) ldata->minimum_to_wake = 1; else if (!waitqueue_active(&tty->read_wait) || (ldata->minimum_to_wake > minimum)) ldata->minimum_to_wake = minimum; } else { timeout = (HZ / 10) * TIME_CHAR(tty); ldata->minimum_to_wake = minimum = 1; } } packet = tty->packet; add_wait_queue(&tty->read_wait, &wait); while (nr) { /* First test for status change. */ if (packet && tty->link->ctrl_status) { unsigned char cs; if (b != buf) break; spin_lock_irqsave(&tty->link->ctrl_lock, flags); cs = tty->link->ctrl_status; tty->link->ctrl_status = 0; spin_unlock_irqrestore(&tty->link->ctrl_lock, flags); if (tty_put_user(tty, cs, b++)) { retval = -EFAULT; b--; break; } nr--; break; } /* This statement must be first before checking for input so that any interrupt will set the state back to TASK_RUNNING. 
*/ set_current_state(TASK_INTERRUPTIBLE); if (((minimum - (b - buf)) < ldata->minimum_to_wake) && ((minimum - (b - buf)) >= 1)) ldata->minimum_to_wake = (minimum - (b - buf)); if (!input_available_p(tty, 0)) { if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { up_read(&tty->termios_rwsem); tty_flush_to_ldisc(tty); down_read(&tty->termios_rwsem); if (!input_available_p(tty, 0)) { retval = -EIO; break; } } else { if (tty_hung_up_p(file)) break; if (!timeout) break; if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } n_tty_set_room(tty); up_read(&tty->termios_rwsem); timeout = schedule_timeout(timeout); down_read(&tty->termios_rwsem); continue; } } __set_current_state(TASK_RUNNING); /* Deal with packet mode. */ if (packet && b == buf) { if (tty_put_user(tty, TIOCPKT_DATA, b++)) { retval = -EFAULT; b--; break; } nr--; } if (ldata->icanon && !L_EXTPROC(tty)) { retval = canon_copy_from_read_buf(tty, &b, &nr); if (retval == -EAGAIN) { retval = 0; continue; } else if (retval) break; } else { int uncopied; /* The copy function takes the read lock and handles locking internally for this case */ uncopied = copy_from_read_buf(tty, &b, &nr); uncopied += copy_from_read_buf(tty, &b, &nr); if (uncopied) { retval = -EFAULT; break; } } n_tty_check_unthrottle(tty); if (b - buf >= minimum) break; if (time) timeout = time; } n_tty_set_room(tty); up_read(&tty->termios_rwsem); mutex_unlock(&ldata->atomic_read_lock); remove_wait_queue(&tty->read_wait, &wait); if (!waitqueue_active(&tty->read_wait)) ldata->minimum_to_wake = minimum; __set_current_state(TASK_RUNNING); if (b - buf) retval = b - buf; return retval; } /** * n_tty_write - write function for tty * @tty: tty device * @file: file object * @buf: userspace buffer pointer * @nr: size of I/O * * Write function of the terminal device. This is serialized with * respect to other write callers but not to termios changes, reads * and other such events. 
Since the receive code will echo characters, * thus calling driver write methods, the output_lock is used in * the output processing functions called here as well as in the * echo processing function to protect the column state and space * left in the buffer. * * This code must be sure never to sleep through a hangup. * * Locking: output_lock to protect column state and space left * (note that the process_output*() functions take this * lock themselves) */ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, const unsigned char *buf, size_t nr) { const unsigned char *b = buf; DECLARE_WAITQUEUE(wait, current); int c; ssize_t retval = 0; /* Job control check -- must be done at start (POSIX.1 7.1.1.4). */ if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) { retval = tty_check_change(tty); if (retval) return retval; } down_read(&tty->termios_rwsem); /* Write out any echoed characters that are still pending */ process_echoes(tty); add_wait_queue(&tty->write_wait, &wait); while (1) { set_current_state(TASK_INTERRUPTIBLE); if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (tty_hung_up_p(file) || (tty->link && !tty->link->count)) { retval = -EIO; break; } if (O_OPOST(tty)) { while (nr > 0) { ssize_t num = process_output_block(tty, b, nr); if (num < 0) { if (num == -EAGAIN) break; retval = num; goto break_out; } b += num; nr -= num; if (nr == 0) break; c = *b; if (process_output(c, tty) < 0) break; b++; nr--; } if (tty->ops->flush_chars) tty->ops->flush_chars(tty); } else { struct n_tty_data *ldata = tty->disc_data; while (nr > 0) { mutex_lock(&ldata->output_lock); c = tty->ops->write(tty, b, nr); mutex_unlock(&ldata->output_lock); if (c < 0) { retval = c; goto break_out; } if (!c) break; b += c; nr -= c; } } if (!nr) break; if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } up_read(&tty->termios_rwsem); schedule(); down_read(&tty->termios_rwsem); } break_out: __set_current_state(TASK_RUNNING); 
remove_wait_queue(&tty->write_wait, &wait); if (b - buf != nr && tty->fasync) set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); up_read(&tty->termios_rwsem); return (b - buf) ? b - buf : retval; } /** * n_tty_poll - poll method for N_TTY * @tty: terminal device * @file: file accessing it * @wait: poll table * * Called when the line discipline is asked to poll() for data or * for special events. This code is not serialized with respect to * other events save open/close. * * This code must be sure never to sleep through a hangup. * Called without the kernel lock held - fine */ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file, poll_table *wait) { struct n_tty_data *ldata = tty->disc_data; unsigned int mask = 0; poll_wait(file, &tty->read_wait, wait); poll_wait(file, &tty->write_wait, wait); if (input_available_p(tty, TIME_CHAR(tty) ? 0 : MIN_CHAR(tty))) mask |= POLLIN | POLLRDNORM; if (tty->packet && tty->link->ctrl_status) mask |= POLLPRI | POLLIN | POLLRDNORM; if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) mask |= POLLHUP; if (tty_hung_up_p(file)) mask |= POLLHUP; if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) { if (MIN_CHAR(tty) && !TIME_CHAR(tty)) ldata->minimum_to_wake = MIN_CHAR(tty); else ldata->minimum_to_wake = 1; } if (tty->ops->write && !tty_is_writelocked(tty) && tty_chars_in_buffer(tty) < WAKEUP_CHARS && tty_write_room(tty) > 0) mask |= POLLOUT | POLLWRNORM; return mask; } static unsigned long inq_canon(struct n_tty_data *ldata) { size_t nr, head, tail; if (ldata->canon_head == ldata->read_tail) return 0; head = ldata->canon_head; tail = ldata->read_tail; nr = head - tail; /* Skip EOF-chars.. 
*/
	while (head != tail) {
		/* skip entries recorded as __DISABLED_CHAR (EOF marks,
		 * per the "Skip EOF-chars" note above) so they are not
		 * counted as readable input */
		if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
		    read_buf(ldata, tail) == __DISABLED_CHAR)
			nr--;
		tail++;
	}
	return nr;
}

/* ldisc ioctl: TIOCOUTQ reports chars still queued in the driver's
 * output buffer; TIOCINQ reports pending input (whole canonical lines
 * via inq_canon() when ICANON is set, raw count otherwise). Anything
 * else is punted to the common helper. */
static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
		       unsigned int cmd, unsigned long arg)
{
	struct n_tty_data *ldata = tty->disc_data;
	int retval;

	switch (cmd) {
	case TIOCOUTQ:
		return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
	case TIOCINQ:
		down_write(&tty->termios_rwsem);
		if (L_ICANON(tty))
			retval = inq_canon(ldata);
		else
			retval = read_cnt(ldata);
		up_write(&tty->termios_rwsem);
		return put_user(retval, (unsigned int __user *) arg);
	default:
		return n_tty_ioctl_helper(tty, file, cmd, arg);
	}
}

/* Adjust the read-wakeup threshold when async notification is toggled
 * and no reader is currently sleeping on read_wait. */
static void n_tty_fasync(struct tty_struct *tty, int on)
{
	struct n_tty_data *ldata = tty->disc_data;

	if (!waitqueue_active(&tty->read_wait)) {
		if (on)
			ldata->minimum_to_wake = 1;
		else if (!tty->fasync)
			ldata->minimum_to_wake = N_TTY_BUF_SIZE;
	}
}

/* Method table registering this file's handlers as the N_TTY ldisc. */
struct tty_ldisc_ops tty_ldisc_N_TTY = {
	.magic           = TTY_LDISC_MAGIC,
	.name            = "n_tty",
	.open            = n_tty_open,
	.close           = n_tty_close,
	.flush_buffer    = n_tty_flush_buffer,
	.chars_in_buffer = n_tty_chars_in_buffer,
	.read            = n_tty_read,
	.write           = n_tty_write,
	.ioctl           = n_tty_ioctl,
	.set_termios     = n_tty_set_termios,
	.poll            = n_tty_poll,
	.receive_buf     = n_tty_receive_buf,
	.write_wakeup    = n_tty_write_wakeup,
	.fasync          = n_tty_fasync,
	.receive_buf2    = n_tty_receive_buf2,
};

/**
 * n_tty_inherit_ops - inherit N_TTY methods
 * @ops: struct tty_ldisc_ops where to save N_TTY methods
 *
 * Enables a 'subclass' line discipline to 'inherit' N_TTY
 * methods. The copied table is scrubbed of this module's
 * ownership/refcount state so the caller starts clean.
 */
void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
{
	*ops = tty_ldisc_N_TTY;
	ops->owner = NULL;
	ops->refcount = ops->flags = 0;
}
EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
gpl-2.0
daveti/gcc
libgfortran/generated/_conjg_c4.F90
47
1460
! Copyright (C) 2002-2015 Free Software Foundation, Inc.
! Contributed by Paul Brook <paul@nowt.org>
!
!This file is part of the GNU Fortran 95 runtime library (libgfortran).
!
!GNU libgfortran is free software; you can redistribute it and/or
!modify it under the terms of the GNU General Public
!License as published by the Free Software Foundation; either
!version 3 of the License, or (at your option) any later version.

!GNU libgfortran is distributed in the hope that it will be useful,
!but WITHOUT ANY WARRANTY; without even the implied warranty of
!MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
!GNU General Public License for more details.
!
!Under Section 7 of GPL version 3, you are granted additional
!permissions described in the GCC Runtime Library Exception, version
!3.1, as published by the Free Software Foundation.
!
!You should have received a copy of the GNU General Public License and
!a copy of the GCC Runtime Library Exception along with this program;
!see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
!<http://www.gnu.org/licenses/>.
!
!This file is machine generated.

#include "config.h"
#include "kinds.inc"
#include "c99_protos.inc"

#if defined (HAVE_GFC_COMPLEX_4)

! Library entry point for the kind=4 specific CONJG intrinsic: a thin
! elemental wrapper that simply forwards its argument to the generic
! CONJG. (Presumably emitted so the specific intrinsic has a callable
! symbol in the runtime -- this file is machine generated.)
elemental function _gfortran_specific__conjg_4 (parm)
   complex (kind=4), intent (in) :: parm
   complex (kind=4) :: _gfortran_specific__conjg_4

   _gfortran_specific__conjg_4 = conjg (parm)
end function

#endif
gpl-2.0
weimenlove/u-boot-1.3.2
board/xilinx/common/xversion.c
303
11422
/****************************************************************************** * * Author: Xilinx, Inc. * * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS" AS A * COURTESY TO YOU. BY PROVIDING THIS DESIGN, CODE, OR INFORMATION AS * ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE, APPLICATION OR STANDARD, * XILINX IS MAKING NO REPRESENTATION THAT THIS IMPLEMENTATION IS FREE * FROM ANY CLAIMS OF INFRINGEMENT, AND YOU ARE RESPONSIBLE FOR OBTAINING * ANY THIRD PARTY RIGHTS YOU MAY REQUIRE FOR YOUR IMPLEMENTATION. * XILINX EXPRESSLY DISCLAIMS ANY WARRANTY WHATSOEVER WITH RESPECT TO * THE ADEQUACY OF THE IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY * WARRANTIES OR REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM * CLAIMS OF INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE. * * * Xilinx hardware products are not intended for use in life support * appliances, devices, or systems. Use in such applications is * expressly prohibited. * * * (c) Copyright 2002-2004 Xilinx Inc. * All rights reserved. * * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * ******************************************************************************/ /***************************************************************************** * * This file contains the implementation of the XVersion component. This * component represents a version ID. It is encapsulated within a component * so that it's type and implementation can change without affecting users of * it. 
* * The version is formatted as X.YYZ where X = 0 - 9, Y = 00 - 99, Z = a - z * X is the major revision, YY is the minor revision, and Z is the * compatability revision. * * Packed versions are also utilized for the configuration ROM such that * memory is minimized. A packed version consumes only 16 bits and is * formatted as follows. * * <pre> * Revision Range Bit Positions * * Major Revision 0 - 9 Bits 15 - 12 * Minor Revision 0 - 99 Bits 11 - 5 * Compatability Revision a - z Bits 4 - 0 </pre> * ******************************************************************************/ /***************************** Include Files *********************************/ #include "xbasic_types.h" #include "xversion.h" /************************** Constant Definitions *****************************/ /* the following constants define the masks and shift values to allow the * revisions to be packed and unpacked, a packed version is packed into a 16 * bit value in the following format, XXXXYYYYYYYZZZZZ, where XXXX is the * major revision, YYYYYYY is the minor revision, and ZZZZZ is the compatability * revision */ #define XVE_MAJOR_SHIFT_VALUE 12 #define XVE_MINOR_ONLY_MASK 0x0FE0 #define XVE_MINOR_SHIFT_VALUE 5 #define XVE_COMP_ONLY_MASK 0x001F /* the following constants define the specific characters of a version string * for each character of the revision, a version string is in the following * format, "X.YYZ" where X is the major revision (0 - 9), YY is the minor * revision (00 - 99), and Z is the compatability revision (a - z) */ #define XVE_MAJOR_CHAR 0 /* major revision 0 - 9 */ #define XVE_MINOR_TENS_CHAR 2 /* minor revision tens 0 - 9 */ #define XVE_MINOR_ONES_CHAR 3 /* minor revision ones 0 - 9 */ #define XVE_COMP_CHAR 4 /* compatability revision a - z */ #define XVE_END_STRING_CHAR 5 /**************************** Type Definitions *******************************/ /***************** Macros (Inline Functions) Definitions *********************/ /************************** Function 
Prototypes ******************************/

/* Validates the "X.YYZ" version string format; defined at end of file. */
static u32 IsVersionStringValid(s8 * StringPtr);

/*****************************************************************************
 *
 * Unpacks a packed version into the specified version. Versions are packed
 * into the configuration ROM to reduce the amount storage. A packed version
 * is a binary format as oppossed to a non-packed version which is implemented
 * as a string.
 *
 * @param InstancePtr points to the version to unpack the packed version into.
 * @param PackedVersion contains the packed version to unpack.
 *
 * @return
 *
 * None.
 *
 * @note
 *
 * Currently an empty stub: nothing is unpacked and InstancePtr is left
 * untouched.
 *
 ******************************************************************************/
void XVersion_UnPack(XVersion * InstancePtr, u16 PackedVersion)
{
	/* not implemented yet since CROM related */
}

/*****************************************************************************
 *
 * Packs a version into the specified packed version. Versions are packed into
 * the configuration ROM to reduce the amount storage.
 *
 * @param InstancePtr points to the version to pack.
 * @param PackedVersionPtr points to the packed version which will receive
 *        the new packed version.
 *
 * @return
 *
 * A status, XST_SUCCESS, indicating the packing was accomplished
 * successfully, or an error, XST_INVALID_VERSION, indicating the specified
 * input version was not valid such that the pack did not occur
 * <br><br>
 * The packed version pointed to by PackedVersionPtr is modified with the new
 * packed version if the status indicates success.
 *
 * @note
 *
 * NOTE(review): stub -- it performs no packing yet still returns
 * XST_SUCCESS, so callers cannot detect that nothing happened.
 *
 ******************************************************************************/
XStatus XVersion_Pack(XVersion * InstancePtr, u16 * PackedVersionPtr)
{
	/* not implemented yet since CROM related */
	return XST_SUCCESS;
}

/*****************************************************************************
 *
 * Determines if two versions are equal.
 *
 * @param InstancePtr points to the first version to be compared.
* @param VersionPtr points to a second version to be compared.
 *
 * @return
 *
 * TRUE if the versions are equal, FALSE otherwise.
 *
 * @note
 *
 * None.
 *
 ******************************************************************************/
u32 XVersion_IsEqual(XVersion * InstancePtr, XVersion * VersionPtr)
{
	u8 *Left = (u8 *) InstancePtr;
	u8 *Right = (u8 *) VersionPtr;
	int Index;

	/* assert to verify input arguments */
	XASSERT_NONVOID(InstancePtr != NULL);
	XASSERT_NONVOID(VersionPtr != NULL);

	/* walk both versions byte by byte and bail out on the first
	 * position where they disagree */
	for (Index = 0; Index < sizeof (XVersion); Index++) {
		if (Left[Index] != Right[Index]) {
			return FALSE;
		}
	}

	/* every byte matched, so the two versions are identical */
	return TRUE;
}

/*****************************************************************************
 *
 * Converts a version to a null terminated string.
 *
 * @param InstancePtr points to the version to convert.
 * @param StringPtr points to the string which will be the result of the
 *        conversion. This does not need to point to a null terminated
 *        string as an input, but must point to storage which is an adequate
 *        amount to hold the result string.
 *
 * @return
 *
 * The null terminated string is inserted at the location pointed to by
 * StringPtr if the status indicates success.
 *
 * @note
 *
 * It is necessary for the caller to have already allocated the storage to
 * contain the string. The amount of memory necessary for the string is
 * specified in the version header file.
* ******************************************************************************/ void XVersion_ToString(XVersion * InstancePtr, s8 * StringPtr) { /* assert to verify input arguments */ XASSERT_VOID(InstancePtr != NULL); XASSERT_VOID(StringPtr != NULL); /* since version is implemented as a string, just copy the specified * input into the specified output */ XVersion_Copy(InstancePtr, (XVersion *) StringPtr); } /***************************************************************************** * * Initializes a version from a null terminated string. Since the string may not * be a format which is compatible with the version, an error could occur. * * @param InstancePtr points to the version which is to be initialized. * @param StringPtr points to a null terminated string which will be * converted to a version. The format of the string must match the * version string format which is X.YYX where X = 0 - 9, YY = 00 - 99, * Z = a - z. * * @return * * A status, XST_SUCCESS, indicating the conversion was accomplished * successfully, or XST_INVALID_VERSION indicating the version string format * was not valid. * * @note * * None. * ******************************************************************************/ XStatus XVersion_FromString(XVersion * InstancePtr, s8 * StringPtr) { /* assert to verify input arguments */ XASSERT_NONVOID(InstancePtr != NULL); XASSERT_NONVOID(StringPtr != NULL); /* if the version string specified is not valid, return an error */ if (!IsVersionStringValid(StringPtr)) { return XST_INVALID_VERSION; } /* copy the specified string into the specified version and indicate the * conversion was successful */ XVersion_Copy((XVersion *) StringPtr, InstancePtr); return XST_SUCCESS; } /***************************************************************************** * * Copies the contents of a version to another version. * * @param InstancePtr points to the version which is the source of data for * the copy operation. 
* @param VersionPtr points to another version which is the destination of * the copy operation. * * @return * * None. * * @note * * None. * ******************************************************************************/ void XVersion_Copy(XVersion * InstancePtr, XVersion * VersionPtr) { u8 *Source = (u8 *) InstancePtr; u8 *Destination = (u8 *) VersionPtr; int Index; /* assert to verify input arguments */ XASSERT_VOID(InstancePtr != NULL); XASSERT_VOID(VersionPtr != NULL); /* copy each byte of the source version to the destination version */ for (Index = 0; Index < sizeof (XVersion); Index++) { Destination[Index] = Source[Index]; } } /***************************************************************************** * * Determines if the specified version is valid. * * @param StringPtr points to the string to be validated. * * @return * * TRUE if the version string is a valid format, FALSE otherwise. * * @note * * None. * ******************************************************************************/ static u32 IsVersionStringValid(s8 * StringPtr) { /* if the input string is not a valid format, "X.YYZ" where X = 0 - 9, * YY = 00 - 99, and Z = a - z, then indicate it's not valid */ if ((StringPtr[XVE_MAJOR_CHAR] < '0') || (StringPtr[XVE_MAJOR_CHAR] > '9') || (StringPtr[XVE_MINOR_TENS_CHAR] < '0') || (StringPtr[XVE_MINOR_TENS_CHAR] > '9') || (StringPtr[XVE_MINOR_ONES_CHAR] < '0') || (StringPtr[XVE_MINOR_ONES_CHAR] > '9') || (StringPtr[XVE_COMP_CHAR] < 'a') || (StringPtr[XVE_COMP_CHAR] > 'z')) { return FALSE; } return TRUE; }
gpl-2.0
digetx/picasso-kernel
tools/perf/builtin-diff.c
303
26453
/* * builtin-diff.c * * Builtin diff command: Analyze two perf.data input files, look up and read * DSOs and symbol information, sort them and produce a diff. */ #include "builtin.h" #include "util/debug.h" #include "util/event.h" #include "util/hist.h" #include "util/evsel.h" #include "util/evlist.h" #include "util/session.h" #include "util/tool.h" #include "util/sort.h" #include "util/symbol.h" #include "util/util.h" #include "util/data.h" #include <stdlib.h> #include <math.h> /* Diff command specific HPP columns. */ enum { PERF_HPP_DIFF__BASELINE, PERF_HPP_DIFF__PERIOD, PERF_HPP_DIFF__PERIOD_BASELINE, PERF_HPP_DIFF__DELTA, PERF_HPP_DIFF__RATIO, PERF_HPP_DIFF__WEIGHTED_DIFF, PERF_HPP_DIFF__FORMULA, PERF_HPP_DIFF__MAX_INDEX }; struct diff_hpp_fmt { struct perf_hpp_fmt fmt; int idx; char *header; int header_width; }; struct data__file { struct perf_session *session; struct perf_data_file file; int idx; struct hists *hists; struct diff_hpp_fmt fmt[PERF_HPP_DIFF__MAX_INDEX]; }; static struct data__file *data__files; static int data__files_cnt; #define data__for_each_file_start(i, d, s) \ for (i = s, d = &data__files[s]; \ i < data__files_cnt; \ i++, d = &data__files[i]) #define data__for_each_file(i, d) data__for_each_file_start(i, d, 0) #define data__for_each_file_new(i, d) data__for_each_file_start(i, d, 1) static bool force; static bool show_period; static bool show_formula; static bool show_baseline_only; static unsigned int sort_compute; static s64 compute_wdiff_w1; static s64 compute_wdiff_w2; enum { COMPUTE_DELTA, COMPUTE_RATIO, COMPUTE_WEIGHTED_DIFF, COMPUTE_MAX, }; const char *compute_names[COMPUTE_MAX] = { [COMPUTE_DELTA] = "delta", [COMPUTE_RATIO] = "ratio", [COMPUTE_WEIGHTED_DIFF] = "wdiff", }; static int compute; static int compute_2_hpp[COMPUTE_MAX] = { [COMPUTE_DELTA] = PERF_HPP_DIFF__DELTA, [COMPUTE_RATIO] = PERF_HPP_DIFF__RATIO, [COMPUTE_WEIGHTED_DIFF] = PERF_HPP_DIFF__WEIGHTED_DIFF, }; #define MAX_COL_WIDTH 70 static struct header_column { const 
char *name; int width; } columns[PERF_HPP_DIFF__MAX_INDEX] = { [PERF_HPP_DIFF__BASELINE] = { .name = "Baseline", }, [PERF_HPP_DIFF__PERIOD] = { .name = "Period", .width = 14, }, [PERF_HPP_DIFF__PERIOD_BASELINE] = { .name = "Base period", .width = 14, }, [PERF_HPP_DIFF__DELTA] = { .name = "Delta", .width = 7, }, [PERF_HPP_DIFF__RATIO] = { .name = "Ratio", .width = 14, }, [PERF_HPP_DIFF__WEIGHTED_DIFF] = { .name = "Weighted diff", .width = 14, }, [PERF_HPP_DIFF__FORMULA] = { .name = "Formula", .width = MAX_COL_WIDTH, } }; static int setup_compute_opt_wdiff(char *opt) { char *w1_str = opt; char *w2_str; int ret = -EINVAL; if (!opt) goto out; w2_str = strchr(opt, ','); if (!w2_str) goto out; *w2_str++ = 0x0; if (!*w2_str) goto out; compute_wdiff_w1 = strtol(w1_str, NULL, 10); compute_wdiff_w2 = strtol(w2_str, NULL, 10); if (!compute_wdiff_w1 || !compute_wdiff_w2) goto out; pr_debug("compute wdiff w1(%" PRId64 ") w2(%" PRId64 ")\n", compute_wdiff_w1, compute_wdiff_w2); ret = 0; out: if (ret) pr_err("Failed: wrong weight data, use 'wdiff:w1,w2'\n"); return ret; } static int setup_compute_opt(char *opt) { if (compute == COMPUTE_WEIGHTED_DIFF) return setup_compute_opt_wdiff(opt); if (opt) { pr_err("Failed: extra option specified '%s'", opt); return -EINVAL; } return 0; } static int setup_compute(const struct option *opt, const char *str, int unset __maybe_unused) { int *cp = (int *) opt->value; char *cstr = (char *) str; char buf[50]; unsigned i; char *option; if (!str) { *cp = COMPUTE_DELTA; return 0; } option = strchr(str, ':'); if (option) { unsigned len = option++ - str; /* * The str data are not writeable, so we need * to use another buffer. */ /* No option value is longer. 
*/ if (len >= sizeof(buf)) return -EINVAL; strncpy(buf, str, len); buf[len] = 0x0; cstr = buf; } for (i = 0; i < COMPUTE_MAX; i++) if (!strcmp(cstr, compute_names[i])) { *cp = i; return setup_compute_opt(option); } pr_err("Failed: '%s' is not computation method " "(use 'delta','ratio' or 'wdiff')\n", str); return -EINVAL; } static double period_percent(struct hist_entry *he, u64 period) { u64 total = hists__total_period(he->hists); return (period * 100.0) / total; } static double compute_delta(struct hist_entry *he, struct hist_entry *pair) { double old_percent = period_percent(he, he->stat.period); double new_percent = period_percent(pair, pair->stat.period); pair->diff.period_ratio_delta = new_percent - old_percent; pair->diff.computed = true; return pair->diff.period_ratio_delta; } static double compute_ratio(struct hist_entry *he, struct hist_entry *pair) { double old_period = he->stat.period ?: 1; double new_period = pair->stat.period; pair->diff.computed = true; pair->diff.period_ratio = new_period / old_period; return pair->diff.period_ratio; } static s64 compute_wdiff(struct hist_entry *he, struct hist_entry *pair) { u64 old_period = he->stat.period; u64 new_period = pair->stat.period; pair->diff.computed = true; pair->diff.wdiff = new_period * compute_wdiff_w2 - old_period * compute_wdiff_w1; return pair->diff.wdiff; } static int formula_delta(struct hist_entry *he, struct hist_entry *pair, char *buf, size_t size) { u64 he_total = he->hists->stats.total_period; u64 pair_total = pair->hists->stats.total_period; if (symbol_conf.filter_relative) { he_total = he->hists->stats.total_non_filtered_period; pair_total = pair->hists->stats.total_non_filtered_period; } return scnprintf(buf, size, "(%" PRIu64 " * 100 / %" PRIu64 ") - " "(%" PRIu64 " * 100 / %" PRIu64 ")", pair->stat.period, pair_total, he->stat.period, he_total); } static int formula_ratio(struct hist_entry *he, struct hist_entry *pair, char *buf, size_t size) { double old_period = he->stat.period; 
double new_period = pair->stat.period; return scnprintf(buf, size, "%.0F / %.0F", new_period, old_period); } static int formula_wdiff(struct hist_entry *he, struct hist_entry *pair, char *buf, size_t size) { u64 old_period = he->stat.period; u64 new_period = pair->stat.period; return scnprintf(buf, size, "(%" PRIu64 " * " "%" PRId64 ") - (%" PRIu64 " * " "%" PRId64 ")", new_period, compute_wdiff_w2, old_period, compute_wdiff_w1); } static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair, char *buf, size_t size) { switch (compute) { case COMPUTE_DELTA: return formula_delta(he, pair, buf, size); case COMPUTE_RATIO: return formula_ratio(he, pair, buf, size); case COMPUTE_WEIGHTED_DIFF: return formula_wdiff(he, pair, buf, size); default: BUG_ON(1); } return -1; } static int hists__add_entry(struct hists *hists, struct addr_location *al, u64 period, u64 weight, u64 transaction) { if (__hists__add_entry(hists, al, NULL, NULL, NULL, period, weight, transaction, true) != NULL) return 0; return -ENOMEM; } static int diff__process_sample_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) { struct addr_location al; struct hists *hists = evsel__hists(evsel); if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { pr_warning("problem processing %d event, skipping it.\n", event->header.type); return -1; } if (hists__add_entry(hists, &al, sample->period, sample->weight, sample->transaction)) { pr_warning("problem incrementing symbol period, skipping event\n"); return -1; } /* * The total_period is updated here before going to the output * tree since normally only the baseline hists will call * hists__output_resort() and precompute needs the total * period in order to sort entries by percentage delta. 
*/ hists->stats.total_period += sample->period; if (!al.filtered) hists->stats.total_non_filtered_period += sample->period; return 0; } static struct perf_tool tool = { .sample = diff__process_sample_event, .mmap = perf_event__process_mmap, .comm = perf_event__process_comm, .exit = perf_event__process_exit, .fork = perf_event__process_fork, .lost = perf_event__process_lost, .ordered_events = true, .ordering_requires_timestamps = true, }; static struct perf_evsel *evsel_match(struct perf_evsel *evsel, struct perf_evlist *evlist) { struct perf_evsel *e; evlist__for_each(evlist, e) { if (perf_evsel__match2(evsel, e)) return e; } return NULL; } static void perf_evlist__collapse_resort(struct perf_evlist *evlist) { struct perf_evsel *evsel; evlist__for_each(evlist, evsel) { struct hists *hists = evsel__hists(evsel); hists__collapse_resort(hists, NULL); } } static struct hist_entry* get_pair_data(struct hist_entry *he, struct data__file *d) { if (hist_entry__has_pairs(he)) { struct hist_entry *pair; list_for_each_entry(pair, &he->pairs.head, pairs.node) if (pair->hists == d->hists) return pair; } return NULL; } static struct hist_entry* get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt) { void *ptr = dfmt - dfmt->idx; struct data__file *d = container_of(ptr, struct data__file, fmt); return get_pair_data(he, d); } static void hists__baseline_only(struct hists *hists) { struct rb_root *root; struct rb_node *next; if (sort__need_collapse) root = &hists->entries_collapsed; else root = hists->entries_in; next = rb_first(root); while (next != NULL) { struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in); next = rb_next(&he->rb_node_in); if (!hist_entry__next_pair(he)) { rb_erase(&he->rb_node_in, root); hist_entry__free(he); } } } static void hists__precompute(struct hists *hists) { struct rb_root *root; struct rb_node *next; if (sort__need_collapse) root = &hists->entries_collapsed; else root = hists->entries_in; next = rb_first(root); while (next 
!= NULL) { struct hist_entry *he, *pair; he = rb_entry(next, struct hist_entry, rb_node_in); next = rb_next(&he->rb_node_in); pair = get_pair_data(he, &data__files[sort_compute]); if (!pair) continue; switch (compute) { case COMPUTE_DELTA: compute_delta(he, pair); break; case COMPUTE_RATIO: compute_ratio(he, pair); break; case COMPUTE_WEIGHTED_DIFF: compute_wdiff(he, pair); break; default: BUG_ON(1); } } } static int64_t cmp_doubles(double l, double r) { if (l > r) return -1; else if (l < r) return 1; else return 0; } static int64_t __hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, int c) { switch (c) { case COMPUTE_DELTA: { double l = left->diff.period_ratio_delta; double r = right->diff.period_ratio_delta; return cmp_doubles(l, r); } case COMPUTE_RATIO: { double l = left->diff.period_ratio; double r = right->diff.period_ratio; return cmp_doubles(l, r); } case COMPUTE_WEIGHTED_DIFF: { s64 l = left->diff.wdiff; s64 r = right->diff.wdiff; return r - l; } default: BUG_ON(1); } return 0; } static int64_t hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, int c) { bool pairs_left = hist_entry__has_pairs(left); bool pairs_right = hist_entry__has_pairs(right); struct hist_entry *p_right, *p_left; if (!pairs_left && !pairs_right) return 0; if (!pairs_left || !pairs_right) return pairs_left ? -1 : 1; p_left = get_pair_data(left, &data__files[sort_compute]); p_right = get_pair_data(right, &data__files[sort_compute]); if (!p_left && !p_right) return 0; if (!p_left || !p_right) return p_left ? -1 : 1; /* * We have 2 entries of same kind, let's * make the data comparison. 
*/ return __hist_entry__cmp_compute(p_left, p_right, c); } static void insert_hist_entry_by_compute(struct rb_root *root, struct hist_entry *he, int c) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct hist_entry *iter; while (*p != NULL) { parent = *p; iter = rb_entry(parent, struct hist_entry, rb_node); if (hist_entry__cmp_compute(he, iter, c) < 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&he->rb_node, parent, p); rb_insert_color(&he->rb_node, root); } static void hists__compute_resort(struct hists *hists) { struct rb_root *root; struct rb_node *next; if (sort__need_collapse) root = &hists->entries_collapsed; else root = hists->entries_in; hists->entries = RB_ROOT; next = rb_first(root); hists__reset_stats(hists); hists__reset_col_len(hists); while (next != NULL) { struct hist_entry *he; he = rb_entry(next, struct hist_entry, rb_node_in); next = rb_next(&he->rb_node_in); insert_hist_entry_by_compute(&hists->entries, he, compute); hists__inc_stats(hists, he); if (!he->filtered) hists__calc_col_len(hists, he); } } static void hists__process(struct hists *hists) { if (show_baseline_only) hists__baseline_only(hists); if (sort_compute) { hists__precompute(hists); hists__compute_resort(hists); } else { hists__output_resort(hists); } hists__fprintf(hists, true, 0, 0, 0, stdout); } static void data__fprintf(void) { struct data__file *d; int i; fprintf(stdout, "# Data files:\n"); data__for_each_file(i, d) fprintf(stdout, "# [%d] %s %s\n", d->idx, d->file.path, !d->idx ? 
"(Baseline)" : ""); fprintf(stdout, "#\n"); } static void data_process(void) { struct perf_evlist *evlist_base = data__files[0].session->evlist; struct perf_evsel *evsel_base; bool first = true; evlist__for_each(evlist_base, evsel_base) { struct hists *hists_base = evsel__hists(evsel_base); struct data__file *d; int i; data__for_each_file_new(i, d) { struct perf_evlist *evlist = d->session->evlist; struct perf_evsel *evsel; struct hists *hists; evsel = evsel_match(evsel_base, evlist); if (!evsel) continue; hists = evsel__hists(evsel); d->hists = hists; hists__match(hists_base, hists); if (!show_baseline_only) hists__link(hists_base, hists); } fprintf(stdout, "%s# Event '%s'\n#\n", first ? "" : "\n", perf_evsel__name(evsel_base)); first = false; if (verbose || data__files_cnt > 2) data__fprintf(); hists__process(hists_base); } } static void data__free(struct data__file *d) { int col; for (col = 0; col < PERF_HPP_DIFF__MAX_INDEX; col++) { struct diff_hpp_fmt *fmt = &d->fmt[col]; zfree(&fmt->header); } } static int __cmd_diff(void) { struct data__file *d; int ret = -EINVAL, i; data__for_each_file(i, d) { d->session = perf_session__new(&d->file, false, &tool); if (!d->session) { pr_err("Failed to open %s\n", d->file.path); ret = -1; goto out_delete; } ret = perf_session__process_events(d->session, &tool); if (ret) { pr_err("Failed to process %s\n", d->file.path); goto out_delete; } perf_evlist__collapse_resort(d->session->evlist); } data_process(); out_delete: data__for_each_file(i, d) { if (d->session) perf_session__delete(d->session); data__free(d); } free(data__files); return ret; } static const char * const diff_usage[] = { "perf diff [<options>] [old_file] [new_file]", NULL, }; static const struct option options[] = { OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), OPT_BOOLEAN('b', "baseline-only", &show_baseline_only, "Show only items with match in baseline"), OPT_CALLBACK('c', "compute", &compute, "delta,ratio,wdiff:w1,w2 
(default delta)", "Entries differential computation selection", setup_compute), OPT_BOOLEAN('p', "period", &show_period, "Show period values."), OPT_BOOLEAN('F', "formula", &show_formula, "Show formula."), OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]", "only consider symbols in these dsos"), OPT_STRING('C', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", "only consider symbols in these comms"), OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", "only consider these symbols"), OPT_STRING('s', "sort", &sort_order, "key[,key2...]", "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..." " Please refer the man page for the complete list."), OPT_STRING('t', "field-separator", &symbol_conf.field_sep, "separator", "separator for columns, no spaces will be added between " "columns '.' 
is reserved."), OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", "Look for files with symbols relative to this directory"), OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."), OPT_CALLBACK(0, "percentage", NULL, "relative|absolute", "How to display percentage of filtered entries", parse_filter_percentage), OPT_END() }; static double baseline_percent(struct hist_entry *he) { u64 total = hists__total_period(he->hists); return 100.0 * he->stat.period / total; } static int hpp__color_baseline(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he) { struct diff_hpp_fmt *dfmt = container_of(fmt, struct diff_hpp_fmt, fmt); double percent = baseline_percent(he); char pfmt[20] = " "; if (!he->dummy) { scnprintf(pfmt, 20, "%%%d.2f%%%%", dfmt->header_width - 1); return percent_color_snprintf(hpp->buf, hpp->size, pfmt, percent); } else return scnprintf(hpp->buf, hpp->size, "%*s", dfmt->header_width, pfmt); } static int hpp__entry_baseline(struct hist_entry *he, char *buf, size_t size) { double percent = baseline_percent(he); const char *fmt = symbol_conf.field_sep ? 
"%.2f" : "%6.2f%%"; int ret = 0; if (!he->dummy) ret = scnprintf(buf, size, fmt, percent); return ret; } static int __hpp__color_compare(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he, int comparison_method) { struct diff_hpp_fmt *dfmt = container_of(fmt, struct diff_hpp_fmt, fmt); struct hist_entry *pair = get_pair_fmt(he, dfmt); double diff; s64 wdiff; char pfmt[20] = " "; if (!pair) goto dummy_print; switch (comparison_method) { case COMPUTE_DELTA: if (pair->diff.computed) diff = pair->diff.period_ratio_delta; else diff = compute_delta(he, pair); if (fabs(diff) < 0.01) goto dummy_print; scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1); return percent_color_snprintf(hpp->buf, hpp->size, pfmt, diff); case COMPUTE_RATIO: if (he->dummy) goto dummy_print; if (pair->diff.computed) diff = pair->diff.period_ratio; else diff = compute_ratio(he, pair); scnprintf(pfmt, 20, "%%%d.6f", dfmt->header_width); return value_color_snprintf(hpp->buf, hpp->size, pfmt, diff); case COMPUTE_WEIGHTED_DIFF: if (he->dummy) goto dummy_print; if (pair->diff.computed) wdiff = pair->diff.wdiff; else wdiff = compute_wdiff(he, pair); scnprintf(pfmt, 20, "%%14ld", dfmt->header_width); return color_snprintf(hpp->buf, hpp->size, get_percent_color(wdiff), pfmt, wdiff); default: BUG_ON(1); } dummy_print: return scnprintf(hpp->buf, hpp->size, "%*s", dfmt->header_width, pfmt); } static int hpp__color_delta(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he) { return __hpp__color_compare(fmt, hpp, he, COMPUTE_DELTA); } static int hpp__color_ratio(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he) { return __hpp__color_compare(fmt, hpp, he, COMPUTE_RATIO); } static int hpp__color_wdiff(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he) { return __hpp__color_compare(fmt, hpp, he, COMPUTE_WEIGHTED_DIFF); } static void hpp__entry_unpair(struct hist_entry *he, int idx, char *buf, size_t size) { switch (idx) { case 
PERF_HPP_DIFF__PERIOD_BASELINE: scnprintf(buf, size, "%" PRIu64, he->stat.period); break; default: break; } } static void hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair, int idx, char *buf, size_t size) { double diff; double ratio; s64 wdiff; switch (idx) { case PERF_HPP_DIFF__DELTA: if (pair->diff.computed) diff = pair->diff.period_ratio_delta; else diff = compute_delta(he, pair); if (fabs(diff) >= 0.01) scnprintf(buf, size, "%+4.2F%%", diff); break; case PERF_HPP_DIFF__RATIO: /* No point for ratio number if we are dummy.. */ if (he->dummy) break; if (pair->diff.computed) ratio = pair->diff.period_ratio; else ratio = compute_ratio(he, pair); if (ratio > 0.0) scnprintf(buf, size, "%14.6F", ratio); break; case PERF_HPP_DIFF__WEIGHTED_DIFF: /* No point for wdiff number if we are dummy.. */ if (he->dummy) break; if (pair->diff.computed) wdiff = pair->diff.wdiff; else wdiff = compute_wdiff(he, pair); if (wdiff != 0) scnprintf(buf, size, "%14ld", wdiff); break; case PERF_HPP_DIFF__FORMULA: formula_fprintf(he, pair, buf, size); break; case PERF_HPP_DIFF__PERIOD: scnprintf(buf, size, "%" PRIu64, pair->stat.period); break; default: BUG_ON(1); }; } static void __hpp__entry_global(struct hist_entry *he, struct diff_hpp_fmt *dfmt, char *buf, size_t size) { struct hist_entry *pair = get_pair_fmt(he, dfmt); int idx = dfmt->idx; /* baseline is special */ if (idx == PERF_HPP_DIFF__BASELINE) hpp__entry_baseline(he, buf, size); else { if (pair) hpp__entry_pair(he, pair, idx, buf, size); else hpp__entry_unpair(he, idx, buf, size); } } static int hpp__entry_global(struct perf_hpp_fmt *_fmt, struct perf_hpp *hpp, struct hist_entry *he) { struct diff_hpp_fmt *dfmt = container_of(_fmt, struct diff_hpp_fmt, fmt); char buf[MAX_COL_WIDTH] = " "; __hpp__entry_global(he, dfmt, buf, MAX_COL_WIDTH); if (symbol_conf.field_sep) return scnprintf(hpp->buf, hpp->size, "%s", buf); else return scnprintf(hpp->buf, hpp->size, "%*s", dfmt->header_width, buf); } static int 
hpp__header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct perf_evsel *evsel __maybe_unused) { struct diff_hpp_fmt *dfmt = container_of(fmt, struct diff_hpp_fmt, fmt); BUG_ON(!dfmt->header); return scnprintf(hpp->buf, hpp->size, dfmt->header); } static int hpp__width(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp __maybe_unused, struct perf_evsel *evsel __maybe_unused) { struct diff_hpp_fmt *dfmt = container_of(fmt, struct diff_hpp_fmt, fmt); BUG_ON(dfmt->header_width <= 0); return dfmt->header_width; } static void init_header(struct data__file *d, struct diff_hpp_fmt *dfmt) { #define MAX_HEADER_NAME 100 char buf_indent[MAX_HEADER_NAME]; char buf[MAX_HEADER_NAME]; const char *header = NULL; int width = 0; BUG_ON(dfmt->idx >= PERF_HPP_DIFF__MAX_INDEX); header = columns[dfmt->idx].name; width = columns[dfmt->idx].width; /* Only our defined HPP fmts should appear here. */ BUG_ON(!header); if (data__files_cnt > 2) scnprintf(buf, MAX_HEADER_NAME, "%s/%d", header, d->idx); #define NAME (data__files_cnt > 2 ? 
buf : header) dfmt->header_width = width; width = (int) strlen(NAME); if (dfmt->header_width < width) dfmt->header_width = width; scnprintf(buf_indent, MAX_HEADER_NAME, "%*s", dfmt->header_width, NAME); dfmt->header = strdup(buf_indent); #undef MAX_HEADER_NAME #undef NAME } static void data__hpp_register(struct data__file *d, int idx) { struct diff_hpp_fmt *dfmt = &d->fmt[idx]; struct perf_hpp_fmt *fmt = &dfmt->fmt; dfmt->idx = idx; fmt->header = hpp__header; fmt->width = hpp__width; fmt->entry = hpp__entry_global; /* TODO more colors */ switch (idx) { case PERF_HPP_DIFF__BASELINE: fmt->color = hpp__color_baseline; break; case PERF_HPP_DIFF__DELTA: fmt->color = hpp__color_delta; break; case PERF_HPP_DIFF__RATIO: fmt->color = hpp__color_ratio; break; case PERF_HPP_DIFF__WEIGHTED_DIFF: fmt->color = hpp__color_wdiff; break; default: break; } init_header(d, dfmt); perf_hpp__column_register(fmt); } static void ui_init(void) { struct data__file *d; int i; data__for_each_file(i, d) { /* * Baseline or compute realted columns: * * PERF_HPP_DIFF__BASELINE * PERF_HPP_DIFF__DELTA * PERF_HPP_DIFF__RATIO * PERF_HPP_DIFF__WEIGHTED_DIFF */ data__hpp_register(d, i ? compute_2_hpp[compute] : PERF_HPP_DIFF__BASELINE); /* * And the rest: * * PERF_HPP_DIFF__FORMULA * PERF_HPP_DIFF__PERIOD * PERF_HPP_DIFF__PERIOD_BASELINE */ if (show_formula && i) data__hpp_register(d, PERF_HPP_DIFF__FORMULA); if (show_period) data__hpp_register(d, i ? 
PERF_HPP_DIFF__PERIOD : PERF_HPP_DIFF__PERIOD_BASELINE); } } static int data_init(int argc, const char **argv) { struct data__file *d; static const char *defaults[] = { "perf.data.old", "perf.data", }; bool use_default = true; int i; data__files_cnt = 2; if (argc) { if (argc == 1) defaults[1] = argv[0]; else { data__files_cnt = argc; use_default = false; } } else if (perf_guest) { defaults[0] = "perf.data.host"; defaults[1] = "perf.data.guest"; } if (sort_compute >= (unsigned int) data__files_cnt) { pr_err("Order option out of limit.\n"); return -EINVAL; } data__files = zalloc(sizeof(*data__files) * data__files_cnt); if (!data__files) return -ENOMEM; data__for_each_file(i, d) { struct perf_data_file *file = &d->file; file->path = use_default ? defaults[i] : argv[i]; file->mode = PERF_DATA_MODE_READ, file->force = force, d->idx = i; } return 0; } int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused) { int ret = hists__init(); if (ret < 0) return ret; perf_config(perf_default_config, NULL); argc = parse_options(argc, argv, options, diff_usage, 0); if (symbol__init(NULL) < 0) return -1; if (data_init(argc, argv) < 0) return -1; ui_init(); sort__mode = SORT_MODE__DIFF; if (setup_sorting() < 0) usage_with_options(diff_usage, options); setup_pager(); sort__setup_elide(NULL); return __cmd_diff(); }
gpl-2.0
HRTKernel/Hacker-Kernel-H850
drivers/block/null_blk.c
303
13695
/*
 * null_blk: a block-layer test/benchmark driver that completes every
 * request without touching any real storage.  It can expose the device
 * through three interfaces (bio-based, legacy request queue, blk-mq)
 * and complete I/O inline, via softirq, or via a per-cpu hrtimer.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>

/* Per-command bookkeeping; lives in the blk-mq pdu or is tag-allocated. */
struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;	/* link on the per-cpu completion list */
	struct call_single_data csd;
	struct request *rq;		/* set in rq/mq modes */
	struct bio *bio;		/* set in bio mode */
	unsigned int tag;		/* -1U when not tag-allocated */
	struct nullb_queue *nq;		/* owning queue (for tag release) */
};

/* One submission queue: a bitmap tag allocator plus its command array. */
struct nullb_queue {
	unsigned long *tag_map;		/* one bit per command slot */
	wait_queue_head_t wait;		/* waiters for a free tag */
	unsigned int queue_depth;
	struct nullb_cmd *cmds;
};

/* One null block device instance. */
struct nullb {
	struct list_head list;		/* link on nullb_list, under `lock` */
	unsigned int index;		/* minor / name suffix */
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;	/* used only in NULL_Q_MQ mode */
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;		/* queue lock for the legacy rq mode */
	struct nullb_queue *queues;	/* submit_queues entries */
	unsigned int nr_queues;		/* how many entries are initialized */
};

static LIST_HEAD(nullb_list);
static struct mutex lock;		/* protects nullb_list / nullb_indexes */
static int null_major;
static int nullb_indexes;

/* Per-cpu deferred-completion state for the timer IRQ mode. */
struct completion_queue {
	struct llist_head list;
	struct hrtimer timer;
};

/*
 * These are per-cpu for now, they will need to be configured by the
 * complete_queues parameter and appropriately mapped.
 */
static DEFINE_PER_CPU(struct completion_queue, completion_queues);

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;
module_param(queue_mode, int, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static int irqmode = NULL_IRQ_SOFTIRQ;
module_param(irqmode, int, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

/* Release a tag and wake anyone sleeping in alloc_cmd() for one. */
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

/*
 * Lock-free tag allocation: find a clear bit and race to set it,
 * retrying on collision.  Returns -1U when the map is full.
 */
static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

/* Non-blocking command allocation; NULL when no tag is free. */
static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		return cmd;
	}

	return NULL;
}

/*
 * Allocate a command, optionally sleeping (uninterruptibly) until a
 * tag becomes free.  With can_wait this cannot return NULL.
 */
static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

/* Signal successful completion through whichever interface issued it. */
static void end_cmd(struct nullb_cmd *cmd)
{
	switch (queue_mode)  {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio, 0);
		break;
	}

	free_cmd(cmd);
}

/*
 * hrtimer callback for NULL_IRQ_TIMER mode: drain this cpu's pending
 * list and complete the commands in submission order (the llist is
 * LIFO, hence the reverse).
 */
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			cmd = container_of(entry, struct nullb_cmd, ll_list);
			entry = entry->next;
			end_cmd(cmd);
		} while (entry);
	}

	return HRTIMER_NORESTART;
}

/*
 * Queue a command for timer completion.  The timer is only armed when
 * this cpu's list transitions from empty (llist_add returns true).
 */
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());

	cmd->ll_list.next = NULL;
	if (llist_add(&cmd->ll_list, &cq->list)) {
		ktime_t kt = ktime_set(0, completion_nsec);

		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL);
	}

	put_cpu();
}

/* Softirq-context completion for both rq and mq modes. */
static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode)  {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

/* Map the submitting cpu onto one of the nr_queues submission queues. */
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}

/* make_request entry point for NULL_Q_BIO mode. */
static void null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);	/* can_wait=1, so never NULL */
	cmd->bio = bio;

	null_handle_cmd(cmd);
}

/* prep_rq_fn for the legacy request mode; defers when no tag is free. */
static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}

	return BLKPREP_DEFER;
}

/* request_fn: drop the queue lock while "performing" each request. */
static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

/* blk-mq queue_rq: the command lives in the request pdu, no tag map. */
static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
		bool last)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->rq = rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

/* blk-mq hctx init: wire the hardware context to our per-queue state. */
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static struct blk_mq_ops null_mq_ops = {
	.queue_rq       = null_queue_rq,
	.map_queue      = blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
};

/* Tear down one device; caller holds `lock` (see null_exit). */
static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	put_disk(nullb->disk);
	kfree(nullb);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner =	THIS_MODULE,
	.open =		null_open,
	.release =	null_release,
};

/* Allocate the command array and tag bitmap for one submission queue. */
static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

/* Initialize all queues for the non-mq modes (mq uses init_hctx). */
static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}

	return 0;
}

/*
 * Create and register one nullb instance according to the module
 * parameters.  Unwinds via the labeled error path on failure.
 */
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_blk_queue;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	/* Capacity is gb gigabytes, expressed in units of the block size. */
	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;

out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

/*
 * Module init: sanitize parameters, set up per-cpu completion state,
 * register the major and create nr_devices instances.
 * NOTE(review): if null_add_dev() fails after some devices were
 * created, the earlier ones are not torn down here — verify whether
 * that leak is acceptable before reuse.
 */
static int __init null_init(void)
{
	unsigned int i;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	/* Initialize a separate list for each CPU for issuing softirqs */
	for_each_possible_cpu(i) {
		struct completion_queue *cq = &per_cpu(completion_queues, i);

		init_llist_head(&cq->list);

		if (irqmode != NULL_IRQ_TIMER)
			continue;

		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cq->timer.function = null_cmd_timer_expired;
	}

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			unregister_blkdev(null_major, "nullb");
			return -EINVAL;
		}
	}

	pr_info("null: module loaded\n");
	return 0;
}

/* Module exit: unregister the major and destroy every device. */
static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");
gpl-2.0
bhree/android_kernel_jena_msm7x27a
arch/sh/oprofile/backtrace.c
303
2392
/*
 * SH specific backtracing code for oprofile
 *
 * Copyright 2007 STMicroelectronics Ltd.
 *
 * Author: Dave Peverley <dpeverley@mpc-data.co.uk>
 *
 * Based on ARM oprofile backtrace code by Richard Purdie and in turn, i386
 * oprofile backtrace code by John Levon, David Smith
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <asm/unwinder.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>

/* Unwinder callbacks: warnings are deliberately ignored. */
static void backtrace_warning_symbol(void *data, char *msg,
				     unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Yes, we want all stacks */
	return 0;
}

/*
 * Record one kernel return address, honouring the depth budget that
 * sh_backtrace() passed in through @data.
 *
 * Fix: the old test "if ((*depth)--)" post-decremented even when
 * *depth was already 0, wrapping the unsigned counter to UINT_MAX and
 * effectively disabling the backtrace_limit for the rest of the
 * unwind.  Only decrement while the budget is non-zero.
 */
static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	unsigned int *depth = data;

	if (*depth) {
		(*depth)--;
		oprofile_add_trace(addr);
	}
}

static struct stacktrace_ops backtrace_ops = {
	.warning = backtrace_warning,
	.warning_symbol = backtrace_warning_symbol,
	.stack = backtrace_stack,
	.address = backtrace_address,
};

/* Limit to stop backtracing too far. */
static int backtrace_limit = 20;

/*
 * Walk one frame of a userspace stack: validate and fetch the word at
 * @stackaddr, record it as a trace entry, and return the next stack
 * slot (or NULL to stop on a fault or an obviously bogus value).
 */
static unsigned long *
user_backtrace(unsigned long *stackaddr, struct pt_regs *regs)
{
	unsigned long buf_stack;

	/* Also check accessibility of address */
	if (!access_ok(VERIFY_READ, stackaddr, sizeof(unsigned long)))
		return NULL;

	if (__copy_from_user_inatomic(&buf_stack, stackaddr,
				      sizeof(unsigned long)))
		return NULL;

	/* Quick paranoia check */
	if (buf_stack & 3)
		return NULL;

	oprofile_add_trace(buf_stack);

	stackaddr++;

	return stackaddr;
}

/*
 * Entry point called by oprofile: produce up to @depth trace entries
 * for the interrupted context, using the kernel unwinder for kernel
 * mode and a simple word-by-word stack scan for user mode.
 */
void sh_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	unsigned long *stackaddr;

	/*
	 * Paranoia - clip max depth as we could get lost in the weeds.
	 */
	if (depth > backtrace_limit)
		depth = backtrace_limit;

	stackaddr = (unsigned long *)kernel_stack_pointer(regs);
	if (!user_mode(regs)) {
		if (depth)
			unwind_stack(NULL, regs, stackaddr,
				     &backtrace_ops, &depth);
		return;
	}

	while (depth-- && (stackaddr != NULL))
		stackaddr = user_backtrace(stackaddr, regs);
}
gpl-2.0
securecrt/linux-leo
drivers/staging/comedi/drivers/das6402.c
559
8559
/*
   Some comments on the code..

   - it shouldn't be necessary to use outb_p().

   - ignoreirq creates a race condition. It needs to be fixed.

 */

/*
   comedi/drivers/das6402.c
   An experimental driver for Computerboards' DAS6402 I/O card

   Copyright (C) 1999 Oystein Svendsen <svendsen@pvv.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
Driver: das6402
Description: Keithley Metrabyte DAS6402 (& compatibles)
Author: Oystein Svendsen <svendsen@pvv.org>
Status: bitrotten
Devices: [Keithley Metrabyte] DAS6402 (das6402)

This driver has suffered bitrot.
*/

#include <linux/interrupt.h>
#include "../comedidev.h"

#include <linux/ioport.h>

#define DAS6402_SIZE 16

#define N_WORDS 3000*64

#define STOP    0
#define START   1
#define SCANL   0x3f00
#define BYTE unsigned char
#define WORD unsigned short

/*----- register 8 ----*/
#define CLRINT 0x01
#define CLRXTR 0x02
#define CLRXIN 0x04
#define EXTEND 0x10
#define ARMED 0x20		/* enable conting of post sample conv */
#define POSTMODE 0x40
#define MHZ 0x80		/* 10 MHz clock */
/*---------------------*/

/*----- register 9 ----*/
#define IRQ (0x04 << 4)		/* these two are      */
#define IRQV 10			/* dependent on each other */

#define CONVSRC 0x03		/* trig src is Intarnal pacer */
#define BURSTEN 0x04		/* enable burst */
#define XINTE 0x08		/* use external int. trig */
#define INTE 0x80		/* enable analog interrupts */
/*---------------------*/

/*----- register 10 ---*/
#define TGEN 0x01		/* Use pin DI1 for externl trigging? */
#define TGSEL 0x02		/* Use edge triggering */
#define TGPOL 0x04		/* active edge is falling */
#define PRETRIG 0x08		/* pretrig */
/*---------------------*/

/*----- register 11 ---*/
#define EOB 0x0c
#define FIFOHFULL 0x08
#define GAIN 0x01
#define FIFONEPTY 0x04
#define MODE 0x10
#define SEM 0x20
#define BIP 0x40
/*---------------------*/

#define M0 0x00
#define M2 0x04

#define	C0 0x00
#define	C1 0x40
#define	C2 0x80
#define	RWLH 0x30

static int das6402_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it);
static int das6402_detach(struct comedi_device *dev);
static struct comedi_driver driver_das6402 = {
	.driver_name = "das6402",
	.module = THIS_MODULE,
	.attach = das6402_attach,
	.detach = das6402_detach,
};

COMEDI_INITCLEANUP(driver_das6402);

struct das6402_private {
	int ai_bytes_to_read;	/* total bytes expected for the current run */

	int das6402_ignoreirq;	/* when set, the ISR bails out early */
};
#define devpriv ((struct das6402_private *)dev->private)

static void das6402_ai_fifo_dregs(struct comedi_device *dev,
				  struct comedi_subdevice *s);

/*
 * Program the three 8253-style counters that generate the pacer clock.
 */
static void das6402_setcounter(struct comedi_device *dev)
{
	BYTE p;
	unsigned short ctrlwrd;

	/* set up counter0 first, mode 0 */
	p = M0 | C0 | RWLH;
	outb_p(p, dev->iobase + 15);
	ctrlwrd = 2000;
	p = (BYTE) (0xff & ctrlwrd);
	outb_p(p, dev->iobase + 12);
	p = (BYTE) (0xff & (ctrlwrd >> 8));
	outb_p(p, dev->iobase + 12);

	/* set up counter1, mode 2 */
	p = M2 | C1 | RWLH;
	outb_p(p, dev->iobase + 15);
	ctrlwrd = 10;
	p = (BYTE) (0xff & ctrlwrd);
	outb_p(p, dev->iobase + 13);
	p = (BYTE) (0xff & (ctrlwrd >> 8));
	outb_p(p, dev->iobase + 13);

	/* set up counter2, mode 2 (comment previously said counter1) */
	p = M2 | C2 | RWLH;
	outb_p(p, dev->iobase + 15);
	ctrlwrd = 1000;
	p = (BYTE) (0xff & ctrlwrd);
	outb_p(p, dev->iobase + 14);
	p = (BYTE) (0xff & (ctrlwrd >> 8));
	outb_p(p, dev->iobase + 14);
}

/*
 * Interrupt handler: drain the FIFO into the comedi buffer and signal
 * end-of-acquisition once ai_bytes_to_read bytes have been captured.
 */
static irqreturn_t intr_handler(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->subdevices;

	if (!dev->attached || devpriv->das6402_ignoreirq) {
		printk("das6402: BUG: spurious interrupt\n");
		return IRQ_HANDLED;
	}
#ifdef DEBUG
	printk("das6402: interrupt!  das6402_irqcount=%i\n",
	       devpriv->das6402_irqcount);
	printk("das6402: iobase+2=%i\n", inw_p(dev->iobase + 2));
#endif

	das6402_ai_fifo_dregs(dev, s);

	if (s->async->buf_write_count >= devpriv->ai_bytes_to_read) {
		outw_p(SCANL, dev->iobase + 2);	/* clears the fifo */
		outb(0x07, dev->iobase + 8);	/* clears all flip-flops */
#ifdef DEBUG
		printk("das6402: Got %i samples\n\n",
		       devpriv->das6402_wordsread - diff);
#endif
		s->async->events |= COMEDI_CB_EOA;
		comedi_event(dev, s);
	}

	outb(0x01, dev->iobase + 8);	/* clear only the interrupt flip-flop */

	comedi_event(dev, s);
	return IRQ_HANDLED;
}

#if 0
static void das6402_ai_fifo_read(struct comedi_device *dev, short *data, int n)
{
	int i;

	for (i = 0; i < n; i++)
		data[i] = inw(dev->iobase);
}
#endif

/* Copy everything currently in the hardware FIFO into the async buffer. */
static void das6402_ai_fifo_dregs(struct comedi_device *dev,
				  struct comedi_subdevice *s)
{
	while (1) {
		if (!(inb(dev->iobase + 8) & 0x01))
			return;
		comedi_buf_put(s->async, inw(dev->iobase));
	}
}

static int das6402_ai_cancel(struct comedi_device *dev,
			     struct comedi_subdevice *s)
{
	/*
	 *  This function should reset the board from whatever condition it
	 *  is in (i.e., acquiring data), to a non-active state.
	 */

	devpriv->das6402_ignoreirq = 1;
#ifdef DEBUG
	printk("das6402: Stopping acquisition\n");
#endif
	/* (a second, redundant "ignoreirq = 1" assignment was removed) */
	outb_p(0x02, dev->iobase + 10);	/* disable external trigging */
	outw_p(SCANL, dev->iobase + 2);	/* resets the card fifo */
	outb_p(0, dev->iobase + 9);	/* disables interrupts */

	outw_p(SCANL, dev->iobase + 2);

	return 0;
}

#ifdef unused
static int das6402_ai_mode2(struct comedi_device *dev,
			    struct comedi_subdevice *s, comedi_trig * it)
{
	devpriv->das6402_ignoreirq = 1;

#ifdef DEBUG
	printk("das6402: Starting acquisition\n");
#endif
	outb_p(0x03, dev->iobase + 10);	/* enable external trigging */
	outw_p(SCANL, dev->iobase + 2);	/* resets the card fifo */
	outb_p(IRQ | CONVSRC | BURSTEN | INTE, dev->iobase + 9);

	devpriv->ai_bytes_to_read = it->n * sizeof(short);

	/* um... ignoreirq is a nasty race condition */
	devpriv->das6402_ignoreirq = 0;

	outw_p(SCANL, dev->iobase + 2);

	return 0;
}
#endif

/* Bring the card to a known state: program all registers and counters. */
static int board_init(struct comedi_device *dev)
{
	BYTE b;

	devpriv->das6402_ignoreirq = 1;

	outb(0x07, dev->iobase + 8);

	/* register 11  */
	outb_p(MODE, dev->iobase + 11);
	b = BIP | SEM | MODE | GAIN | FIFOHFULL;
	outb_p(b, dev->iobase + 11);

	/* register 8   */
	outb_p(EXTEND, dev->iobase + 8);
	b = EXTEND | MHZ;
	outb_p(b, dev->iobase + 8);
	b = MHZ | CLRINT | CLRXTR | CLRXIN;
	outb_p(b, dev->iobase + 8);

	/* register 9    */
	b = IRQ | CONVSRC | BURSTEN | INTE;
	outb_p(b, dev->iobase + 9);

	/* register 10   */
	b = TGSEL | TGEN;
	outb_p(b, dev->iobase + 10);

	b = 0x07;
	outb_p(b, dev->iobase + 8);

	das6402_setcounter(dev);

	outw_p(SCANL, dev->iobase + 2);	/* reset card fifo */

	devpriv->das6402_ignoreirq = 0;

	return 0;
}

/* Release the IRQ and the I/O region acquired by das6402_attach(). */
static int das6402_detach(struct comedi_device *dev)
{
	if (dev->irq)
		free_irq(dev->irq, dev);
	if (dev->iobase)
		release_region(dev->iobase, DAS6402_SIZE);

	return 0;
}

/*
 * Attach: claim the I/O region and IRQ given in it->options and set up
 * the single AI subdevice.
 */
static int das6402_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it)
{
	unsigned int irq;
	unsigned long iobase;
	int ret;
	struct comedi_subdevice *s;

	dev->board_name = "das6402";

	iobase = it->options[0];
	if (iobase == 0)
		iobase = 0x300;

	printk("comedi%d: das6402: 0x%04lx", dev->minor, iobase);

	if (!request_region(iobase, DAS6402_SIZE, "das6402")) {
		printk(" I/O port conflict\n");
		return -EIO;
	}
	dev->iobase = iobase;

	/* should do a probe here */

	/*
	 * Bug fix: the IRQ is configuration option [1], per the comedi
	 * convention for ISA boards (options[0] = I/O base,
	 * options[1] = IRQ).  The old code re-read it->options[0], so
	 * it tried to request the I/O base address as an IRQ number.
	 */
	irq = it->options[1];
	printk(" ( irq = %u )", irq);
	ret = request_irq(irq, intr_handler, 0, "das6402", dev);
	if (ret < 0) {
		printk("irq conflict\n");
		return ret;
	}
	dev->irq = irq;

	ret = alloc_private(dev, sizeof(struct das6402_private));
	if (ret < 0)
		return ret;

	ret = alloc_subdevices(dev, 1);
	if (ret < 0)
		return ret;

	/* ai subdevice */
	s = dev->subdevices + 0;
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE | SDF_GROUND;
	s->n_chan = 8;
	/* s->trig[2]=das6402_ai_mode2; */
	s->cancel = das6402_ai_cancel;
	s->maxdata = (1 << 12) - 1;
	s->len_chanlist = 16;	/* ? */
	s->range_table = &range_unknown;

	board_init(dev);

	return 0;
}
gpl-2.0
arjen75/LG-Optimus-Chic-Kernel-2.6.32.59-
drivers/staging/comedi/drivers/amplc_pc236.c
559
17185
/* comedi/drivers/amplc_pc236.c Driver for Amplicon PC36AT and PCI236 DIO boards. Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: amplc_pc236 Description: Amplicon PC36AT, PCI236 Author: Ian Abbott <abbotti@mev.co.uk> Devices: [Amplicon] PC36AT (pc36at), PCI236 (pci236 or amplc_pc236) Updated: Wed, 01 Apr 2009 15:41:25 +0100 Status: works Configuration options - PC36AT: [0] - I/O port base address [1] - IRQ (optional) Configuration options - PCI236: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first available PCI device will be used. The PC36AT ISA board and PCI236 PCI board have a single 8255 appearing as subdevice 0. Subdevice 1 pretends to be a digital input device, but it always returns 0 when read. However, if you run a command with scan_begin_src=TRIG_EXT, a rising edge on port C bit 3 acts as an external trigger, which can be used to wake up tasks. This is like the comedi_parport device, but the only way to physically disable the interrupt on the PC36AT is to remove the IRQ jumper. If no interrupt is connected, then subdevice 1 is unused. 
*/ #include <linux/interrupt.h> #include "../comedidev.h" #include "comedi_pci.h" #include "8255.h" #include "plx9052.h" #define PC236_DRIVER_NAME "amplc_pc236" /* PCI236 PCI configuration register information */ #define PCI_VENDOR_ID_AMPLICON 0x14dc #define PCI_DEVICE_ID_AMPLICON_PCI236 0x0009 #define PCI_DEVICE_ID_INVALID 0xffff /* PC36AT / PCI236 registers */ #define PC236_IO_SIZE 4 #define PC236_LCR_IO_SIZE 128 /* * INTCSR values for PCI236. */ /* Disable interrupt, also clear any interrupt there */ #define PCI236_INTR_DISABLE (PLX9052_INTCSR_LI1ENAB_DISABLED \ | PLX9052_INTCSR_LI1POL_HIGH \ | PLX9052_INTCSR_LI2POL_HIGH \ | PLX9052_INTCSR_PCIENAB_DISABLED \ | PLX9052_INTCSR_LI1SEL_EDGE \ | PLX9052_INTCSR_LI1CLRINT_ASSERTED) /* Enable interrupt, also clear any interrupt there. */ #define PCI236_INTR_ENABLE (PLX9052_INTCSR_LI1ENAB_ENABLED \ | PLX9052_INTCSR_LI1POL_HIGH \ | PLX9052_INTCSR_LI2POL_HIGH \ | PLX9052_INTCSR_PCIENAB_ENABLED \ | PLX9052_INTCSR_LI1SEL_EDGE \ | PLX9052_INTCSR_LI1CLRINT_ASSERTED) /* * Board descriptions for Amplicon PC36AT and PCI236. 
*/ enum pc236_bustype { isa_bustype, pci_bustype }; enum pc236_model { pc36at_model, pci236_model, anypci_model }; struct pc236_board { const char *name; const char *fancy_name; unsigned short devid; enum pc236_bustype bustype; enum pc236_model model; }; static const struct pc236_board pc236_boards[] = { { .name = "pc36at", .fancy_name = "PC36AT", .bustype = isa_bustype, .model = pc36at_model, }, #ifdef CONFIG_COMEDI_PCI { .name = "pci236", .fancy_name = "PCI236", .devid = PCI_DEVICE_ID_AMPLICON_PCI236, .bustype = pci_bustype, .model = pci236_model, }, #endif #ifdef CONFIG_COMEDI_PCI { .name = PC236_DRIVER_NAME, .fancy_name = PC236_DRIVER_NAME, .devid = PCI_DEVICE_ID_INVALID, .bustype = pci_bustype, .model = anypci_model, /* wildcard */ }, #endif }; #ifdef CONFIG_COMEDI_PCI static DEFINE_PCI_DEVICE_TABLE(pc236_pci_table) = { { PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI236, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 0} }; MODULE_DEVICE_TABLE(pci, pc236_pci_table); #endif /* CONFIG_COMEDI_PCI */ /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct pc236_board *)dev->board_ptr) /* this structure is for data unique to this hardware driver. If several hardware drivers keep similar information in this structure, feel free to suggest moving the variable to the struct comedi_device struct. */ struct pc236_private { #ifdef CONFIG_COMEDI_PCI /* PCI device */ struct pci_dev *pci_dev; unsigned long lcr_iobase; /* PLX PCI9052 config registers in PCIBAR1 */ #endif int enable_irq; }; #define devpriv ((struct pc236_private *)dev->private) /* * The struct comedi_driver structure tells the Comedi core module * which functions to call to configure/deconfigure (attach/detach) * the board, and also about the kernel module that contains * the device code. 
*/ static int pc236_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int pc236_detach(struct comedi_device *dev); static struct comedi_driver driver_amplc_pc236 = { .driver_name = PC236_DRIVER_NAME, .module = THIS_MODULE, .attach = pc236_attach, .detach = pc236_detach, .board_name = &pc236_boards[0].name, .offset = sizeof(struct pc236_board), .num_names = ARRAY_SIZE(pc236_boards), }; #ifdef CONFIG_COMEDI_PCI COMEDI_PCI_INITCLEANUP(driver_amplc_pc236, pc236_pci_table); #else COMEDI_INITCLEANUP(driver_amplc_pc236); #endif static int pc236_request_region(unsigned minor, unsigned long from, unsigned long extent); static void pc236_intr_disable(struct comedi_device *dev); static void pc236_intr_enable(struct comedi_device *dev); static int pc236_intr_check(struct comedi_device *dev); static int pc236_intr_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int pc236_intr_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int pc236_intr_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int pc236_intr_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static irqreturn_t pc236_interrupt(int irq, void *d); /* * This function looks for a PCI device matching the requested board name, * bus and slot. */ #ifdef CONFIG_COMEDI_PCI static int pc236_find_pci(struct comedi_device *dev, int bus, int slot, struct pci_dev **pci_dev_p) { struct pci_dev *pci_dev = NULL; *pci_dev_p = NULL; /* Look for matching PCI device. */ for (pci_dev = pci_get_device(PCI_VENDOR_ID_AMPLICON, PCI_ANY_ID, NULL); pci_dev != NULL; pci_dev = pci_get_device(PCI_VENDOR_ID_AMPLICON, PCI_ANY_ID, pci_dev)) { /* If bus/slot specified, check them. */ if (bus || slot) { if (bus != pci_dev->bus->number || slot != PCI_SLOT(pci_dev->devfn)) continue; } if (thisboard->model == anypci_model) { /* Match any supported model. 
*/ int i; for (i = 0; i < ARRAY_SIZE(pc236_boards); i++) { if (pc236_boards[i].bustype != pci_bustype) continue; if (pci_dev->device == pc236_boards[i].devid) { /* Change board_ptr to matched board. */ dev->board_ptr = &pc236_boards[i]; break; } } if (i == ARRAY_SIZE(pc236_boards)) continue; } else { /* Match specific model name. */ if (pci_dev->device != thisboard->devid) continue; } /* Found a match. */ *pci_dev_p = pci_dev; return 0; } /* No match found. */ if (bus || slot) { printk(KERN_ERR "comedi%d: error! no %s found at pci %02x:%02x!\n", dev->minor, thisboard->name, bus, slot); } else { printk(KERN_ERR "comedi%d: error! no %s found!\n", dev->minor, thisboard->name); } return -EIO; } #endif /* * Attach is called by the Comedi core to configure the driver * for a particular board. If you specified a board_name array * in the driver structure, dev->board_ptr contains that * address. */ static int pc236_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; unsigned long iobase = 0; unsigned int irq = 0; #ifdef CONFIG_COMEDI_PCI struct pci_dev *pci_dev = NULL; int bus = 0, slot = 0; #endif int share_irq = 0; int ret; printk(KERN_DEBUG "comedi%d: %s: attach\n", dev->minor, PC236_DRIVER_NAME); /* * Allocate the private structure area. alloc_private() is a * convenient macro defined in comedidev.h. */ ret = alloc_private(dev, sizeof(struct pc236_private)); if (ret < 0) { printk(KERN_ERR "comedi%d: error! out of memory!\n", dev->minor); return ret; } /* Process options. */ switch (thisboard->bustype) { case isa_bustype: iobase = it->options[0]; irq = it->options[1]; share_irq = 0; break; #ifdef CONFIG_COMEDI_PCI case pci_bustype: bus = it->options[0]; slot = it->options[1]; share_irq = 1; ret = pc236_find_pci(dev, bus, slot, &pci_dev); if (ret < 0) return ret; devpriv->pci_dev = pci_dev; break; #endif /* CONFIG_COMEDI_PCI */ default: printk(KERN_ERR "comedi%d: %s: BUG! 
cannot determine board type!\n", dev->minor, PC236_DRIVER_NAME); return -EINVAL; break; } /* * Initialize dev->board_name. */ dev->board_name = thisboard->name; /* Enable device and reserve I/O spaces. */ #ifdef CONFIG_COMEDI_PCI if (pci_dev) { ret = comedi_pci_enable(pci_dev, PC236_DRIVER_NAME); if (ret < 0) { printk(KERN_ERR "comedi%d: error! cannot enable PCI device and request regions!\n", dev->minor); return ret; } devpriv->lcr_iobase = pci_resource_start(pci_dev, 1); iobase = pci_resource_start(pci_dev, 2); irq = pci_dev->irq; } else #endif { ret = pc236_request_region(dev->minor, iobase, PC236_IO_SIZE); if (ret < 0) { return ret; } } dev->iobase = iobase; /* * Allocate the subdevice structures. alloc_subdevice() is a * convenient macro defined in comedidev.h. */ ret = alloc_subdevices(dev, 2); if (ret < 0) { printk(KERN_ERR "comedi%d: error! out of memory!\n", dev->minor); return ret; } s = dev->subdevices + 0; /* digital i/o subdevice (8255) */ ret = subdev_8255_init(dev, s, NULL, iobase); if (ret < 0) { printk(KERN_ERR "comedi%d: error! out of memory!\n", dev->minor); return ret; } s = dev->subdevices + 1; dev->read_subdev = s; s->type = COMEDI_SUBD_UNUSED; pc236_intr_disable(dev); if (irq) { unsigned long flags = share_irq ? IRQF_SHARED : 0; if (request_irq(irq, pc236_interrupt, flags, PC236_DRIVER_NAME, dev) >= 0) { dev->irq = irq; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE | SDF_CMD_READ; s->n_chan = 1; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = pc236_intr_insn; s->do_cmdtest = pc236_intr_cmdtest; s->do_cmd = pc236_intr_cmd; s->cancel = pc236_intr_cancel; } } printk(KERN_INFO "comedi%d: %s ", dev->minor, dev->board_name); if (thisboard->bustype == isa_bustype) { printk("(base %#lx) ", iobase); } else { #ifdef CONFIG_COMEDI_PCI printk("(pci %s) ", pci_name(pci_dev)); #endif } if (irq) { printk("(irq %u%s) ", irq, (dev->irq ? 
"" : " UNAVAILABLE")); } else { printk("(no irq) "); } printk("attached\n"); return 1; } /* * _detach is called to deconfigure a device. It should deallocate * resources. * This function is also called when _attach() fails, so it should be * careful not to release resources that were not necessarily * allocated by _attach(). dev->private and dev->subdevices are * deallocated automatically by the core. */ static int pc236_detach(struct comedi_device *dev) { printk(KERN_DEBUG "comedi%d: %s: detach\n", dev->minor, PC236_DRIVER_NAME); if (devpriv) { pc236_intr_disable(dev); } if (dev->irq) free_irq(dev->irq, dev); if (dev->subdevices) { subdev_8255_cleanup(dev, dev->subdevices + 0); } if (devpriv) { #ifdef CONFIG_COMEDI_PCI if (devpriv->pci_dev) { if (dev->iobase) { comedi_pci_disable(devpriv->pci_dev); } pci_dev_put(devpriv->pci_dev); } else #endif { if (dev->iobase) { release_region(dev->iobase, PC236_IO_SIZE); } } } if (dev->board_name) { printk(KERN_INFO "comedi%d: %s removed\n", dev->minor, dev->board_name); } return 0; } /* * This function checks and requests an I/O region, reporting an error * if there is a conflict. */ static int pc236_request_region(unsigned minor, unsigned long from, unsigned long extent) { if (!from || !request_region(from, extent, PC236_DRIVER_NAME)) { printk(KERN_ERR "comedi%d: I/O port conflict (%#lx,%lu)!\n", minor, from, extent); return -EIO; } return 0; } /* * This function is called to mark the interrupt as disabled (no command * configured on subdevice 1) and to physically disable the interrupt * (not possible on the PC36AT, except by removing the IRQ jumper!). 
*/ static void pc236_intr_disable(struct comedi_device *dev) { unsigned long flags; spin_lock_irqsave(&dev->spinlock, flags); devpriv->enable_irq = 0; #ifdef CONFIG_COMEDI_PCI if (devpriv->lcr_iobase) outl(PCI236_INTR_DISABLE, devpriv->lcr_iobase + PLX9052_INTCSR); #endif spin_unlock_irqrestore(&dev->spinlock, flags); } /* * This function is called to mark the interrupt as enabled (a command * configured on subdevice 1) and to physically enable the interrupt * (not possible on the PC36AT, except by (re)connecting the IRQ jumper!). */ static void pc236_intr_enable(struct comedi_device *dev) { unsigned long flags; spin_lock_irqsave(&dev->spinlock, flags); devpriv->enable_irq = 1; #ifdef CONFIG_COMEDI_PCI if (devpriv->lcr_iobase) outl(PCI236_INTR_ENABLE, devpriv->lcr_iobase + PLX9052_INTCSR); #endif spin_unlock_irqrestore(&dev->spinlock, flags); } /* * This function is called when an interrupt occurs to check whether * the interrupt has been marked as enabled and was generated by the * board. If so, the function prepares the hardware for the next * interrupt. * Returns 0 if the interrupt should be ignored. */ static int pc236_intr_check(struct comedi_device *dev) { int retval = 0; unsigned long flags; spin_lock_irqsave(&dev->spinlock, flags); if (devpriv->enable_irq) { retval = 1; #ifdef CONFIG_COMEDI_PCI if (devpriv->lcr_iobase) { if ((inl(devpriv->lcr_iobase + PLX9052_INTCSR) & PLX9052_INTCSR_LI1STAT_MASK) == PLX9052_INTCSR_LI1STAT_INACTIVE) { retval = 0; } else { /* Clear interrupt and keep it enabled. */ outl(PCI236_INTR_ENABLE, devpriv->lcr_iobase + PLX9052_INTCSR); } } #endif } spin_unlock_irqrestore(&dev->spinlock, flags); return retval; } /* * Input from subdevice 1. * Copied from the comedi_parport driver. */ static int pc236_intr_insn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[1] = 0; return 2; } /* * Subdevice 1 command test. * Copied from the comedi_parport driver. 
*/ static int pc236_intr_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; /* step 1 */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_EXT; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_FOLLOW; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: ignored */ if (err) return 2; /* step 3: */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->scan_begin_arg != 0) { cmd->scan_begin_arg = 0; err++; } if (cmd->convert_arg != 0) { cmd->convert_arg = 0; err++; } if (cmd->scan_end_arg != 1) { cmd->scan_end_arg = 1; err++; } if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } if (err) return 3; /* step 4: ignored */ if (err) return 4; return 0; } /* * Subdevice 1 command. */ static int pc236_intr_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { pc236_intr_enable(dev); return 0; } /* * Subdevice 1 cancel command. */ static int pc236_intr_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { pc236_intr_disable(dev); return 0; } /* * Interrupt service routine. * Based on the comedi_parport driver. */ static irqreturn_t pc236_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct comedi_subdevice *s = dev->subdevices + 1; int handled; handled = pc236_intr_check(dev); if (dev->attached && handled) { comedi_buf_put(s->async, 0); s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS; comedi_event(dev, s); } return IRQ_RETVAL(handled); }
gpl-2.0
ParanoidAndroid/android_kernel_grouper
net/tipc/name_table.c
559
25514
/* * net/tipc/name_table.c: TIPC name table code * * Copyright (c) 2000-2006, Ericsson AB * Copyright (c) 2004-2008, 2010-2011, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "core.h" #include "config.h" #include "name_table.h" #include "name_distr.h" #include "subscr.h" #include "port.h" static int tipc_nametbl_size = 1024; /* must be a power of 2 */ /** * struct name_info - name sequence publication info * @node_list: circular list of publications made by own node * @cluster_list: circular list of publications made by own cluster * @zone_list: circular list of publications made by own zone * @node_list_size: number of entries in "node_list" * @cluster_list_size: number of entries in "cluster_list" * @zone_list_size: number of entries in "zone_list" * * Note: The zone list always contains at least one entry, since all * publications of the associated name sequence belong to it. * (The cluster and node lists may be empty.) */ struct name_info { struct list_head node_list; struct list_head cluster_list; struct list_head zone_list; u32 node_list_size; u32 cluster_list_size; u32 zone_list_size; }; /** * struct sub_seq - container for all published instances of a name sequence * @lower: name sequence lower bound * @upper: name sequence upper bound * @info: pointer to name sequence publication info */ struct sub_seq { u32 lower; u32 upper; struct name_info *info; }; /** * struct name_seq - container for all published instances of a name type * @type: 32 bit 'type' value for name sequence * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type'; * sub-sequences are sorted in ascending order * @alloc: number of sub-sequences currently in array * @first_free: array index of first unused sub-sequence entry * @ns_list: links to adjacent name sequences in hash chain * @subscriptions: list of subscriptions for this 'type' * @lock: spinlock controlling access to publication lists of all sub-sequences */ struct name_seq { u32 type; struct sub_seq *sseqs; u32 alloc; u32 first_free; struct hlist_node ns_list; struct list_head subscriptions; spinlock_t lock; }; /** * struct name_table - table containing all existing port 
name publications * @types: pointer to fixed-sized array of name sequence lists, * accessed via hashing on 'type'; name sequence lists are *not* sorted * @local_publ_count: number of publications issued by this node */ struct name_table { struct hlist_head *types; u32 local_publ_count; }; static struct name_table table; static atomic_t rsv_publ_ok = ATOMIC_INIT(0); DEFINE_RWLOCK(tipc_nametbl_lock); static int hash(int x) { return x & (tipc_nametbl_size - 1); } /** * publ_create - create a publication structure */ static struct publication *publ_create(u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port_ref, u32 key) { struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC); if (publ == NULL) { warn("Publication creation failure, no memory\n"); return NULL; } publ->type = type; publ->lower = lower; publ->upper = upper; publ->scope = scope; publ->node = node; publ->ref = port_ref; publ->key = key; INIT_LIST_HEAD(&publ->local_list); INIT_LIST_HEAD(&publ->pport_list); INIT_LIST_HEAD(&publ->subscr.nodesub_list); return publ; } /** * tipc_subseq_alloc - allocate a specified number of sub-sequence structures */ static struct sub_seq *tipc_subseq_alloc(u32 cnt) { struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC); return sseq; } /** * tipc_nameseq_create - create a name sequence structure for the specified 'type' * * Allocates a single sub-sequence structure and sets it to all 0's. 
*/ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head) { struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC); struct sub_seq *sseq = tipc_subseq_alloc(1); if (!nseq || !sseq) { warn("Name sequence creation failed, no memory\n"); kfree(nseq); kfree(sseq); return NULL; } spin_lock_init(&nseq->lock); nseq->type = type; nseq->sseqs = sseq; nseq->alloc = 1; INIT_HLIST_NODE(&nseq->ns_list); INIT_LIST_HEAD(&nseq->subscriptions); hlist_add_head(&nseq->ns_list, seq_head); return nseq; } /** * nameseq_find_subseq - find sub-sequence (if any) matching a name instance * * Very time-critical, so binary searches through sub-sequence array. */ static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq, u32 instance) { struct sub_seq *sseqs = nseq->sseqs; int low = 0; int high = nseq->first_free - 1; int mid; while (low <= high) { mid = (low + high) / 2; if (instance < sseqs[mid].lower) high = mid - 1; else if (instance > sseqs[mid].upper) low = mid + 1; else return &sseqs[mid]; } return NULL; } /** * nameseq_locate_subseq - determine position of name instance in sub-sequence * * Returns index in sub-sequence array of the entry that contains the specified * instance value; if no entry contains that value, returns the position * where a new entry for it would be inserted in the array. * * Note: Similar to binary search code for locating a sub-sequence. 
*/ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance) { struct sub_seq *sseqs = nseq->sseqs; int low = 0; int high = nseq->first_free - 1; int mid; while (low <= high) { mid = (low + high) / 2; if (instance < sseqs[mid].lower) high = mid - 1; else if (instance > sseqs[mid].upper) low = mid + 1; else return mid; } return low; } /** * tipc_nameseq_insert_publ - */ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port, u32 key) { struct subscription *s; struct subscription *st; struct publication *publ; struct sub_seq *sseq; struct name_info *info; int created_subseq = 0; sseq = nameseq_find_subseq(nseq, lower); if (sseq) { /* Lower end overlaps existing entry => need an exact match */ if ((sseq->lower != lower) || (sseq->upper != upper)) { warn("Cannot publish {%u,%u,%u}, overlap error\n", type, lower, upper); return NULL; } info = sseq->info; } else { u32 inspos; struct sub_seq *freesseq; /* Find where lower end should be inserted */ inspos = nameseq_locate_subseq(nseq, lower); /* Fail if upper end overlaps into an existing entry */ if ((inspos < nseq->first_free) && (upper >= nseq->sseqs[inspos].lower)) { warn("Cannot publish {%u,%u,%u}, overlap error\n", type, lower, upper); return NULL; } /* Ensure there is space for new sub-sequence */ if (nseq->first_free == nseq->alloc) { struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2); if (!sseqs) { warn("Cannot publish {%u,%u,%u}, no memory\n", type, lower, upper); return NULL; } memcpy(sseqs, nseq->sseqs, nseq->alloc * sizeof(struct sub_seq)); kfree(nseq->sseqs); nseq->sseqs = sseqs; nseq->alloc *= 2; } info = kzalloc(sizeof(*info), GFP_ATOMIC); if (!info) { warn("Cannot publish {%u,%u,%u}, no memory\n", type, lower, upper); return NULL; } INIT_LIST_HEAD(&info->node_list); INIT_LIST_HEAD(&info->cluster_list); INIT_LIST_HEAD(&info->zone_list); /* Insert new sub-sequence */ sseq = &nseq->sseqs[inspos]; freesseq = 
&nseq->sseqs[nseq->first_free]; memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq)); memset(sseq, 0, sizeof(*sseq)); nseq->first_free++; sseq->lower = lower; sseq->upper = upper; sseq->info = info; created_subseq = 1; } /* Insert a publication: */ publ = publ_create(type, lower, upper, scope, node, port, key); if (!publ) return NULL; list_add(&publ->zone_list, &info->zone_list); info->zone_list_size++; if (in_own_cluster(node)) { list_add(&publ->cluster_list, &info->cluster_list); info->cluster_list_size++; } if (node == tipc_own_addr) { list_add(&publ->node_list, &info->node_list); info->node_list_size++; } /* * Any subscriptions waiting for notification? */ list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { tipc_subscr_report_overlap(s, publ->lower, publ->upper, TIPC_PUBLISHED, publ->ref, publ->node, created_subseq); } return publ; } /** * tipc_nameseq_remove_publ - * * NOTE: There may be cases where TIPC is asked to remove a publication * that is not in the name table. For example, if another node issues a * publication for a name sequence that overlaps an existing name sequence * the publication will not be recorded, which means the publication won't * be found when the name sequence is later withdrawn by that node. * A failed withdraw request simply returns a failure indication and lets the * caller issue any error or warning messages associated with such a problem. 
*/ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst, u32 node, u32 ref, u32 key) { struct publication *publ; struct sub_seq *sseq = nameseq_find_subseq(nseq, inst); struct name_info *info; struct sub_seq *free; struct subscription *s, *st; int removed_subseq = 0; if (!sseq) return NULL; info = sseq->info; /* Locate publication, if it exists */ list_for_each_entry(publ, &info->zone_list, zone_list) { if ((publ->key == key) && (publ->ref == ref) && (!publ->node || (publ->node == node))) goto found; } return NULL; found: /* Remove publication from zone scope list */ list_del(&publ->zone_list); info->zone_list_size--; /* Remove publication from cluster scope list, if present */ if (in_own_cluster(node)) { list_del(&publ->cluster_list); info->cluster_list_size--; } /* Remove publication from node scope list, if present */ if (node == tipc_own_addr) { list_del(&publ->node_list); info->node_list_size--; } /* Contract subseq list if no more publications for that subseq */ if (list_empty(&info->zone_list)) { kfree(info); free = &nseq->sseqs[nseq->first_free--]; memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof(*sseq)); removed_subseq = 1; } /* Notify any waiting subscriptions */ list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { tipc_subscr_report_overlap(s, publ->lower, publ->upper, TIPC_WITHDRAWN, publ->ref, publ->node, removed_subseq); } return publ; } /** * tipc_nameseq_subscribe: attach a subscription, and issue * the prescribed number of events if there is any sub- * sequence overlapping with the requested sequence */ static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s) { struct sub_seq *sseq = nseq->sseqs; list_add(&s->nameseq_list, &nseq->subscriptions); if (!sseq) return; while (sseq != &nseq->sseqs[nseq->first_free]) { if (tipc_subscr_overlap(s, sseq->lower, sseq->upper)) { struct publication *crs; struct name_info *info = sseq->info; int must_report = 1; list_for_each_entry(crs, 
&info->zone_list, zone_list) { tipc_subscr_report_overlap(s, sseq->lower, sseq->upper, TIPC_PUBLISHED, crs->ref, crs->node, must_report); must_report = 0; } } sseq++; } } static struct name_seq *nametbl_find_seq(u32 type) { struct hlist_head *seq_head; struct hlist_node *seq_node; struct name_seq *ns; seq_head = &table.types[hash(type)]; hlist_for_each_entry(ns, seq_node, seq_head, ns_list) { if (ns->type == type) return ns; } return NULL; }; struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port, u32 key) { struct name_seq *seq = nametbl_find_seq(type); if (lower > upper) { warn("Failed to publish illegal {%u,%u,%u}\n", type, lower, upper); return NULL; } if (!seq) seq = tipc_nameseq_create(type, &table.types[hash(type)]); if (!seq) return NULL; return tipc_nameseq_insert_publ(seq, type, lower, upper, scope, node, port, key); } struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, u32 node, u32 ref, u32 key) { struct publication *publ; struct name_seq *seq = nametbl_find_seq(type); if (!seq) return NULL; publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key); if (!seq->first_free && list_empty(&seq->subscriptions)) { hlist_del_init(&seq->ns_list); kfree(seq->sseqs); kfree(seq); } return publ; } /* * tipc_nametbl_translate - translate name to port id * * Note: on entry 'destnode' is the search domain used during translation; * on exit it passes back the node address of the matching port (if any) */ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) { struct sub_seq *sseq; struct name_info *info; struct publication *publ; struct name_seq *seq; u32 ref = 0; if (!tipc_in_scope(*destnode, tipc_own_addr)) return 0; read_lock_bh(&tipc_nametbl_lock); seq = nametbl_find_seq(type); if (unlikely(!seq)) goto not_found; sseq = nameseq_find_subseq(seq, instance); if (unlikely(!sseq)) goto not_found; spin_lock_bh(&seq->lock); info = sseq->info; /* Closest-First Algorithm: */ if 
(likely(!*destnode)) { if (!list_empty(&info->node_list)) { publ = list_first_entry(&info->node_list, struct publication, node_list); list_move_tail(&publ->node_list, &info->node_list); } else if (!list_empty(&info->cluster_list)) { publ = list_first_entry(&info->cluster_list, struct publication, cluster_list); list_move_tail(&publ->cluster_list, &info->cluster_list); } else { publ = list_first_entry(&info->zone_list, struct publication, zone_list); list_move_tail(&publ->zone_list, &info->zone_list); } } /* Round-Robin Algorithm: */ else if (*destnode == tipc_own_addr) { if (list_empty(&info->node_list)) goto no_match; publ = list_first_entry(&info->node_list, struct publication, node_list); list_move_tail(&publ->node_list, &info->node_list); } else if (in_own_cluster(*destnode)) { if (list_empty(&info->cluster_list)) goto no_match; publ = list_first_entry(&info->cluster_list, struct publication, cluster_list); list_move_tail(&publ->cluster_list, &info->cluster_list); } else { publ = list_first_entry(&info->zone_list, struct publication, zone_list); list_move_tail(&publ->zone_list, &info->zone_list); } ref = publ->ref; *destnode = publ->node; no_match: spin_unlock_bh(&seq->lock); not_found: read_unlock_bh(&tipc_nametbl_lock); return ref; } /** * tipc_nametbl_mc_translate - find multicast destinations * * Creates list of all local ports that overlap the given multicast address; * also determines if any off-node ports overlap. * * Note: Publications with a scope narrower than 'limit' are ignored. * (i.e. 
local node-scope publications mustn't receive messages arriving * from another node, even if the multcast link brought it here) * * Returns non-zero if any off-node ports overlap */ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, struct port_list *dports) { struct name_seq *seq; struct sub_seq *sseq; struct sub_seq *sseq_stop; struct name_info *info; int res = 0; read_lock_bh(&tipc_nametbl_lock); seq = nametbl_find_seq(type); if (!seq) goto exit; spin_lock_bh(&seq->lock); sseq = seq->sseqs + nameseq_locate_subseq(seq, lower); sseq_stop = seq->sseqs + seq->first_free; for (; sseq != sseq_stop; sseq++) { struct publication *publ; if (sseq->lower > upper) break; info = sseq->info; list_for_each_entry(publ, &info->node_list, node_list) { if (publ->scope <= limit) tipc_port_list_add(dports, publ->ref); } if (info->cluster_list_size != info->node_list_size) res = 1; } spin_unlock_bh(&seq->lock); exit: read_unlock_bh(&tipc_nametbl_lock); return res; } /** * tipc_nametbl_publish_rsv - publish port name using a reserved name type */ int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope, struct tipc_name_seq const *seq) { int res; atomic_inc(&rsv_publ_ok); res = tipc_publish(ref, scope, seq); atomic_dec(&rsv_publ_ok); return res; } /** * tipc_nametbl_publish - add name publication to network name tables */ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, u32 scope, u32 port_ref, u32 key) { struct publication *publ; if (table.local_publ_count >= tipc_max_publications) { warn("Publication failed, local publication limit reached (%u)\n", tipc_max_publications); return NULL; } if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) { warn("Publication failed, reserved name {%u,%u,%u}\n", type, lower, upper); return NULL; } write_lock_bh(&tipc_nametbl_lock); table.local_publ_count++; publ = tipc_nametbl_insert_publ(type, lower, upper, scope, tipc_own_addr, port_ref, key); if (publ && (scope != TIPC_NODE_SCOPE)) 
tipc_named_publish(publ); write_unlock_bh(&tipc_nametbl_lock); return publ; } /** * tipc_nametbl_withdraw - withdraw name publication from network name tables */ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) { struct publication *publ; write_lock_bh(&tipc_nametbl_lock); publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); if (likely(publ)) { table.local_publ_count--; if (publ->scope != TIPC_NODE_SCOPE) tipc_named_withdraw(publ); write_unlock_bh(&tipc_nametbl_lock); list_del_init(&publ->pport_list); kfree(publ); return 1; } write_unlock_bh(&tipc_nametbl_lock); err("Unable to remove local publication\n" "(type=%u, lower=%u, ref=%u, key=%u)\n", type, lower, ref, key); return 0; } /** * tipc_nametbl_subscribe - add a subscription object to the name table */ void tipc_nametbl_subscribe(struct subscription *s) { u32 type = s->seq.type; struct name_seq *seq; write_lock_bh(&tipc_nametbl_lock); seq = nametbl_find_seq(type); if (!seq) seq = tipc_nameseq_create(type, &table.types[hash(type)]); if (seq) { spin_lock_bh(&seq->lock); tipc_nameseq_subscribe(seq, s); spin_unlock_bh(&seq->lock); } else { warn("Failed to create subscription for {%u,%u,%u}\n", s->seq.type, s->seq.lower, s->seq.upper); } write_unlock_bh(&tipc_nametbl_lock); } /** * tipc_nametbl_unsubscribe - remove a subscription object from name table */ void tipc_nametbl_unsubscribe(struct subscription *s) { struct name_seq *seq; write_lock_bh(&tipc_nametbl_lock); seq = nametbl_find_seq(s->seq.type); if (seq != NULL) { spin_lock_bh(&seq->lock); list_del_init(&s->nameseq_list); spin_unlock_bh(&seq->lock); if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) { hlist_del_init(&seq->ns_list); kfree(seq->sseqs); kfree(seq); } } write_unlock_bh(&tipc_nametbl_lock); } /** * subseq_list: print specified sub-sequence contents into the given buffer */ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth, u32 index) { char portIdStr[27]; const char 
*scope_str[] = {"", " zone", " cluster", " node"}; struct publication *publ; struct name_info *info; tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper); if (depth == 2) { tipc_printf(buf, "\n"); return; } info = sseq->info; list_for_each_entry(publ, &info->zone_list, zone_list) { sprintf(portIdStr, "<%u.%u.%u:%u>", tipc_zone(publ->node), tipc_cluster(publ->node), tipc_node(publ->node), publ->ref); tipc_printf(buf, "%-26s ", portIdStr); if (depth > 3) { tipc_printf(buf, "%-10u %s", publ->key, scope_str[publ->scope]); } if (!list_is_last(&publ->zone_list, &info->zone_list)) tipc_printf(buf, "\n%33s", " "); }; tipc_printf(buf, "\n"); } /** * nameseq_list: print specified name sequence contents into the given buffer */ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth, u32 type, u32 lowbound, u32 upbound, u32 index) { struct sub_seq *sseq; char typearea[11]; if (seq->first_free == 0) return; sprintf(typearea, "%-10u", seq->type); if (depth == 1) { tipc_printf(buf, "%s\n", typearea); return; } for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) { if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) { tipc_printf(buf, "%s ", typearea); spin_lock_bh(&seq->lock); subseq_list(sseq, buf, depth, index); spin_unlock_bh(&seq->lock); sprintf(typearea, "%10s", " "); } } } /** * nametbl_header - print name table header into the given buffer */ static void nametbl_header(struct print_buf *buf, u32 depth) { const char *header[] = { "Type ", "Lower Upper ", "Port Identity ", "Publication Scope" }; int i; if (depth > 4) depth = 4; for (i = 0; i < depth; i++) tipc_printf(buf, header[i]); tipc_printf(buf, "\n"); } /** * nametbl_list - print specified name table contents into the given buffer */ static void nametbl_list(struct print_buf *buf, u32 depth_info, u32 type, u32 lowbound, u32 upbound) { struct hlist_head *seq_head; struct hlist_node *seq_node; struct name_seq *seq; int all_types; u32 depth; u32 i; all_types = 
(depth_info & TIPC_NTQ_ALLTYPES); depth = (depth_info & ~TIPC_NTQ_ALLTYPES); if (depth == 0) return; if (all_types) { /* display all entries in name table to specified depth */ nametbl_header(buf, depth); lowbound = 0; upbound = ~0; for (i = 0; i < tipc_nametbl_size; i++) { seq_head = &table.types[i]; hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { nameseq_list(seq, buf, depth, seq->type, lowbound, upbound, i); } } } else { /* display only the sequence that matches the specified type */ if (upbound < lowbound) { tipc_printf(buf, "invalid name sequence specified\n"); return; } nametbl_header(buf, depth); i = hash(type); seq_head = &table.types[i]; hlist_for_each_entry(seq, seq_node, seq_head, ns_list) { if (seq->type == type) { nameseq_list(seq, buf, depth, type, lowbound, upbound, i); break; } } } } #define MAX_NAME_TBL_QUERY 32768 struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space) { struct sk_buff *buf; struct tipc_name_table_query *argv; struct tlv_desc *rep_tlv; struct print_buf b; int str_len; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY)) return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY)); if (!buf) return NULL; rep_tlv = (struct tlv_desc *)buf->data; tipc_printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY); argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area); read_lock_bh(&tipc_nametbl_lock); nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type), ntohl(argv->lowbound), ntohl(argv->upbound)); read_unlock_bh(&tipc_nametbl_lock); str_len = tipc_printbuf_validate(&b); skb_put(buf, TLV_SPACE(str_len)); TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); return buf; } int tipc_nametbl_init(void) { table.types = kcalloc(tipc_nametbl_size, sizeof(struct hlist_head), GFP_ATOMIC); if (!table.types) return -ENOMEM; table.local_publ_count = 0; return 0; } void tipc_nametbl_stop(void) { u32 i; if (!table.types) return; /* Verify 
name table is empty, then release it */ write_lock_bh(&tipc_nametbl_lock); for (i = 0; i < tipc_nametbl_size; i++) { if (!hlist_empty(&table.types[i])) err("tipc_nametbl_stop(): hash chain %u is non-null\n", i); } kfree(table.types); table.types = NULL; write_unlock_bh(&tipc_nametbl_lock); }
gpl-2.0
ferhung/kernel_mtk
drivers/net/wireless/ath/wil6210/wmi.c
1327
29691
/* * Copyright (c) 2012 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/etherdevice.h> #include <linux/if_arp.h> #include "wil6210.h" #include "txrx.h" #include "wmi.h" /** * WMI event receiving - theory of operations * * When firmware about to report WMI event, it fills memory area * in the mailbox and raises misc. IRQ. Thread interrupt handler invoked for * the misc IRQ, function @wmi_recv_cmd called by thread IRQ handler. * * @wmi_recv_cmd reads event, allocates memory chunk and attaches it to the * event list @wil->pending_wmi_ev. Then, work queue @wil->wmi_wq wakes up * and handles events within the @wmi_event_worker. Every event get detached * from list, processed and deleted. * * Purpose for this mechanism is to release IRQ thread; otherwise, * if WMI event handling involves another WMI command flow, this 2-nd flow * won't be completed because of blocked IRQ thread. */ /** * Addressing - theory of operations * * There are several buses present on the WIL6210 card. * Same memory areas are visible at different address on * the different busses. 
There are 3 main bus masters: * - MAC CPU (ucode) * - User CPU (firmware) * - AHB (host) * * On the PCI bus, there is one BAR (BAR0) of 2Mb size, exposing * AHB addresses starting from 0x880000 * * Internally, firmware uses addresses that allows faster access but * are invisible from the host. To read from these addresses, alternative * AHB address must be used. * * Memory mapping * Linker address PCI/Host address * 0x880000 .. 0xa80000 2Mb BAR0 * 0x800000 .. 0x807000 0x900000 .. 0x907000 28k DCCM * 0x840000 .. 0x857000 0x908000 .. 0x91f000 92k PERIPH */ /** * @fw_mapping provides memory remapping table */ static const struct { u32 from; /* linker address - from, inclusive */ u32 to; /* linker address - to, exclusive */ u32 host; /* PCI/Host address - BAR0 + 0x880000 */ } fw_mapping[] = { {0x000000, 0x040000, 0x8c0000}, /* FW code RAM 256k */ {0x800000, 0x808000, 0x900000}, /* FW data RAM 32k */ {0x840000, 0x860000, 0x908000}, /* peripheral data RAM 128k/96k used */ {0x880000, 0x88a000, 0x880000}, /* various RGF */ {0x8c0000, 0x932000, 0x8c0000}, /* trivial mapping for upper area */ /* * 920000..930000 ucode code RAM * 930000..932000 ucode data RAM */ }; /** * return AHB address for given firmware/ucode internal (linker) address * @x - internal address * If address have no valid AHB mapping, return 0 */ static u32 wmi_addr_remap(u32 x) { uint i; for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) { if ((x >= fw_mapping[i].from) && (x < fw_mapping[i].to)) return x + fw_mapping[i].host - fw_mapping[i].from; } return 0; } /** * Check address validity for WMI buffer; remap if needed * @ptr - internal (linker) fw/ucode address * * Valid buffer should be DWORD aligned * * return address for accessing buffer from the host; * if buffer is not valid, return NULL. 
*/ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_) { u32 off; u32 ptr = le32_to_cpu(ptr_); if (ptr % 4) return NULL; ptr = wmi_addr_remap(ptr); if (ptr < WIL6210_FW_HOST_OFF) return NULL; off = HOSTADDR(ptr); if (off > WIL6210_MEM_SIZE - 4) return NULL; return wil->csr + off; } /** * Check address validity */ void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr) { u32 off; if (ptr % 4) return NULL; if (ptr < WIL6210_FW_HOST_OFF) return NULL; off = HOSTADDR(ptr); if (off > WIL6210_MEM_SIZE - 4) return NULL; return wil->csr + off; } int wmi_read_hdr(struct wil6210_priv *wil, __le32 ptr, struct wil6210_mbox_hdr *hdr) { void __iomem *src = wmi_buffer(wil, ptr); if (!src) return -EINVAL; wil_memcpy_fromio_32(hdr, src, sizeof(*hdr)); return 0; } static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) { struct { struct wil6210_mbox_hdr hdr; struct wil6210_mbox_hdr_wmi wmi; } __packed cmd = { .hdr = { .type = WIL_MBOX_HDR_TYPE_WMI, .flags = 0, .len = cpu_to_le16(sizeof(cmd.wmi) + len), }, .wmi = { .id = cpu_to_le16(cmdid), .info1 = 0, }, }; struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx; struct wil6210_mbox_ring_desc d_head; u32 next_head; void __iomem *dst; void __iomem *head = wmi_addr(wil, r->head); uint retry; if (sizeof(cmd) + len > r->entry_size) { wil_err(wil, "WMI size too large: %d bytes, max is %d\n", (int)(sizeof(cmd) + len), r->entry_size); return -ERANGE; } might_sleep(); if (!test_bit(wil_status_fwready, &wil->status)) { wil_err(wil, "FW not ready\n"); return -EAGAIN; } if (!head) { wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head); return -EINVAL; } /* read Tx head till it is not busy */ for (retry = 5; retry > 0; retry--) { wil_memcpy_fromio_32(&d_head, head, sizeof(d_head)); if (d_head.sync == 0) break; msleep(20); } if (d_head.sync != 0) { wil_err(wil, "WMI head busy\n"); return -EBUSY; } /* next head */ next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size); wil_dbg_wmi(wil, "Head 0x%08x -> 
0x%08x\n", r->head, next_head); /* wait till FW finish with previous command */ for (retry = 5; retry > 0; retry--) { r->tail = ioread32(wil->csr + HOST_MBOX + offsetof(struct wil6210_mbox_ctl, tx.tail)); if (next_head != r->tail) break; msleep(20); } if (next_head == r->tail) { wil_err(wil, "WMI ring full\n"); return -EBUSY; } dst = wmi_buffer(wil, d_head.addr); if (!dst) { wil_err(wil, "invalid WMI buffer: 0x%08x\n", le32_to_cpu(d_head.addr)); return -EINVAL; } cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq); /* set command */ wil_dbg_wmi(wil, "WMI command 0x%04x [%d]\n", cmdid, len); wil_hex_dump_wmi("Cmd ", DUMP_PREFIX_OFFSET, 16, 1, &cmd, sizeof(cmd), true); wil_hex_dump_wmi("cmd ", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true); wil_memcpy_toio_32(dst, &cmd, sizeof(cmd)); wil_memcpy_toio_32(dst + sizeof(cmd), buf, len); /* mark entry as full */ iowrite32(1, wil->csr + HOSTADDR(r->head) + offsetof(struct wil6210_mbox_ring_desc, sync)); /* advance next ptr */ iowrite32(r->head = next_head, wil->csr + HOST_MBOX + offsetof(struct wil6210_mbox_ctl, tx.head)); /* interrupt to FW */ iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT); return 0; } int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len) { int rc; mutex_lock(&wil->wmi_mutex); rc = __wmi_send(wil, cmdid, buf, len); mutex_unlock(&wil->wmi_mutex); return rc; } /*=== Event handlers ===*/ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) { struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *wdev = wil->wdev; struct wmi_ready_event *evt = d; wil->fw_version = le32_to_cpu(evt->sw_version); wil->n_mids = evt->numof_additional_mids; wil_dbg_wmi(wil, "FW ver. 
%d; MAC %pM; %d MID's\n", wil->fw_version, evt->mac, wil->n_mids); if (!is_valid_ether_addr(ndev->dev_addr)) { memcpy(ndev->dev_addr, evt->mac, ETH_ALEN); memcpy(ndev->perm_addr, evt->mac, ETH_ALEN); } snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version), "%d", wil->fw_version); } static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d, int len) { wil_dbg_wmi(wil, "WMI: FW ready\n"); set_bit(wil_status_fwready, &wil->status); /* reuse wmi_ready for the firmware ready indication */ complete(&wil->wmi_ready); } static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len) { struct wmi_rx_mgmt_packet_event *data = d; struct wiphy *wiphy = wil_to_wiphy(wil); struct ieee80211_mgmt *rx_mgmt_frame = (struct ieee80211_mgmt *)data->payload; int ch_no = data->info.channel+1; u32 freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ); struct ieee80211_channel *channel = ieee80211_get_channel(wiphy, freq); /* TODO convert LE to CPU */ s32 signal = 0; /* TODO */ __le16 fc = rx_mgmt_frame->frame_control; u32 d_len = le32_to_cpu(data->info.len); u16 d_status = le16_to_cpu(data->info.status); wil_dbg_wmi(wil, "MGMT: channel %d MCS %d SNR %d\n", data->info.channel, data->info.mcs, data->info.snr); wil_dbg_wmi(wil, "status 0x%04x len %d stype %04x\n", d_status, d_len, le16_to_cpu(data->info.stype)); wil_dbg_wmi(wil, "qid %d mid %d cid %d\n", data->info.qid, data->info.mid, data->info.cid); if (!channel) { wil_err(wil, "Frame on unsupported channel\n"); return; } if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) { struct cfg80211_bss *bss; bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame, d_len, signal, GFP_KERNEL); if (bss) { wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid); cfg80211_put_bss(wiphy, bss); } else { wil_err(wil, "cfg80211_inform_bss() failed\n"); } } else { cfg80211_rx_mgmt(wil->wdev, freq, signal, (void *)rx_mgmt_frame, d_len, GFP_KERNEL); } } static void 
wmi_evt_scan_complete(struct wil6210_priv *wil, int id, void *d, int len) { if (wil->scan_request) { struct wmi_scan_complete_event *data = d; bool aborted = (data->status != 0); wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status); cfg80211_scan_done(wil->scan_request, aborted); wil->scan_request = NULL; } else { wil_err(wil, "SCAN_COMPLETE while not scanning\n"); } } static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) { struct net_device *ndev = wil_to_ndev(wil); struct wireless_dev *wdev = wil->wdev; struct wmi_connect_event *evt = d; int ch; /* channel number */ struct station_info sinfo; u8 *assoc_req_ie, *assoc_resp_ie; size_t assoc_req_ielen, assoc_resp_ielen; /* capinfo(u16) + listen_interval(u16) + IEs */ const size_t assoc_req_ie_offset = sizeof(u16) * 2; /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */ const size_t assoc_resp_ie_offset = sizeof(u16) * 3; if (len < sizeof(*evt)) { wil_err(wil, "Connect event too short : %d bytes\n", len); return; } if (len != sizeof(*evt) + evt->beacon_ie_len + evt->assoc_req_len + evt->assoc_resp_len) { wil_err(wil, "Connect event corrupted : %d != %d + %d + %d + %d\n", len, (int)sizeof(*evt), evt->beacon_ie_len, evt->assoc_req_len, evt->assoc_resp_len); return; } ch = evt->channel + 1; wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n", evt->bssid, ch, evt->cid); wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1, evt->assoc_info, len - sizeof(*evt), true); /* figure out IE's */ assoc_req_ie = &evt->assoc_info[evt->beacon_ie_len + assoc_req_ie_offset]; assoc_req_ielen = evt->assoc_req_len - assoc_req_ie_offset; if (evt->assoc_req_len <= assoc_req_ie_offset) { assoc_req_ie = NULL; assoc_req_ielen = 0; } assoc_resp_ie = &evt->assoc_info[evt->beacon_ie_len + evt->assoc_req_len + assoc_resp_ie_offset]; assoc_resp_ielen = evt->assoc_resp_len - assoc_resp_ie_offset; if (evt->assoc_resp_len <= assoc_resp_ie_offset) { assoc_resp_ie = NULL; assoc_resp_ielen = 0; } if 
((wdev->iftype == NL80211_IFTYPE_STATION) || (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { if (wdev->sme_state != CFG80211_SME_CONNECTING) { wil_err(wil, "Not in connecting state\n"); return; } del_timer_sync(&wil->connect_timer); cfg80211_connect_result(ndev, evt->bssid, assoc_req_ie, assoc_req_ielen, assoc_resp_ie, assoc_resp_ielen, WLAN_STATUS_SUCCESS, GFP_KERNEL); } else if ((wdev->iftype == NL80211_IFTYPE_AP) || (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { memset(&sinfo, 0, sizeof(sinfo)); sinfo.generation = wil->sinfo_gen++; if (assoc_req_ie) { sinfo.assoc_req_ies = assoc_req_ie; sinfo.assoc_req_ies_len = assoc_req_ielen; sinfo.filled |= STATION_INFO_ASSOC_REQ_IES; } cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); } set_bit(wil_status_fwconnected, &wil->status); /* FIXME FW can transmit only ucast frames to peer */ /* FIXME real ring_id instead of hard coded 0 */ memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN); wil->pending_connect_cid = evt->cid; queue_work(wil->wmi_wq_conn, &wil->connect_worker); } static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, void *d, int len) { struct wmi_disconnect_event *evt = d; wil_dbg_wmi(wil, "Disconnect %pM reason %d proto %d wmi\n", evt->bssid, evt->protocol_reason_status, evt->disconnect_reason); wil->sinfo_gen++; wil6210_disconnect(wil, evt->bssid); } static void wmi_evt_notify(struct wil6210_priv *wil, int id, void *d, int len) { struct wmi_notify_req_done_event *evt = d; if (len < sizeof(*evt)) { wil_err(wil, "Short NOTIFY event\n"); return; } wil->stats.tsf = le64_to_cpu(evt->tsf); wil->stats.snr = le32_to_cpu(evt->snr_val); wil->stats.bf_mcs = le16_to_cpu(evt->bf_mcs); wil->stats.my_rx_sector = le16_to_cpu(evt->my_rx_sector); wil->stats.my_tx_sector = le16_to_cpu(evt->my_tx_sector); wil->stats.peer_rx_sector = le16_to_cpu(evt->other_rx_sector); wil->stats.peer_tx_sector = le16_to_cpu(evt->other_tx_sector); wil_dbg_wmi(wil, "Link status, MCS %d TSF 0x%016llx\n" "BF status 0x%08x SNR 0x%08x\n" "Tx Tpt 
%d goodput %d Rx goodput %d\n" "Sectors(rx:tx) my %d:%d peer %d:%d\n", wil->stats.bf_mcs, wil->stats.tsf, evt->status, wil->stats.snr, le32_to_cpu(evt->tx_tpt), le32_to_cpu(evt->tx_goodput), le32_to_cpu(evt->rx_goodput), wil->stats.my_rx_sector, wil->stats.my_tx_sector, wil->stats.peer_rx_sector, wil->stats.peer_tx_sector); } /* * Firmware reports EAPOL frame using WME event. * Reconstruct Ethernet frame and deliver it via normal Rx */ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id, void *d, int len) { struct net_device *ndev = wil_to_ndev(wil); struct wmi_eapol_rx_event *evt = d; u16 eapol_len = le16_to_cpu(evt->eapol_len); int sz = eapol_len + ETH_HLEN; struct sk_buff *skb; struct ethhdr *eth; wil_dbg_wmi(wil, "EAPOL len %d from %pM\n", eapol_len, evt->src_mac); if (eapol_len > 196) { /* TODO: revisit size limit */ wil_err(wil, "EAPOL too large\n"); return; } skb = alloc_skb(sz, GFP_KERNEL); if (!skb) { wil_err(wil, "Failed to allocate skb\n"); return; } eth = (struct ethhdr *)skb_put(skb, ETH_HLEN); memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN); memcpy(eth->h_source, evt->src_mac, ETH_ALEN); eth->h_proto = cpu_to_be16(ETH_P_PAE); memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len); skb->protocol = eth_type_trans(skb, ndev); if (likely(netif_rx_ni(skb) == NET_RX_SUCCESS)) { ndev->stats.rx_packets++; ndev->stats.rx_bytes += skb->len; } else { ndev->stats.rx_dropped++; } } static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len) { struct net_device *ndev = wil_to_ndev(wil); struct wmi_data_port_open_event *evt = d; wil_dbg_wmi(wil, "Link UP for CID %d\n", evt->cid); netif_carrier_on(ndev); } static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len) { struct net_device *ndev = wil_to_ndev(wil); struct wmi_wbe_link_down_event *evt = d; wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n", evt->cid, le32_to_cpu(evt->reason)); netif_carrier_off(ndev); } static void wmi_evt_ba_status(struct wil6210_priv 
*wil, int id, void *d, int len) { struct wmi_vring_ba_status_event *evt = d; wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n", evt->ringid, evt->status ? "N/A" : "OK", evt->agg_wsize, __le16_to_cpu(evt->ba_timeout)); } static const struct { int eventid; void (*handler)(struct wil6210_priv *wil, int eventid, void *data, int data_len); } wmi_evt_handlers[] = { {WMI_READY_EVENTID, wmi_evt_ready}, {WMI_FW_READY_EVENTID, wmi_evt_fw_ready}, {WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt}, {WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete}, {WMI_CONNECT_EVENTID, wmi_evt_connect}, {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect}, {WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify}, {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx}, {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup}, {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown}, {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status}, }; /* * Run in IRQ context * Extract WMI command from mailbox. Queue it to the @wil->pending_wmi_ev * that will be eventually handled by the @wmi_event_worker in the thread * context of thread "wil6210_wmi" */ void wmi_recv_cmd(struct wil6210_priv *wil) { struct wil6210_mbox_ring_desc d_tail; struct wil6210_mbox_hdr hdr; struct wil6210_mbox_ring *r = &wil->mbox_ctl.rx; struct pending_wmi_event *evt; u8 *cmd; void __iomem *src; ulong flags; if (!test_bit(wil_status_reset_done, &wil->status)) { wil_err(wil, "Reset not completed\n"); return; } for (;;) { u16 len; r->head = ioread32(wil->csr + HOST_MBOX + offsetof(struct wil6210_mbox_ctl, rx.head)); if (r->tail == r->head) return; /* read cmd from tail */ wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail), sizeof(struct wil6210_mbox_ring_desc)); if (d_tail.sync == 0) { wil_err(wil, "Mbox evt not owned by FW?\n"); return; } if (0 != wmi_read_hdr(wil, d_tail.addr, &hdr)) { wil_err(wil, "Mbox evt at 0x%08x?\n", le32_to_cpu(d_tail.addr)); return; } len = le16_to_cpu(hdr.len); src = wmi_buffer(wil, d_tail.addr) + sizeof(struct wil6210_mbox_hdr); evt = kmalloc(ALIGN(offsetof(struct 
pending_wmi_event, event.wmi) + len, 4), GFP_KERNEL); if (!evt) return; evt->event.hdr = hdr; cmd = (void *)&evt->event.wmi; wil_memcpy_fromio_32(cmd, src, len); /* mark entry as empty */ iowrite32(0, wil->csr + HOSTADDR(r->tail) + offsetof(struct wil6210_mbox_ring_desc, sync)); /* indicate */ wil_dbg_wmi(wil, "Mbox evt %04x %04x %04x %02x\n", le16_to_cpu(hdr.seq), len, le16_to_cpu(hdr.type), hdr.flags); if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) && (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { wil_dbg_wmi(wil, "WMI event 0x%04x\n", evt->event.wmi.id); } wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1, &evt->event.hdr, sizeof(hdr) + len, true); /* advance tail */ r->tail = r->base + ((r->tail - r->base + sizeof(struct wil6210_mbox_ring_desc)) % r->size); iowrite32(r->tail, wil->csr + HOST_MBOX + offsetof(struct wil6210_mbox_ctl, rx.tail)); /* add to the pending list */ spin_lock_irqsave(&wil->wmi_ev_lock, flags); list_add_tail(&evt->list, &wil->pending_wmi_ev); spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); { int q = queue_work(wil->wmi_wq, &wil->wmi_event_worker); wil_dbg_wmi(wil, "queue_work -> %d\n", q); } } } int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, u16 reply_id, void *reply, u8 reply_size, int to_msec) { int rc; int remain; mutex_lock(&wil->wmi_mutex); rc = __wmi_send(wil, cmdid, buf, len); if (rc) goto out; wil->reply_id = reply_id; wil->reply_buf = reply; wil->reply_size = reply_size; remain = wait_for_completion_timeout(&wil->wmi_ready, msecs_to_jiffies(to_msec)); if (0 == remain) { wil_err(wil, "wmi_call(0x%04x->0x%04x) timeout %d msec\n", cmdid, reply_id, to_msec); rc = -ETIME; } else { wil_dbg_wmi(wil, "wmi_call(0x%04x->0x%04x) completed in %d msec\n", cmdid, reply_id, to_msec - jiffies_to_msecs(remain)); } wil->reply_id = 0; wil->reply_buf = NULL; wil->reply_size = 0; out: mutex_unlock(&wil->wmi_mutex); return rc; } int wmi_echo(struct wil6210_priv *wil) { struct wmi_echo_cmd cmd = { .value = cpu_to_le32(0x12345678), }; 
return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd), WMI_ECHO_RSP_EVENTID, NULL, 0, 20); } int wmi_set_mac_address(struct wil6210_priv *wil, void *addr) { struct wmi_set_mac_address_cmd cmd; memcpy(cmd.mac, addr, ETH_ALEN); wil_dbg_wmi(wil, "Set MAC %pM\n", addr); return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd)); } int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan) { int rc; struct wmi_pcp_start_cmd cmd = { .bcon_interval = cpu_to_le16(bi), .network_type = wmi_nettype, .disable_sec_offload = 1, .channel = chan, }; struct { struct wil6210_mbox_hdr_wmi wmi; struct wmi_pcp_started_event evt; } __packed reply; if (!wil->secure_pcp) cmd.disable_sec = 1; rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd), WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 100); if (rc) return rc; if (reply.evt.status != WMI_FW_STATUS_SUCCESS) rc = -EINVAL; return rc; } int wmi_pcp_stop(struct wil6210_priv *wil) { return wmi_call(wil, WMI_PCP_STOP_CMDID, NULL, 0, WMI_PCP_STOPPED_EVENTID, NULL, 0, 20); } int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid) { struct wmi_set_ssid_cmd cmd = { .ssid_len = cpu_to_le32(ssid_len), }; if (ssid_len > sizeof(cmd.ssid)) return -EINVAL; memcpy(cmd.ssid, ssid, ssid_len); return wmi_send(wil, WMI_SET_SSID_CMDID, &cmd, sizeof(cmd)); } int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid) { int rc; struct { struct wil6210_mbox_hdr_wmi wmi; struct wmi_set_ssid_cmd cmd; } __packed reply; int len; /* reply.cmd.ssid_len in CPU order */ rc = wmi_call(wil, WMI_GET_SSID_CMDID, NULL, 0, WMI_GET_SSID_EVENTID, &reply, sizeof(reply), 20); if (rc) return rc; len = le32_to_cpu(reply.cmd.ssid_len); if (len > sizeof(reply.cmd.ssid)) return -EINVAL; *ssid_len = len; memcpy(ssid, reply.cmd.ssid, len); return 0; } int wmi_set_channel(struct wil6210_priv *wil, int channel) { struct wmi_set_pcp_channel_cmd cmd = { .channel = channel - 1, }; return wmi_send(wil, WMI_SET_PCP_CHANNEL_CMDID, 
&cmd, sizeof(cmd)); } int wmi_get_channel(struct wil6210_priv *wil, int *channel) { int rc; struct { struct wil6210_mbox_hdr_wmi wmi; struct wmi_set_pcp_channel_cmd cmd; } __packed reply; rc = wmi_call(wil, WMI_GET_PCP_CHANNEL_CMDID, NULL, 0, WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20); if (rc) return rc; if (reply.cmd.channel > 3) return -EINVAL; *channel = reply.cmd.channel + 1; return 0; } int wmi_p2p_cfg(struct wil6210_priv *wil, int channel) { struct wmi_p2p_cfg_cmd cmd = { .discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD, .channel = channel - 1, }; return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd)); } int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb) { struct wmi_eapol_tx_cmd *cmd; struct ethhdr *eth; u16 eapol_len = skb->len - ETH_HLEN; void *eapol = skb->data + ETH_HLEN; uint i; int rc; skb_set_mac_header(skb, 0); eth = eth_hdr(skb); wil_dbg_wmi(wil, "EAPOL %d bytes to %pM\n", eapol_len, eth->h_dest); for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) { if (memcmp(wil->dst_addr[i], eth->h_dest, ETH_ALEN) == 0) goto found_dest; } return -EINVAL; found_dest: /* find out eapol data & len */ cmd = kzalloc(sizeof(*cmd) + eapol_len, GFP_KERNEL); if (!cmd) return -EINVAL; memcpy(cmd->dst_mac, eth->h_dest, ETH_ALEN); cmd->eapol_len = cpu_to_le16(eapol_len); memcpy(cmd->eapol, eapol, eapol_len); rc = wmi_send(wil, WMI_EAPOL_TX_CMDID, cmd, sizeof(*cmd) + eapol_len); kfree(cmd); return rc; } int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index, const void *mac_addr) { struct wmi_delete_cipher_key_cmd cmd = { .key_index = key_index, }; if (mac_addr) memcpy(cmd.mac, mac_addr, WMI_MAC_LEN); return wmi_send(wil, WMI_DELETE_CIPHER_KEY_CMDID, &cmd, sizeof(cmd)); } int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index, const void *mac_addr, int key_len, const void *key) { struct wmi_add_cipher_key_cmd cmd = { .key_index = key_index, .key_usage = WMI_KEY_USE_PAIRWISE, .key_len = key_len, }; if (!key || (key_len > 
sizeof(cmd.key))) return -EINVAL; memcpy(cmd.key, key, key_len); if (mac_addr) memcpy(cmd.mac, mac_addr, WMI_MAC_LEN); return wmi_send(wil, WMI_ADD_CIPHER_KEY_CMDID, &cmd, sizeof(cmd)); } int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie) { int rc; u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len; struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->mgmt_frm_type = type; /* BUG: FW API define ieLen as u8. Will fix FW */ cmd->ie_len = cpu_to_le16(ie_len); memcpy(cmd->ie_info, ie, ie_len); rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len); kfree(cmd); return rc; } int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring) { struct wireless_dev *wdev = wil->wdev; struct net_device *ndev = wil_to_ndev(wil); struct wmi_cfg_rx_chain_cmd cmd = { .action = WMI_RX_CHAIN_ADD, .rx_sw_ring = { .max_mpdu_size = cpu_to_le16(RX_BUF_LEN), .ring_mem_base = cpu_to_le64(vring->pa), .ring_size = cpu_to_le16(vring->size), }, .mid = 0, /* TODO - what is it? */ .decap_trans_type = WMI_DECAP_TYPE_802_3, }; struct { struct wil6210_mbox_hdr_wmi wmi; struct wmi_cfg_rx_chain_done_event evt; } __packed evt; int rc; if (wdev->iftype == NL80211_IFTYPE_MONITOR) { struct ieee80211_channel *ch = wdev->preset_chandef.chan; cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON); if (ch) cmd.sniffer_cfg.channel = ch->hw_value - 1; cmd.sniffer_cfg.phy_info_mode = cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP); cmd.sniffer_cfg.phy_support = cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL) ? 
WMI_SNIFFER_CP : WMI_SNIFFER_DP); } /* typical time for secure PCP is 840ms */ rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd), WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000); if (rc) return rc; vring->hwtail = le32_to_cpu(evt.evt.rx_ring_tail_ptr); wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n", le32_to_cpu(evt.evt.status), vring->hwtail); if (le32_to_cpu(evt.evt.status) != WMI_CFG_RX_CHAIN_SUCCESS) rc = -EINVAL; return rc; } int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r) { int rc; struct wmi_temp_sense_cmd cmd = { .measure_marlon_m_en = cpu_to_le32(!!t_m), .measure_marlon_r_en = cpu_to_le32(!!t_r), }; struct { struct wil6210_mbox_hdr_wmi wmi; struct wmi_temp_sense_done_event evt; } __packed reply; rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, &cmd, sizeof(cmd), WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100); if (rc) return rc; if (t_m) *t_m = le32_to_cpu(reply.evt.marlon_m_t1000); if (t_r) *t_r = le32_to_cpu(reply.evt.marlon_r_t1000); return 0; } void wmi_event_flush(struct wil6210_priv *wil) { struct pending_wmi_event *evt, *t; wil_dbg_wmi(wil, "%s()\n", __func__); list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) { list_del(&evt->list); kfree(evt); } } static bool wmi_evt_call_handler(struct wil6210_priv *wil, int id, void *d, int len) { uint i; for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) { if (wmi_evt_handlers[i].eventid == id) { wmi_evt_handlers[i].handler(wil, id, d, len); return true; } } return false; } static void wmi_event_handle(struct wil6210_priv *wil, struct wil6210_mbox_hdr *hdr) { u16 len = le16_to_cpu(hdr->len); if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) && (len >= sizeof(struct wil6210_mbox_hdr_wmi))) { struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]); void *evt_data = (void *)(&wmi[1]); u16 id = le16_to_cpu(wmi->id); /* check if someone waits for this event */ if (wil->reply_id && wil->reply_id == id) { if (wil->reply_buf) { memcpy(wil->reply_buf, wmi, min(len, 
wil->reply_size)); } else { wmi_evt_call_handler(wil, id, evt_data, len - sizeof(*wmi)); } wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id); complete(&wil->wmi_ready); return; } /* unsolicited event */ /* search for handler */ if (!wmi_evt_call_handler(wil, id, evt_data, len - sizeof(*wmi))) { wil_err(wil, "Unhandled event 0x%04x\n", id); } } else { wil_err(wil, "Unknown event type\n"); print_hex_dump(KERN_ERR, "evt?? ", DUMP_PREFIX_OFFSET, 16, 1, hdr, sizeof(*hdr) + len, true); } } /* * Retrieve next WMI event from the pending list */ static struct list_head *next_wmi_ev(struct wil6210_priv *wil) { ulong flags; struct list_head *ret = NULL; spin_lock_irqsave(&wil->wmi_ev_lock, flags); if (!list_empty(&wil->pending_wmi_ev)) { ret = wil->pending_wmi_ev.next; list_del(ret); } spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); return ret; } /* * Handler for the WMI events */ void wmi_event_worker(struct work_struct *work) { struct wil6210_priv *wil = container_of(work, struct wil6210_priv, wmi_event_worker); struct pending_wmi_event *evt; struct list_head *lh; while ((lh = next_wmi_ev(wil)) != NULL) { evt = list_entry(lh, struct pending_wmi_event, list); wmi_event_handle(wil, &evt->event.hdr); kfree(evt); } }
gpl-2.0
faux123/msm8660-htc-ics
drivers/hwmon/pmbus_core.c
1583
40982
/* * Hardware monitoring driver for PMBus devices * * Copyright (c) 2010, 2011 Ericsson AB. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/delay.h> #include <linux/i2c/pmbus.h> #include "pmbus.h" /* * Constants needed to determine number of sensors, booleans, and labels. 
*/ #define PMBUS_MAX_INPUT_SENSORS 11 /* 6*volt, 3*curr, 2*power */ #define PMBUS_VOUT_SENSORS_PER_PAGE 5 /* input, min, max, lcrit, crit */ #define PMBUS_IOUT_SENSORS_PER_PAGE 4 /* input, min, max, crit */ #define PMBUS_POUT_SENSORS_PER_PAGE 4 /* input, cap, max, crit */ #define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */ #define PMBUS_MAX_SENSORS_PER_TEMP 5 /* input, min, max, lcrit, crit */ #define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm, lcrit_alarm, crit_alarm; c: alarm, crit_alarm; p: crit_alarm */ #define PMBUS_VOUT_BOOLEANS_PER_PAGE 4 /* min_alarm, max_alarm, lcrit_alarm, crit_alarm */ #define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm, crit_alarm */ #define PMBUS_POUT_BOOLEANS_PER_PAGE 3 /* cap_alarm, alarm, crit_alarm */ #define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */ #define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm, lcrit_alarm, crit_alarm */ #define PMBUS_MAX_INPUT_LABELS 4 /* vin, vcap, iin, pin */ /* * status, status_vout, status_iout, status_fans, status_fan34, and status_temp * are paged. status_input is unpaged. */ #define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1) /* * Index into status register array, per status register group */ #define PB_STATUS_BASE 0 #define PB_STATUS_VOUT_BASE (PB_STATUS_BASE + PMBUS_PAGES) #define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES) #define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES) #define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES) #define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES) #define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1) struct pmbus_sensor { char name[I2C_NAME_SIZE]; /* sysfs sensor name */ struct sensor_device_attribute attribute; u8 page; /* page number */ u8 reg; /* register */ enum pmbus_sensor_classes class; /* sensor class */ bool update; /* runtime sensor update needed */ int data; /* Sensor data. 
Negative if there was a read error */ }; struct pmbus_boolean { char name[I2C_NAME_SIZE]; /* sysfs boolean name */ struct sensor_device_attribute attribute; }; struct pmbus_label { char name[I2C_NAME_SIZE]; /* sysfs label name */ struct sensor_device_attribute attribute; char label[I2C_NAME_SIZE]; /* label */ }; struct pmbus_data { struct device *hwmon_dev; u32 flags; /* from platform data */ int exponent; /* linear mode: exponent for output voltages */ const struct pmbus_driver_info *info; int max_attributes; int num_attributes; struct attribute **attributes; struct attribute_group group; /* * Sensors cover both sensor and limit registers. */ int max_sensors; int num_sensors; struct pmbus_sensor *sensors; /* * Booleans are used for alarms. * Values are determined from status registers. */ int max_booleans; int num_booleans; struct pmbus_boolean *booleans; /* * Labels are used to map generic names (e.g., "in1") * to PMBus specific names (e.g., "vin" or "vout1"). */ int max_labels; int num_labels; struct pmbus_label *labels; struct mutex update_lock; bool valid; unsigned long last_updated; /* in jiffies */ /* * A single status register covers multiple attributes, * so we keep them all together. 
*/ u8 status[PB_NUM_STATUS_REG]; u8 currpage; }; int pmbus_set_page(struct i2c_client *client, u8 page) { struct pmbus_data *data = i2c_get_clientdata(client); int rv = 0; int newpage; if (page != data->currpage) { rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE); if (newpage != page) rv = -EINVAL; else data->currpage = page; } return rv; } EXPORT_SYMBOL_GPL(pmbus_set_page); static int pmbus_write_byte(struct i2c_client *client, u8 page, u8 value) { int rv; rv = pmbus_set_page(client, page); if (rv < 0) return rv; return i2c_smbus_write_byte(client, value); } static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) { int rv; rv = pmbus_set_page(client, page); if (rv < 0) return rv; return i2c_smbus_write_word_data(client, reg, word); } int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg) { int rv; rv = pmbus_set_page(client, page); if (rv < 0) return rv; return i2c_smbus_read_word_data(client, reg); } EXPORT_SYMBOL_GPL(pmbus_read_word_data); static int pmbus_read_byte_data(struct i2c_client *client, u8 page, u8 reg) { int rv; rv = pmbus_set_page(client, page); if (rv < 0) return rv; return i2c_smbus_read_byte_data(client, reg); } static void pmbus_clear_fault_page(struct i2c_client *client, int page) { pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS); } void pmbus_clear_faults(struct i2c_client *client) { struct pmbus_data *data = i2c_get_clientdata(client); int i; for (i = 0; i < data->info->pages; i++) pmbus_clear_fault_page(client, i); } EXPORT_SYMBOL_GPL(pmbus_clear_faults); static int pmbus_check_status_cml(struct i2c_client *client, int page) { int status, status2; status = pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE); if (status < 0 || (status & PB_STATUS_CML)) { status2 = pmbus_read_byte_data(client, page, PMBUS_STATUS_CML); if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND)) return -EINVAL; } return 0; } bool 
pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
{
	int rv;
	struct pmbus_data *data = i2c_get_clientdata(client);

	rv = pmbus_read_byte_data(client, page, reg);
	/*
	 * Some chips ack a read of an unsupported register but flag a CML
	 * fault; treat that the same as a failed read unless the platform
	 * asked to skip the status check.
	 */
	if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
		rv = pmbus_check_status_cml(client, page);
	/* The probe read may have latched a fault; clear it again. */
	pmbus_clear_fault_page(client, page);
	return rv >= 0;
}
EXPORT_SYMBOL_GPL(pmbus_check_byte_register);

/*
 * Probe whether a 16-bit register exists on @page.
 * Same strategy as pmbus_check_byte_register(), but with a word read.
 * Returns true if the register is readable and did not raise a CML fault.
 */
bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
{
	int rv;
	struct pmbus_data *data = i2c_get_clientdata(client);

	rv = pmbus_read_word_data(client, page, reg);
	if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
		rv = pmbus_check_status_cml(client, page);
	pmbus_clear_fault_page(client, page);
	return rv >= 0;
}
EXPORT_SYMBOL_GPL(pmbus_check_word_register);

/* Accessor for the chip description registered at probe time. */
const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
{
	struct pmbus_data *data = i2c_get_clientdata(client);

	return data->info;
}
EXPORT_SYMBOL_GPL(pmbus_get_driver_info);

/*
 * _pmbus_read_byte_data() is similar to pmbus_read_byte_data(), but checks if
 * a device specific mapping function exists and calls it if necessary.
*/ static int _pmbus_read_byte_data(struct i2c_client *client, int page, int reg) { struct pmbus_data *data = i2c_get_clientdata(client); const struct pmbus_driver_info *info = data->info; int status; if (info->read_byte_data) { status = info->read_byte_data(client, page, reg); if (status != -ENODATA) return status; } return pmbus_read_byte_data(client, page, reg); } static struct pmbus_data *pmbus_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct pmbus_data *data = i2c_get_clientdata(client); const struct pmbus_driver_info *info = data->info; mutex_lock(&data->update_lock); if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { int i; for (i = 0; i < info->pages; i++) data->status[PB_STATUS_BASE + i] = pmbus_read_byte_data(client, i, PMBUS_STATUS_BYTE); for (i = 0; i < info->pages; i++) { if (!(info->func[i] & PMBUS_HAVE_STATUS_VOUT)) continue; data->status[PB_STATUS_VOUT_BASE + i] = _pmbus_read_byte_data(client, i, PMBUS_STATUS_VOUT); } for (i = 0; i < info->pages; i++) { if (!(info->func[i] & PMBUS_HAVE_STATUS_IOUT)) continue; data->status[PB_STATUS_IOUT_BASE + i] = _pmbus_read_byte_data(client, i, PMBUS_STATUS_IOUT); } for (i = 0; i < info->pages; i++) { if (!(info->func[i] & PMBUS_HAVE_STATUS_TEMP)) continue; data->status[PB_STATUS_TEMP_BASE + i] = _pmbus_read_byte_data(client, i, PMBUS_STATUS_TEMPERATURE); } for (i = 0; i < info->pages; i++) { if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN12)) continue; data->status[PB_STATUS_FAN_BASE + i] = _pmbus_read_byte_data(client, i, PMBUS_STATUS_FAN_12); } for (i = 0; i < info->pages; i++) { if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN34)) continue; data->status[PB_STATUS_FAN34_BASE + i] = _pmbus_read_byte_data(client, i, PMBUS_STATUS_FAN_34); } if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) data->status[PB_STATUS_INPUT_BASE] = _pmbus_read_byte_data(client, 0, PMBUS_STATUS_INPUT); for (i = 0; i < data->num_sensors; i++) { struct pmbus_sensor *sensor = &data->sensors[i]; 
if (!data->valid || sensor->update) sensor->data = pmbus_read_word_data(client, sensor->page, sensor->reg); } pmbus_clear_faults(client); data->last_updated = jiffies; data->valid = 1; } mutex_unlock(&data->update_lock); return data; } /* * Convert linear sensor values to milli- or micro-units * depending on sensor type. */ static long pmbus_reg2data_linear(struct pmbus_data *data, struct pmbus_sensor *sensor) { s16 exponent; s32 mantissa; long val; if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */ exponent = data->exponent; mantissa = (u16) sensor->data; } else { /* LINEAR11 */ exponent = (sensor->data >> 11) & 0x001f; mantissa = sensor->data & 0x07ff; if (exponent > 0x0f) exponent |= 0xffe0; /* sign extend exponent */ if (mantissa > 0x03ff) mantissa |= 0xfffff800; /* sign extend mantissa */ } val = mantissa; /* scale result to milli-units for all sensors except fans */ if (sensor->class != PSC_FAN) val = val * 1000L; /* scale result to micro-units for power sensors */ if (sensor->class == PSC_POWER) val = val * 1000L; if (exponent >= 0) val <<= exponent; else val >>= -exponent; return val; } /* * Convert direct sensor values to milli- or micro-units * depending on sensor type. 
 */
static long pmbus_reg2data_direct(struct pmbus_data *data,
				  struct pmbus_sensor *sensor)
{
	long val = (s16) sensor->data;
	long m, b, R;

	m = data->info->m[sensor->class];
	b = data->info->b[sensor->class];
	R = data->info->R[sensor->class];

	/* A zero slope would divide by zero below; treat as "no reading". */
	if (m == 0)
		return 0;

	/* X = 1/m * (Y * 10^-R - b) */
	R = -R;
	/* scale result to milli-units for everything but fans */
	if (sensor->class != PSC_FAN) {
		R += 3;
		b *= 1000;
	}

	/* scale result to micro-units for power sensors */
	/* (power sensors also took the milli-unit adjustment above) */
	if (sensor->class == PSC_POWER) {
		R += 3;
		b *= 1000;
	}

	/* Apply the 10^R factor one decade at a time. */
	while (R > 0) {
		val *= 10;
		R--;
	}
	while (R < 0) {
		val = DIV_ROUND_CLOSEST(val, 10);
		R++;
	}

	return (val - b) / m;
}

/* Dispatch register-to-value conversion by the sensor class's format. */
static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
{
	long val;

	if (data->info->direct[sensor->class])
		val = pmbus_reg2data_direct(data, sensor);
	else
		val = pmbus_reg2data_linear(data, sensor);

	return val;
}

/* LINEAR11 mantissa bounds, expressed in milli-units. */
#define MAX_MANTISSA	(1023 * 1000)
#define MIN_MANTISSA	(511 * 1000)

/*
 * Convert a milli-/micro-unit value into LINEAR11 (or LINEAR16 for output
 * voltages) register format.
 */
static u16 pmbus_data2reg_linear(struct pmbus_data *data,
				 enum pmbus_sensor_classes class, long val)
{
	s16 exponent = 0, mantissa;
	bool negative = false;

	/* simple case */
	if (val == 0)
		return 0;

	if (class == PSC_VOLTAGE_OUT) {
		/* LINEAR16 does not support negative voltages */
		if (val < 0)
			return 0;

		/*
		 * For a static exponents, we don't have a choice
		 * but to adjust the value to it.
		 */
		if (data->exponent < 0)
			val <<= -data->exponent;
		else
			val >>= data->exponent;
		val = DIV_ROUND_CLOSEST(val, 1000);
		return val & 0xffff;
	}

	/* LINEAR11 mantissa is signed; work on the magnitude. */
	if (val < 0) {
		negative = true;
		val = -val;
	}

	/* Power is in uW. Convert to mW before converting. */
	if (class == PSC_POWER)
		val = DIV_ROUND_CLOSEST(val, 1000L);

	/*
	 * For simplicity, convert fan data to milli-units
	 * before calculating the exponent.
	 */
	if (class == PSC_FAN)
		val = val * 1000;

	/* Reduce large mantissa until it fits into 10 bit */
	while (val >= MAX_MANTISSA && exponent < 15) {
		exponent++;
		val >>= 1;
	}
	/* Increase small mantissa to improve precision */
	while (val < MIN_MANTISSA && exponent > -15) {
		exponent--;
		val <<= 1;
	}

	/* Convert mantissa from milli-units to units */
	mantissa = DIV_ROUND_CLOSEST(val, 1000);

	/* Ensure that resulting number is within range */
	if (mantissa > 0x3ff)
		mantissa = 0x3ff;

	/* restore sign */
	if (negative)
		mantissa = -mantissa;

	/* Convert to 5 bit exponent, 11 bit mantissa */
	return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
}

/*
 * Convert a milli-/micro-unit value into direct-format register value:
 * Y = (m * X + b) * 10^R.
 */
static u16 pmbus_data2reg_direct(struct pmbus_data *data,
				 enum pmbus_sensor_classes class, long val)
{
	long m, b, R;

	m = data->info->m[class];
	b = data->info->b[class];
	R = data->info->R[class];

	/* Power is in uW. Adjust R and b. */
	if (class == PSC_POWER) {
		R -= 3;
		b *= 1000;
	}

	/* Calculate Y = (m * X + b) * 10^R */
	if (class != PSC_FAN) {
		R -= 3;		/* Adjust R and b for data in milli-units */
		b *= 1000;
	}
	val = val * m + b;

	/* Apply the 10^R factor one decade at a time. */
	while (R > 0) {
		val *= 10;
		R--;
	}
	while (R < 0) {
		val = DIV_ROUND_CLOSEST(val, 10);
		R++;
	}

	return val;
}

/* Dispatch value-to-register conversion by the sensor class's format. */
static u16 pmbus_data2reg(struct pmbus_data *data,
			  enum pmbus_sensor_classes class, long val)
{
	u16 regval;

	if (data->info->direct[class])
		regval = pmbus_data2reg_direct(data, class, val);
	else
		regval = pmbus_data2reg_linear(data, class, val);

	return regval;
}

/*
 * Return boolean calculated from converted data.
 * <index> defines a status register index and mask, and optionally
 * two sensor indices.
 * The upper half-word references the two optional sensors,
 * the lower half word references status register and mask.
 *
 * For booleans created with pmbus_add_boolean_reg(), only the lower 16 bits of
 * index are set. s1 and s2 (the sensor index values) are zero in this case.
 * The function returns true if (status[reg] & mask) is true.
 *
 * If the boolean was created with pmbus_add_boolean_cmp(), a comparison against
 * a specified limit has to be performed to determine the boolean result.
 * In this case, the function returns true if v1 >= v2 (where v1 and v2 are
 * sensor values referenced by sensor indices s1 and s2).
 *
 * To determine if an object exceeds upper limits, specify <s1,s2> = <v,limit>.
 * To determine if an object exceeds lower limits, specify <s1,s2> = <limit,v>.
 *
 * If a negative value is stored in any of the referenced registers, this value
 * reflects an error code which will be returned.
 */
static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
{
	/* Unpack the encoded index: s1/s2 sensors, status register, mask. */
	u8 s1 = (index >> 24) & 0xff;
	u8 s2 = (index >> 16) & 0xff;
	u8 reg = (index >> 8) & 0xff;
	u8 mask = index & 0xff;
	int status;
	u8 regval;

	status = data->status[reg];
	/*
	 * FIXME(review): data->status[] elements are u8, so this error
	 * check can never fire — negative error codes were truncated when
	 * the cache was filled in pmbus_update_device(). Confirm whether
	 * the cache should be widened to a signed type.
	 */
	if (status < 0)
		return status;

	regval = status & mask;
	if (!s1 && !s2)
		*val = !!regval;
	else {
		long v1, v2;
		struct pmbus_sensor *sensor1, *sensor2;

		sensor1 = &data->sensors[s1];
		if (sensor1->data < 0)
			return sensor1->data;
		sensor2 = &data->sensors[s2];
		if (sensor2->data < 0)
			return sensor2->data;

		/* Status bit must be set AND the comparison must hold. */
		v1 = pmbus_reg2data(data, sensor1);
		v2 = pmbus_reg2data(data, sensor2);
		*val = !!(regval && v1 >= v2);
	}
	return 0;
}

/* sysfs show handler for alarm/fault boolean attributes. */
static ssize_t pmbus_show_boolean(struct device *dev,
				  struct device_attribute *da, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct pmbus_data *data = pmbus_update_device(dev);
	int val;
	int err;

	err = pmbus_get_boolean(data, attr->index, &val);
	if (err)
		return err;
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

/* sysfs show handler for sensor input and limit attributes. */
static ssize_t pmbus_show_sensor(struct device *dev,
				 struct device_attribute *da, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct pmbus_data *data =
pmbus_update_device(dev); struct pmbus_sensor *sensor; sensor = &data->sensors[attr->index]; if (sensor->data < 0) return sensor->data; return snprintf(buf, PAGE_SIZE, "%ld\n", pmbus_reg2data(data, sensor)); } static ssize_t pmbus_set_sensor(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct i2c_client *client = to_i2c_client(dev); struct pmbus_data *data = i2c_get_clientdata(client); struct pmbus_sensor *sensor = &data->sensors[attr->index]; ssize_t rv = count; long val = 0; int ret; u16 regval; if (strict_strtol(buf, 10, &val) < 0) return -EINVAL; mutex_lock(&data->update_lock); regval = pmbus_data2reg(data, sensor->class, val); ret = pmbus_write_word_data(client, sensor->page, sensor->reg, regval); if (ret < 0) rv = ret; else data->sensors[attr->index].data = regval; mutex_unlock(&data->update_lock); return rv; } static ssize_t pmbus_show_label(struct device *dev, struct device_attribute *da, char *buf) { struct i2c_client *client = to_i2c_client(dev); struct pmbus_data *data = i2c_get_clientdata(client); struct sensor_device_attribute *attr = to_sensor_dev_attr(da); return snprintf(buf, PAGE_SIZE, "%s\n", data->labels[attr->index].label); } #define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \ do { \ struct sensor_device_attribute *a \ = &data->_type##s[data->num_##_type##s].attribute; \ BUG_ON(data->num_attributes >= data->max_attributes); \ sysfs_attr_init(&a->dev_attr.attr); \ a->dev_attr.attr.name = _name; \ a->dev_attr.attr.mode = _mode; \ a->dev_attr.show = _show; \ a->dev_attr.store = _set; \ a->index = _idx; \ data->attributes[data->num_attributes] = &a->dev_attr.attr; \ data->num_attributes++; \ } while (0) #define PMBUS_ADD_GET_ATTR(data, _name, _type, _idx) \ PMBUS_ADD_ATTR(data, _name, _idx, S_IRUGO, _type, \ pmbus_show_##_type, NULL) #define PMBUS_ADD_SET_ATTR(data, _name, _type, _idx) \ PMBUS_ADD_ATTR(data, _name, _idx, 
S_IWUSR | S_IRUGO, _type, \ pmbus_show_##_type, pmbus_set_##_type) static void pmbus_add_boolean(struct pmbus_data *data, const char *name, const char *type, int seq, int idx) { struct pmbus_boolean *boolean; BUG_ON(data->num_booleans >= data->max_booleans); boolean = &data->booleans[data->num_booleans]; snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s", name, seq, type); PMBUS_ADD_GET_ATTR(data, boolean->name, boolean, idx); data->num_booleans++; } static void pmbus_add_boolean_reg(struct pmbus_data *data, const char *name, const char *type, int seq, int reg, int bit) { pmbus_add_boolean(data, name, type, seq, (reg << 8) | bit); } static void pmbus_add_boolean_cmp(struct pmbus_data *data, const char *name, const char *type, int seq, int i1, int i2, int reg, int mask) { pmbus_add_boolean(data, name, type, seq, (i1 << 24) | (i2 << 16) | (reg << 8) | mask); } static void pmbus_add_sensor(struct pmbus_data *data, const char *name, const char *type, int seq, int page, int reg, enum pmbus_sensor_classes class, bool update, bool readonly) { struct pmbus_sensor *sensor; BUG_ON(data->num_sensors >= data->max_sensors); sensor = &data->sensors[data->num_sensors]; snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s", name, seq, type); sensor->page = page; sensor->reg = reg; sensor->class = class; sensor->update = update; if (readonly) PMBUS_ADD_GET_ATTR(data, sensor->name, sensor, data->num_sensors); else PMBUS_ADD_SET_ATTR(data, sensor->name, sensor, data->num_sensors); data->num_sensors++; } static void pmbus_add_label(struct pmbus_data *data, const char *name, int seq, const char *lstring, int index) { struct pmbus_label *label; BUG_ON(data->num_labels >= data->max_labels); label = &data->labels[data->num_labels]; snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq); if (!index) strncpy(label->label, lstring, sizeof(label->label) - 1); else snprintf(label->label, sizeof(label->label), "%s%d", lstring, index); PMBUS_ADD_GET_ATTR(data, label->name, 
label, data->num_labels); data->num_labels++; } /* * Determine maximum number of sensors, booleans, and labels. * To keep things simple, only make a rough high estimate. */ static void pmbus_find_max_attr(struct i2c_client *client, struct pmbus_data *data) { const struct pmbus_driver_info *info = data->info; int page, max_sensors, max_booleans, max_labels; max_sensors = PMBUS_MAX_INPUT_SENSORS; max_booleans = PMBUS_MAX_INPUT_BOOLEANS; max_labels = PMBUS_MAX_INPUT_LABELS; for (page = 0; page < info->pages; page++) { if (info->func[page] & PMBUS_HAVE_VOUT) { max_sensors += PMBUS_VOUT_SENSORS_PER_PAGE; max_booleans += PMBUS_VOUT_BOOLEANS_PER_PAGE; max_labels++; } if (info->func[page] & PMBUS_HAVE_IOUT) { max_sensors += PMBUS_IOUT_SENSORS_PER_PAGE; max_booleans += PMBUS_IOUT_BOOLEANS_PER_PAGE; max_labels++; } if (info->func[page] & PMBUS_HAVE_POUT) { max_sensors += PMBUS_POUT_SENSORS_PER_PAGE; max_booleans += PMBUS_POUT_BOOLEANS_PER_PAGE; max_labels++; } if (info->func[page] & PMBUS_HAVE_FAN12) { max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN; max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN; } if (info->func[page] & PMBUS_HAVE_FAN34) { max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN; max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN; } if (info->func[page] & PMBUS_HAVE_TEMP) { max_sensors += PMBUS_MAX_SENSORS_PER_TEMP; max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP; } if (info->func[page] & PMBUS_HAVE_TEMP2) { max_sensors += PMBUS_MAX_SENSORS_PER_TEMP; max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP; } if (info->func[page] & PMBUS_HAVE_TEMP3) { max_sensors += PMBUS_MAX_SENSORS_PER_TEMP; max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP; } } data->max_sensors = max_sensors; data->max_booleans = max_booleans; data->max_labels = max_labels; data->max_attributes = max_sensors + max_booleans + max_labels; } /* * Search for attributes. Allocate sensors, booleans, and labels as needed. */ /* * The pmbus_limit_attr structure describes a single limit attribute * and its associated alarm attribute. 
*/ struct pmbus_limit_attr { u8 reg; /* Limit register */ const char *attr; /* Attribute name */ const char *alarm; /* Alarm attribute name */ u32 sbit; /* Alarm attribute status bit */ }; /* * The pmbus_sensor_attr structure describes one sensor attribute. This * description includes a reference to the associated limit attributes. */ struct pmbus_sensor_attr { u8 reg; /* sensor register */ enum pmbus_sensor_classes class;/* sensor class */ const char *label; /* sensor label */ bool paged; /* true if paged sensor */ bool update; /* true if update needed */ bool compare; /* true if compare function needed */ u32 func; /* sensor mask */ u32 sfunc; /* sensor status mask */ int sbase; /* status base register */ u32 gbit; /* generic status bit */ const struct pmbus_limit_attr *limit;/* limit registers */ int nlimit; /* # of limit registers */ }; /* * Add a set of limit attributes and, if supported, the associated * alarm attributes. */ static bool pmbus_add_limit_attrs(struct i2c_client *client, struct pmbus_data *data, const struct pmbus_driver_info *info, const char *name, int index, int page, int cbase, const struct pmbus_sensor_attr *attr) { const struct pmbus_limit_attr *l = attr->limit; int nlimit = attr->nlimit; bool have_alarm = false; int i, cindex; for (i = 0; i < nlimit; i++) { if (pmbus_check_word_register(client, page, l->reg)) { cindex = data->num_sensors; pmbus_add_sensor(data, name, l->attr, index, page, l->reg, attr->class, attr->update, false); if (info->func[page] & attr->sfunc) { if (attr->compare) { pmbus_add_boolean_cmp(data, name, l->alarm, index, cbase, cindex, attr->sbase + page, l->sbit); } else { pmbus_add_boolean_reg(data, name, l->alarm, index, attr->sbase + page, l->sbit); } have_alarm = true; } } l++; } return have_alarm; } static void pmbus_add_sensor_attrs_one(struct i2c_client *client, struct pmbus_data *data, const struct pmbus_driver_info *info, const char *name, int index, int page, const struct pmbus_sensor_attr *attr) { bool 
have_alarm; int cbase = data->num_sensors; if (attr->label) pmbus_add_label(data, name, index, attr->label, attr->paged ? page + 1 : 0); pmbus_add_sensor(data, name, "input", index, page, attr->reg, attr->class, true, true); if (attr->sfunc) { have_alarm = pmbus_add_limit_attrs(client, data, info, name, index, page, cbase, attr); /* * Add generic alarm attribute only if there are no individual * alarm attributes, and if there is a global alarm bit. */ if (!have_alarm && attr->gbit) pmbus_add_boolean_reg(data, name, "alarm", index, PB_STATUS_BASE + page, attr->gbit); } } static void pmbus_add_sensor_attrs(struct i2c_client *client, struct pmbus_data *data, const char *name, const struct pmbus_sensor_attr *attrs, int nattrs) { const struct pmbus_driver_info *info = data->info; int index, i; index = 1; for (i = 0; i < nattrs; i++) { int page, pages; pages = attrs->paged ? info->pages : 1; for (page = 0; page < pages; page++) { if (!(info->func[page] & attrs->func)) continue; pmbus_add_sensor_attrs_one(client, data, info, name, index, page, attrs); index++; } attrs++; } } static const struct pmbus_limit_attr vin_limit_attrs[] = { { .reg = PMBUS_VIN_UV_WARN_LIMIT, .attr = "min", .alarm = "min_alarm", .sbit = PB_VOLTAGE_UV_WARNING, }, { .reg = PMBUS_VIN_UV_FAULT_LIMIT, .attr = "lcrit", .alarm = "lcrit_alarm", .sbit = PB_VOLTAGE_UV_FAULT, }, { .reg = PMBUS_VIN_OV_WARN_LIMIT, .attr = "max", .alarm = "max_alarm", .sbit = PB_VOLTAGE_OV_WARNING, }, { .reg = PMBUS_VIN_OV_FAULT_LIMIT, .attr = "crit", .alarm = "crit_alarm", .sbit = PB_VOLTAGE_OV_FAULT, }, }; static const struct pmbus_limit_attr vout_limit_attrs[] = { { .reg = PMBUS_VOUT_UV_WARN_LIMIT, .attr = "min", .alarm = "min_alarm", .sbit = PB_VOLTAGE_UV_WARNING, }, { .reg = PMBUS_VOUT_UV_FAULT_LIMIT, .attr = "lcrit", .alarm = "lcrit_alarm", .sbit = PB_VOLTAGE_UV_FAULT, }, { .reg = PMBUS_VOUT_OV_WARN_LIMIT, .attr = "max", .alarm = "max_alarm", .sbit = PB_VOLTAGE_OV_WARNING, }, { .reg = PMBUS_VOUT_OV_FAULT_LIMIT, .attr = 
"crit", .alarm = "crit_alarm", .sbit = PB_VOLTAGE_OV_FAULT, } }; static const struct pmbus_sensor_attr voltage_attributes[] = { { .reg = PMBUS_READ_VIN, .class = PSC_VOLTAGE_IN, .label = "vin", .func = PMBUS_HAVE_VIN, .sfunc = PMBUS_HAVE_STATUS_INPUT, .sbase = PB_STATUS_INPUT_BASE, .gbit = PB_STATUS_VIN_UV, .limit = vin_limit_attrs, .nlimit = ARRAY_SIZE(vin_limit_attrs), }, { .reg = PMBUS_READ_VCAP, .class = PSC_VOLTAGE_IN, .label = "vcap", .func = PMBUS_HAVE_VCAP, }, { .reg = PMBUS_READ_VOUT, .class = PSC_VOLTAGE_OUT, .label = "vout", .paged = true, .func = PMBUS_HAVE_VOUT, .sfunc = PMBUS_HAVE_STATUS_VOUT, .sbase = PB_STATUS_VOUT_BASE, .gbit = PB_STATUS_VOUT_OV, .limit = vout_limit_attrs, .nlimit = ARRAY_SIZE(vout_limit_attrs), } }; /* Current attributes */ static const struct pmbus_limit_attr iin_limit_attrs[] = { { .reg = PMBUS_IIN_OC_WARN_LIMIT, .attr = "max", .alarm = "max_alarm", .sbit = PB_IIN_OC_WARNING, }, { .reg = PMBUS_IIN_OC_FAULT_LIMIT, .attr = "crit", .alarm = "crit_alarm", .sbit = PB_IIN_OC_FAULT, } }; static const struct pmbus_limit_attr iout_limit_attrs[] = { { .reg = PMBUS_IOUT_OC_WARN_LIMIT, .attr = "max", .alarm = "max_alarm", .sbit = PB_IOUT_OC_WARNING, }, { .reg = PMBUS_IOUT_UC_FAULT_LIMIT, .attr = "lcrit", .alarm = "lcrit_alarm", .sbit = PB_IOUT_UC_FAULT, }, { .reg = PMBUS_IOUT_OC_FAULT_LIMIT, .attr = "crit", .alarm = "crit_alarm", .sbit = PB_IOUT_OC_FAULT, } }; static const struct pmbus_sensor_attr current_attributes[] = { { .reg = PMBUS_READ_IIN, .class = PSC_CURRENT_IN, .label = "iin", .func = PMBUS_HAVE_IIN, .sfunc = PMBUS_HAVE_STATUS_INPUT, .sbase = PB_STATUS_INPUT_BASE, .limit = iin_limit_attrs, .nlimit = ARRAY_SIZE(iin_limit_attrs), }, { .reg = PMBUS_READ_IOUT, .class = PSC_CURRENT_OUT, .label = "iout", .paged = true, .func = PMBUS_HAVE_IOUT, .sfunc = PMBUS_HAVE_STATUS_IOUT, .sbase = PB_STATUS_IOUT_BASE, .gbit = PB_STATUS_IOUT_OC, .limit = iout_limit_attrs, .nlimit = ARRAY_SIZE(iout_limit_attrs), } }; /* Power attributes */ static 
const struct pmbus_limit_attr pin_limit_attrs[] = { { .reg = PMBUS_PIN_OP_WARN_LIMIT, .attr = "max", .alarm = "alarm", .sbit = PB_PIN_OP_WARNING, } }; static const struct pmbus_limit_attr pout_limit_attrs[] = { { .reg = PMBUS_POUT_MAX, .attr = "cap", .alarm = "cap_alarm", .sbit = PB_POWER_LIMITING, }, { .reg = PMBUS_POUT_OP_WARN_LIMIT, .attr = "max", .alarm = "max_alarm", .sbit = PB_POUT_OP_WARNING, }, { .reg = PMBUS_POUT_OP_FAULT_LIMIT, .attr = "crit", .alarm = "crit_alarm", .sbit = PB_POUT_OP_FAULT, } }; static const struct pmbus_sensor_attr power_attributes[] = { { .reg = PMBUS_READ_PIN, .class = PSC_POWER, .label = "pin", .func = PMBUS_HAVE_PIN, .sfunc = PMBUS_HAVE_STATUS_INPUT, .sbase = PB_STATUS_INPUT_BASE, .limit = pin_limit_attrs, .nlimit = ARRAY_SIZE(pin_limit_attrs), }, { .reg = PMBUS_READ_POUT, .class = PSC_POWER, .label = "pout", .paged = true, .func = PMBUS_HAVE_POUT, .sfunc = PMBUS_HAVE_STATUS_IOUT, .sbase = PB_STATUS_IOUT_BASE, .limit = pout_limit_attrs, .nlimit = ARRAY_SIZE(pout_limit_attrs), } }; /* Temperature atributes */ static const struct pmbus_limit_attr temp_limit_attrs[] = { { .reg = PMBUS_UT_WARN_LIMIT, .attr = "min", .alarm = "min_alarm", .sbit = PB_TEMP_UT_WARNING, }, { .reg = PMBUS_UT_FAULT_LIMIT, .attr = "lcrit", .alarm = "lcrit_alarm", .sbit = PB_TEMP_UT_FAULT, }, { .reg = PMBUS_OT_WARN_LIMIT, .attr = "max", .alarm = "max_alarm", .sbit = PB_TEMP_OT_WARNING, }, { .reg = PMBUS_OT_FAULT_LIMIT, .attr = "crit", .alarm = "crit_alarm", .sbit = PB_TEMP_OT_FAULT, } }; static const struct pmbus_sensor_attr temp_attributes[] = { { .reg = PMBUS_READ_TEMPERATURE_1, .class = PSC_TEMPERATURE, .paged = true, .update = true, .compare = true, .func = PMBUS_HAVE_TEMP, .sfunc = PMBUS_HAVE_STATUS_TEMP, .sbase = PB_STATUS_TEMP_BASE, .gbit = PB_STATUS_TEMPERATURE, .limit = temp_limit_attrs, .nlimit = ARRAY_SIZE(temp_limit_attrs), }, { .reg = PMBUS_READ_TEMPERATURE_2, .class = PSC_TEMPERATURE, .paged = true, .update = true, .compare = true, .func = 
PMBUS_HAVE_TEMP2, .sfunc = PMBUS_HAVE_STATUS_TEMP, .sbase = PB_STATUS_TEMP_BASE, .gbit = PB_STATUS_TEMPERATURE, .limit = temp_limit_attrs, .nlimit = ARRAY_SIZE(temp_limit_attrs), }, { .reg = PMBUS_READ_TEMPERATURE_3, .class = PSC_TEMPERATURE, .paged = true, .update = true, .compare = true, .func = PMBUS_HAVE_TEMP3, .sfunc = PMBUS_HAVE_STATUS_TEMP, .sbase = PB_STATUS_TEMP_BASE, .gbit = PB_STATUS_TEMPERATURE, .limit = temp_limit_attrs, .nlimit = ARRAY_SIZE(temp_limit_attrs), } }; static const int pmbus_fan_registers[] = { PMBUS_READ_FAN_SPEED_1, PMBUS_READ_FAN_SPEED_2, PMBUS_READ_FAN_SPEED_3, PMBUS_READ_FAN_SPEED_4 }; static const int pmbus_fan_config_registers[] = { PMBUS_FAN_CONFIG_12, PMBUS_FAN_CONFIG_12, PMBUS_FAN_CONFIG_34, PMBUS_FAN_CONFIG_34 }; static const int pmbus_fan_status_registers[] = { PMBUS_STATUS_FAN_12, PMBUS_STATUS_FAN_12, PMBUS_STATUS_FAN_34, PMBUS_STATUS_FAN_34 }; static const u32 pmbus_fan_flags[] = { PMBUS_HAVE_FAN12, PMBUS_HAVE_FAN12, PMBUS_HAVE_FAN34, PMBUS_HAVE_FAN34 }; static const u32 pmbus_fan_status_flags[] = { PMBUS_HAVE_STATUS_FAN12, PMBUS_HAVE_STATUS_FAN12, PMBUS_HAVE_STATUS_FAN34, PMBUS_HAVE_STATUS_FAN34 }; /* Fans */ static void pmbus_add_fan_attributes(struct i2c_client *client, struct pmbus_data *data) { const struct pmbus_driver_info *info = data->info; int index = 1; int page; for (page = 0; page < info->pages; page++) { int f; for (f = 0; f < ARRAY_SIZE(pmbus_fan_registers); f++) { int regval; if (!(info->func[page] & pmbus_fan_flags[f])) break; if (!pmbus_check_word_register(client, page, pmbus_fan_registers[f])) break; /* * Skip fan if not installed. * Each fan configuration register covers multiple fans, * so we have to do some magic. 
*/ regval = _pmbus_read_byte_data(client, page, pmbus_fan_config_registers[f]); if (regval < 0 || (!(regval & (PB_FAN_1_INSTALLED >> ((f & 1) * 4))))) continue; pmbus_add_sensor(data, "fan", "input", index, page, pmbus_fan_registers[f], PSC_FAN, true, true); /* * Each fan status register covers multiple fans, * so we have to do some magic. */ if ((info->func[page] & pmbus_fan_status_flags[f]) && pmbus_check_byte_register(client, page, pmbus_fan_status_registers[f])) { int base; if (f > 1) /* fan 3, 4 */ base = PB_STATUS_FAN34_BASE + page; else base = PB_STATUS_FAN_BASE + page; pmbus_add_boolean_reg(data, "fan", "alarm", index, base, PB_FAN_FAN1_WARNING >> (f & 1)); pmbus_add_boolean_reg(data, "fan", "fault", index, base, PB_FAN_FAN1_FAULT >> (f & 1)); } index++; } } } static void pmbus_find_attributes(struct i2c_client *client, struct pmbus_data *data) { /* Voltage sensors */ pmbus_add_sensor_attrs(client, data, "in", voltage_attributes, ARRAY_SIZE(voltage_attributes)); /* Current sensors */ pmbus_add_sensor_attrs(client, data, "curr", current_attributes, ARRAY_SIZE(current_attributes)); /* Power sensors */ pmbus_add_sensor_attrs(client, data, "power", power_attributes, ARRAY_SIZE(power_attributes)); /* Temperature sensors */ pmbus_add_sensor_attrs(client, data, "temp", temp_attributes, ARRAY_SIZE(temp_attributes)); /* Fans */ pmbus_add_fan_attributes(client, data); } /* * Identify chip parameters. * This function is called for all chips. */ static int pmbus_identify_common(struct i2c_client *client, struct pmbus_data *data) { int vout_mode = -1, exponent; if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); if (vout_mode >= 0 && vout_mode != 0xff) { /* * Not all chips support the VOUT_MODE command, * so a failure to read it is not an error. 
*/ switch (vout_mode >> 5) { case 0: /* linear mode */ if (data->info->direct[PSC_VOLTAGE_OUT]) return -ENODEV; exponent = vout_mode & 0x1f; /* and sign-extend it */ if (exponent & 0x10) exponent |= ~0x1f; data->exponent = exponent; break; case 2: /* direct mode */ if (!data->info->direct[PSC_VOLTAGE_OUT]) return -ENODEV; break; default: return -ENODEV; } } /* Determine maximum number of sensors, booleans, and labels */ pmbus_find_max_attr(client, data); pmbus_clear_fault_page(client, 0); return 0; } int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id, struct pmbus_driver_info *info) { const struct pmbus_platform_data *pdata = client->dev.platform_data; struct pmbus_data *data; int ret; if (!info) { dev_err(&client->dev, "Missing chip information"); return -ENODEV; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { dev_err(&client->dev, "No memory to allocate driver data\n"); return -ENOMEM; } i2c_set_clientdata(client, data); mutex_init(&data->update_lock); /* Bail out if PMBus status register does not exist. */ if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0) { dev_err(&client->dev, "PMBus status register not found\n"); ret = -ENODEV; goto out_data; } if (pdata) data->flags = pdata->flags; data->info = info; pmbus_clear_faults(client); if (info->identify) { ret = (*info->identify)(client, info); if (ret < 0) { dev_err(&client->dev, "Chip identification failed\n"); goto out_data; } } if (info->pages <= 0 || info->pages > PMBUS_PAGES) { dev_err(&client->dev, "Bad number of PMBus pages: %d\n", info->pages); ret = -EINVAL; goto out_data; } /* * Bail out if more than one page was configured, but we can not * select the highest page. This is an indication that the wrong * chip type was selected. Better bail out now than keep * returning errors later on. 
*/ if (info->pages > 1 && pmbus_set_page(client, info->pages - 1) < 0) { dev_err(&client->dev, "Failed to select page %d\n", info->pages - 1); ret = -EINVAL; goto out_data; } ret = pmbus_identify_common(client, data); if (ret < 0) { dev_err(&client->dev, "Failed to identify chip capabilities\n"); goto out_data; } ret = -ENOMEM; data->sensors = kzalloc(sizeof(struct pmbus_sensor) * data->max_sensors, GFP_KERNEL); if (!data->sensors) { dev_err(&client->dev, "No memory to allocate sensor data\n"); goto out_data; } data->booleans = kzalloc(sizeof(struct pmbus_boolean) * data->max_booleans, GFP_KERNEL); if (!data->booleans) { dev_err(&client->dev, "No memory to allocate boolean data\n"); goto out_sensors; } data->labels = kzalloc(sizeof(struct pmbus_label) * data->max_labels, GFP_KERNEL); if (!data->labels) { dev_err(&client->dev, "No memory to allocate label data\n"); goto out_booleans; } data->attributes = kzalloc(sizeof(struct attribute *) * data->max_attributes, GFP_KERNEL); if (!data->attributes) { dev_err(&client->dev, "No memory to allocate attribute data\n"); goto out_labels; } pmbus_find_attributes(client, data); /* * If there are no attributes, something is wrong. * Bail out instead of trying to register nothing. 
*/ if (!data->num_attributes) { dev_err(&client->dev, "No attributes found\n"); ret = -ENODEV; goto out_attributes; } /* Register sysfs hooks */ data->group.attrs = data->attributes; ret = sysfs_create_group(&client->dev.kobj, &data->group); if (ret) { dev_err(&client->dev, "Failed to create sysfs entries\n"); goto out_attributes; } data->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(data->hwmon_dev)) { ret = PTR_ERR(data->hwmon_dev); dev_err(&client->dev, "Failed to register hwmon device\n"); goto out_hwmon_device_register; } return 0; out_hwmon_device_register: sysfs_remove_group(&client->dev.kobj, &data->group); out_attributes: kfree(data->attributes); out_labels: kfree(data->labels); out_booleans: kfree(data->booleans); out_sensors: kfree(data->sensors); out_data: kfree(data); return ret; } EXPORT_SYMBOL_GPL(pmbus_do_probe); int pmbus_do_remove(struct i2c_client *client) { struct pmbus_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &data->group); kfree(data->attributes); kfree(data->labels); kfree(data->booleans); kfree(data->sensors); kfree(data); return 0; } EXPORT_SYMBOL_GPL(pmbus_do_remove); MODULE_AUTHOR("Guenter Roeck"); MODULE_DESCRIPTION("PMBus core driver"); MODULE_LICENSE("GPL");
gpl-2.0
A2109devs/lenovo_a2109a_kernel
arch/powerpc/platforms/pseries/eeh_sysfs.c
2863
3539
/* * Sysfs entries for PCI Error Recovery for PAPR-compliant platform. * Copyright IBM Corporation 2007 * Copyright Linas Vepstas <linas@austin.ibm.com> 2007 * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com> */ #include <linux/pci.h> #include <asm/ppc-pci.h> #include <asm/pci-bridge.h> /** * EEH_SHOW_ATTR -- create sysfs entry for eeh statistic * @_name: name of file in sysfs directory * @_memb: name of member in struct pci_dn to access * @_format: printf format for display * * All of the attributes look very similar, so just * auto-gen a cut-n-paste routine to display them. 
*/ #define EEH_SHOW_ATTR(_name,_memb,_format) \ static ssize_t eeh_show_##_name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct pci_dev *pdev = to_pci_dev(dev); \ struct device_node *dn = pci_device_to_OF_node(pdev); \ struct pci_dn *pdn; \ \ if (!dn || PCI_DN(dn) == NULL) \ return 0; \ \ pdn = PCI_DN(dn); \ return sprintf(buf, _format "\n", pdn->_memb); \ } \ static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL); EEH_SHOW_ATTR(eeh_mode, eeh_mode, "0x%x"); EEH_SHOW_ATTR(eeh_config_addr, eeh_config_addr, "0x%x"); EEH_SHOW_ATTR(eeh_pe_config_addr, eeh_pe_config_addr, "0x%x"); EEH_SHOW_ATTR(eeh_check_count, eeh_check_count, "%d"); EEH_SHOW_ATTR(eeh_freeze_count, eeh_freeze_count, "%d"); EEH_SHOW_ATTR(eeh_false_positives, eeh_false_positives, "%d"); void eeh_sysfs_add_device(struct pci_dev *pdev) { int rc=0; rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode); rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr); rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); rc += device_create_file(&pdev->dev, &dev_attr_eeh_check_count); rc += device_create_file(&pdev->dev, &dev_attr_eeh_false_positives); rc += device_create_file(&pdev->dev, &dev_attr_eeh_freeze_count); if (rc) printk(KERN_WARNING "EEH: Unable to create sysfs entries\n"); } void eeh_sysfs_remove_device(struct pci_dev *pdev) { device_remove_file(&pdev->dev, &dev_attr_eeh_mode); device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); device_remove_file(&pdev->dev, &dev_attr_eeh_check_count); device_remove_file(&pdev->dev, &dev_attr_eeh_false_positives); device_remove_file(&pdev->dev, &dev_attr_eeh_freeze_count); }
gpl-2.0
flex-devices/android_kernel_lge_g3
fs/hpfs/buffer.c
7983
3825
/* * linux/fs/hpfs/buffer.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * general buffer i/o */ #include <linux/sched.h> #include <linux/slab.h> #include "hpfs_fn.h" /* Map a sector into a buffer and return pointers to it and to the buffer. */ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp, int ahead) { struct buffer_head *bh; hpfs_lock_assert(s); cond_resched(); *bhp = bh = sb_bread(s, secno); if (bh != NULL) return bh->b_data; else { printk("HPFS: hpfs_map_sector: read error\n"); return NULL; } } /* Like hpfs_map_sector but don't read anything */ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp) { struct buffer_head *bh; /*return hpfs_map_sector(s, secno, bhp, 0);*/ hpfs_lock_assert(s); cond_resched(); if ((*bhp = bh = sb_getblk(s, secno)) != NULL) { if (!buffer_uptodate(bh)) wait_on_buffer(bh); set_buffer_uptodate(bh); return bh->b_data; } else { printk("HPFS: hpfs_get_sector: getblk failed\n"); return NULL; } } /* Map 4 sectors into a 4buffer and return pointers to it and to the buffer. 
*/ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh, int ahead) { struct buffer_head *bh; char *data; hpfs_lock_assert(s); cond_resched(); if (secno & 3) { printk("HPFS: hpfs_map_4sectors: unaligned read\n"); return NULL; } qbh->data = data = kmalloc(2048, GFP_NOFS); if (!data) { printk("HPFS: hpfs_map_4sectors: out of memory\n"); goto bail; } qbh->bh[0] = bh = sb_bread(s, secno); if (!bh) goto bail0; memcpy(data, bh->b_data, 512); qbh->bh[1] = bh = sb_bread(s, secno + 1); if (!bh) goto bail1; memcpy(data + 512, bh->b_data, 512); qbh->bh[2] = bh = sb_bread(s, secno + 2); if (!bh) goto bail2; memcpy(data + 2 * 512, bh->b_data, 512); qbh->bh[3] = bh = sb_bread(s, secno + 3); if (!bh) goto bail3; memcpy(data + 3 * 512, bh->b_data, 512); return data; bail3: brelse(qbh->bh[2]); bail2: brelse(qbh->bh[1]); bail1: brelse(qbh->bh[0]); bail0: kfree(data); printk("HPFS: hpfs_map_4sectors: read error\n"); bail: return NULL; } /* Don't read sectors */ void *hpfs_get_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh) { cond_resched(); hpfs_lock_assert(s); if (secno & 3) { printk("HPFS: hpfs_get_4sectors: unaligned read\n"); return NULL; } /*return hpfs_map_4sectors(s, secno, qbh, 0);*/ if (!(qbh->data = kmalloc(2048, GFP_NOFS))) { printk("HPFS: hpfs_get_4sectors: out of memory\n"); return NULL; } if (!(hpfs_get_sector(s, secno, &qbh->bh[0]))) goto bail0; if (!(hpfs_get_sector(s, secno + 1, &qbh->bh[1]))) goto bail1; if (!(hpfs_get_sector(s, secno + 2, &qbh->bh[2]))) goto bail2; if (!(hpfs_get_sector(s, secno + 3, &qbh->bh[3]))) goto bail3; memcpy(qbh->data, qbh->bh[0]->b_data, 512); memcpy(qbh->data + 512, qbh->bh[1]->b_data, 512); memcpy(qbh->data + 2*512, qbh->bh[2]->b_data, 512); memcpy(qbh->data + 3*512, qbh->bh[3]->b_data, 512); return qbh->data; bail3: brelse(qbh->bh[2]); bail2: brelse(qbh->bh[1]); bail1: brelse(qbh->bh[0]); bail0: return NULL; } void hpfs_brelse4(struct quad_buffer_head *qbh) { 
brelse(qbh->bh[3]); brelse(qbh->bh[2]); brelse(qbh->bh[1]); brelse(qbh->bh[0]); kfree(qbh->data); } void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh) { PRINTK(("hpfs_mark_4buffers_dirty\n")); memcpy(qbh->bh[0]->b_data, qbh->data, 512); memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512); memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512); memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512); mark_buffer_dirty(qbh->bh[0]); mark_buffer_dirty(qbh->bh[1]); mark_buffer_dirty(qbh->bh[2]); mark_buffer_dirty(qbh->bh[3]); }
gpl-2.0
GameTheory-/android_kernel_lge_l1m
drivers/hid/hid-a4tech.c
8239
3731
/*
 * HID driver for some a4tech "special" devices
 *
 * Copyright (c) 1999 Andreas Gal
 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 * Copyright (c) 2006-2007 Jiri Kosina
 * Copyright (c) 2007 Paul Walmsley
 * Copyright (c) 2008 Jiri Slaby
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hid-ids.h"

/* Quirk flags, selected per device through driver_data. */
#define A4_2WHEEL_MOUSE_HACK_7		0x01
#define A4_2WHEEL_MOUSE_HACK_B8		0x02

/* Per-device state, kept in HID drvdata. */
struct a4tech_sc {
	unsigned long quirks;
	unsigned int hw_wheel;
	__s32 delayed_value;
};

/*
 * Advertise a horizontal wheel next to every vertical one, and swallow
 * the quirk usage 0x00090007 for HACK_7 devices (it is handled in
 * a4_event(), not mapped to an input event).
 */
static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	struct a4tech_sc *sc = hid_get_drvdata(hdev);

	if (usage->type == EV_REL && usage->code == REL_WHEEL)
		set_bit(REL_HWHEEL, *bit);

	if ((sc->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007)
		return -1;

	return 0;
}

/*
 * Translate the device's private two-wheel protocol into REL_WHEEL /
 * REL_HWHEEL events.  Returns 1 when the event was consumed here,
 * 0 to let the HID core process it normally.
 */
static int a4_event(struct hid_device *hdev, struct hid_field *field,
		struct hid_usage *usage, __s32 value)
{
	struct a4tech_sc *sc = hid_get_drvdata(hdev);
	struct input_dev *dev;

	if (!(hdev->claimed & HID_CLAIMED_INPUT))
		return 0;
	if (!field->hidinput || !usage->type)
		return 0;

	dev = field->hidinput->input;

	if (sc->quirks & A4_2WHEEL_MOUSE_HACK_B8) {
		if (usage->type == EV_REL && usage->code == REL_WHEEL) {
			/* Hold the motion until the direction report arrives. */
			sc->delayed_value = value;
			return 1;
		}
		if (usage->hid == 0x000100b8) {
			/* Direction report: replay the stashed motion on the
			 * horizontal or vertical axis as indicated. */
			input_event(dev, EV_REL,
				    value ? REL_HWHEEL : REL_WHEEL,
				    sc->delayed_value);
			return 1;
		}
	}

	if ((sc->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007) {
		/* Mode toggle: nonzero selects horizontal scrolling. */
		sc->hw_wheel = !!value;
		return 1;
	}

	if (usage->code == REL_WHEEL && sc->hw_wheel) {
		/* Horizontal mode active: redirect wheel motion. */
		input_event(dev, usage->type, REL_HWHEEL, value);
		return 1;
	}

	return 0;
}

/* Allocate per-device state, then hand off to the generic HID core. */
static int a4_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	struct a4tech_sc *sc;
	int rc;

	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	if (sc == NULL) {
		hid_err(hdev, "can't alloc device descriptor\n");
		rc = -ENOMEM;
		goto fail;
	}

	sc->quirks = id->driver_data;
	hid_set_drvdata(hdev, sc);

	rc = hid_parse(hdev);
	if (rc) {
		hid_err(hdev, "parse failed\n");
		goto fail;
	}

	rc = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (rc) {
		hid_err(hdev, "hw start failed\n");
		goto fail;
	}

	return 0;

fail:
	kfree(sc);	/* kfree(NULL) is a no-op, safe on the alloc path */
	return rc;
}

/* Stop the hardware and release the per-device state. */
static void a4_remove(struct hid_device *hdev)
{
	struct a4tech_sc *sc = hid_get_drvdata(hdev);

	hid_hw_stop(hdev);
	kfree(sc);
}

static const struct hid_device_id a4_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU),
		.driver_data = A4_2WHEEL_MOUSE_HACK_7 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D),
		.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649),
		.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
	{ }
};
MODULE_DEVICE_TABLE(hid, a4_devices);

static struct hid_driver a4_driver = {
	.name = "a4tech",
	.id_table = a4_devices,
	.input_mapped = a4_input_mapped,
	.event = a4_event,
	.probe = a4_probe,
	.remove = a4_remove,
};

static int __init a4_init(void)
{
	return hid_register_driver(&a4_driver);
}

static void __exit a4_exit(void)
{
	hid_unregister_driver(&a4_driver);
}

module_init(a4_init);
module_exit(a4_exit);
MODULE_LICENSE("GPL");
gpl-2.0
CalmYak/N1-Kernel-Source-4.2
drivers/hid/hid-a4tech.c
8239
3731
/*
 * HID driver for some a4tech "special" devices
 *
 * Copyright (c) 1999 Andreas Gal
 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 * Copyright (c) 2006-2007 Jiri Kosina
 * Copyright (c) 2007 Paul Walmsley
 * Copyright (c) 2008 Jiri Slaby
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hid-ids.h"

/* Quirk flags, selected per device via driver_data in a4_devices[]. */
#define A4_2WHEEL_MOUSE_HACK_7		0x01
#define A4_2WHEEL_MOUSE_HACK_B8		0x02

/* Per-device state kept in HID drvdata. */
struct a4tech_sc {
	unsigned long quirks;		/* A4_2WHEEL_MOUSE_HACK_* flags */
	unsigned int hw_wheel;		/* nonzero: wheel reported as horizontal */
	__s32 delayed_value;		/* stashed wheel motion (HACK_B8 protocol) */
};

/*
 * Mapping hook: advertise REL_HWHEEL wherever a vertical wheel exists,
 * and suppress mapping of the quirk usage 0x00090007 on HACK_7 devices
 * (it is consumed in a4_event() instead).
 */
static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	struct a4tech_sc *a4 = hid_get_drvdata(hdev);

	if (usage->type == EV_REL && usage->code == REL_WHEEL)
		set_bit(REL_HWHEEL, *bit);

	if ((a4->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007)
		return -1;

	return 0;
}

/*
 * Event hook: translate the device's private two-wheel protocol into
 * REL_WHEEL / REL_HWHEEL.  Returns 1 if the event was consumed here,
 * 0 to let the HID core handle it normally.
 */
static int a4_event(struct hid_device *hdev, struct hid_field *field,
		struct hid_usage *usage, __s32 value)
{
	struct a4tech_sc *a4 = hid_get_drvdata(hdev);
	struct input_dev *input;

	if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
			!usage->type)
		return 0;

	input = field->hidinput->input;

	if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8) {
		/* Wheel motion arrives first: stash it until the 0x000100b8
		 * direction report tells us which axis it belongs to. */
		if (usage->type == EV_REL && usage->code == REL_WHEEL) {
			a4->delayed_value = value;
			return 1;
		}

		if (usage->hid == 0x000100b8) {
			input_event(input, EV_REL, value ? REL_HWHEEL :
					REL_WHEEL, a4->delayed_value);
			return 1;
		}
	}

	/* HACK_7 mode toggle: nonzero selects horizontal scrolling. */
	if ((a4->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007) {
		a4->hw_wheel = !!value;
		return 1;
	}

	/* Horizontal mode active: redirect wheel motion to REL_HWHEEL. */
	if (usage->code == REL_WHEEL && a4->hw_wheel) {
		input_event(input, usage->type, REL_HWHEEL, value);
		return 1;
	}

	return 0;
}

/* Allocate per-device state, then start the generic HID machinery. */
static int a4_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	struct a4tech_sc *a4;
	int ret;

	a4 = kzalloc(sizeof(*a4), GFP_KERNEL);
	if (a4 == NULL) {
		hid_err(hdev, "can't alloc device descriptor\n");
		ret = -ENOMEM;
		goto err_free;	/* kfree(NULL) is a no-op */
	}

	a4->quirks = id->driver_data;

	hid_set_drvdata(hdev, a4);

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		goto err_free;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		goto err_free;
	}

	return 0;
err_free:
	kfree(a4);
	return ret;
}

/* Stop the hardware and release the per-device state. */
static void a4_remove(struct hid_device *hdev)
{
	struct a4tech_sc *a4 = hid_get_drvdata(hdev);

	hid_hw_stop(hdev);
	kfree(a4);
}

static const struct hid_device_id a4_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU),
		.driver_data = A4_2WHEEL_MOUSE_HACK_7 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D),
		.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649),
		.driver_data = A4_2WHEEL_MOUSE_HACK_B8 },
	{ }
};
MODULE_DEVICE_TABLE(hid, a4_devices);

static struct hid_driver a4_driver = {
	.name = "a4tech",
	.id_table = a4_devices,
	.input_mapped = a4_input_mapped,
	.event = a4_event,
	.probe = a4_probe,
	.remove = a4_remove,
};

static int __init a4_init(void)
{
	return hid_register_driver(&a4_driver);
}

static void __exit a4_exit(void)
{
	hid_unregister_driver(&a4_driver);
}

module_init(a4_init);
module_exit(a4_exit);
MODULE_LICENSE("GPL");
gpl-2.0
rogersb11/android_kernel_samsung_smdk4412
arch/cris/arch-v10/drivers/eeprom.c
11311
22088
/*!***************************************************************************** *! *! Implements an interface for i2c compatible eeproms to run under Linux. *! Supports 2k, 8k(?) and 16k. Uses adaptive timing adjustments by *! Johan.Adolfsson@axis.com *! *! Probing results: *! 8k or not is detected (the assumes 2k or 16k) *! 2k or 16k detected using test reads and writes. *! *!------------------------------------------------------------------------ *! HISTORY *! *! DATE NAME CHANGES *! ---- ---- ------- *! Aug 28 1999 Edgar Iglesias Initial Version *! Aug 31 1999 Edgar Iglesias Allow simultaneous users. *! Sep 03 1999 Edgar Iglesias Updated probe. *! Sep 03 1999 Edgar Iglesias Added bail-out stuff if we get interrupted *! in the spin-lock. *! *! (c) 1999 Axis Communications AB, Lund, Sweden *!*****************************************************************************/ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/wait.h> #include <asm/uaccess.h> #include "i2c.h" #define D(x) /* If we should use adaptive timing or not: */ /* #define EEPROM_ADAPTIVE_TIMING */ #define EEPROM_MAJOR_NR 122 /* use a LOCAL/EXPERIMENTAL major for now */ #define EEPROM_MINOR_NR 0 /* Empirical sane initial value of the delay, the value will be adapted to * what the chip needs when using EEPROM_ADAPTIVE_TIMING. */ #define INITIAL_WRITEDELAY_US 4000 #define MAX_WRITEDELAY_US 10000 /* 10 ms according to spec for 2KB EEPROM */ /* This one defines how many times to try when eeprom fails. */ #define EEPROM_RETRIES 10 #define EEPROM_2KB (2 * 1024) /*#define EEPROM_4KB (4 * 1024)*/ /* Exists but not used in Axis products */ #define EEPROM_8KB (8 * 1024 - 1 ) /* Last byte has write protection bit */ #define EEPROM_16KB (16 * 1024) #define i2c_delay(x) udelay(x) /* * This structure describes the attached eeprom chip. * The values are probed for. 
*/ struct eeprom_type { unsigned long size; unsigned long sequential_write_pagesize; unsigned char select_cmd; unsigned long usec_delay_writecycles; /* Min time between write cycles (up to 10ms for some models) */ unsigned long usec_delay_step; /* For adaptive algorithm */ int adapt_state; /* 1 = To high , 0 = Even, -1 = To low */ /* this one is to keep the read/write operations atomic */ struct mutex lock; int retry_cnt_addr; /* Used to keep track of number of retries for adaptive timing adjustments */ int retry_cnt_read; }; static int eeprom_open(struct inode * inode, struct file * file); static loff_t eeprom_lseek(struct file * file, loff_t offset, int orig); static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t *off); static ssize_t eeprom_write(struct file * file, const char * buf, size_t count, loff_t *off); static int eeprom_close(struct inode * inode, struct file * file); static int eeprom_address(unsigned long addr); static int read_from_eeprom(char * buf, int count); static int eeprom_write_buf(loff_t addr, const char * buf, int count); static int eeprom_read_buf(loff_t addr, char * buf, int count); static void eeprom_disable_write_protect(void); static const char eeprom_name[] = "eeprom"; /* chip description */ static struct eeprom_type eeprom; /* This is the exported file-operations structure for this device. */ const struct file_operations eeprom_fops = { .llseek = eeprom_lseek, .read = eeprom_read, .write = eeprom_write, .open = eeprom_open, .release = eeprom_close }; /* eeprom init call. Probes for different eeprom models. 
*/ int __init eeprom_init(void) { mutex_init(&eeprom.lock); #ifdef CONFIG_ETRAX_I2C_EEPROM_PROBE #define EETEXT "Found" #else #define EETEXT "Assuming" #endif if (register_chrdev(EEPROM_MAJOR_NR, eeprom_name, &eeprom_fops)) { printk(KERN_INFO "%s: unable to get major %d for eeprom device\n", eeprom_name, EEPROM_MAJOR_NR); return -1; } printk("EEPROM char device v0.3, (c) 2000 Axis Communications AB\n"); /* * Note: Most of this probing method was taken from the printserver (5470e) * codebase. It did not contain a way of finding the 16kB chips * (M24128 or variants). The method used here might not work * for all models. If you encounter problems the easiest way * is probably to define your model within #ifdef's, and hard- * code it. */ eeprom.size = 0; eeprom.usec_delay_writecycles = INITIAL_WRITEDELAY_US; eeprom.usec_delay_step = 128; eeprom.adapt_state = 0; #ifdef CONFIG_ETRAX_I2C_EEPROM_PROBE i2c_start(); i2c_outbyte(0x80); if(!i2c_getack()) { /* It's not 8k.. */ int success = 0; unsigned char buf_2k_start[16]; /* Im not sure this will work... :) */ /* assume 2kB, if failure go for 16kB */ /* Test with 16kB settings.. */ /* If it's a 2kB EEPROM and we address it outside it's range * it will mirror the address space: * 1. We read two locations (that are mirrored), * if the content differs * it's a 16kB EEPROM. * 2. if it doesn't differ - write different value to one of the locations, * check the other - if content still is the same it's a 2k EEPROM, * restore original data. 
*/ #define LOC1 8 #define LOC2 (0x1fb) /*1fb, 3ed, 5df, 7d1 */ /* 2k settings */ i2c_stop(); eeprom.size = EEPROM_2KB; eeprom.select_cmd = 0xA0; eeprom.sequential_write_pagesize = 16; if( eeprom_read_buf( 0, buf_2k_start, 16 ) == 16 ) { D(printk("2k start: '%16.16s'\n", buf_2k_start)); } else { printk(KERN_INFO "%s: Failed to read in 2k mode!\n", eeprom_name); } /* 16k settings */ eeprom.size = EEPROM_16KB; eeprom.select_cmd = 0xA0; eeprom.sequential_write_pagesize = 64; { unsigned char loc1[4], loc2[4], tmp[4]; if( eeprom_read_buf(LOC2, loc2, 4) == 4) { if( eeprom_read_buf(LOC1, loc1, 4) == 4) { D(printk("0 loc1: (%i) '%4.4s' loc2 (%i) '%4.4s'\n", LOC1, loc1, LOC2, loc2)); #if 0 if (memcmp(loc1, loc2, 4) != 0 ) { /* It's 16k */ printk(KERN_INFO "%s: 16k detected in step 1\n", eeprom_name); eeprom.size = EEPROM_16KB; success = 1; } else #endif { /* Do step 2 check */ /* Invert value */ loc1[0] = ~loc1[0]; if (eeprom_write_buf(LOC1, loc1, 1) == 1) { /* If 2k EEPROM this write will actually write 10 bytes * from pos 0 */ D(printk("1 loc1: (%i) '%4.4s' loc2 (%i) '%4.4s'\n", LOC1, loc1, LOC2, loc2)); if( eeprom_read_buf(LOC1, tmp, 4) == 4) { D(printk("2 loc1: (%i) '%4.4s' tmp '%4.4s'\n", LOC1, loc1, tmp)); if (memcmp(loc1, tmp, 4) != 0 ) { printk(KERN_INFO "%s: read and write differs! 
Not 16kB\n", eeprom_name); loc1[0] = ~loc1[0]; if (eeprom_write_buf(LOC1, loc1, 1) == 1) { success = 1; } else { printk(KERN_INFO "%s: Restore 2k failed during probe," " EEPROM might be corrupt!\n", eeprom_name); } i2c_stop(); /* Go to 2k mode and write original data */ eeprom.size = EEPROM_2KB; eeprom.select_cmd = 0xA0; eeprom.sequential_write_pagesize = 16; if( eeprom_write_buf(0, buf_2k_start, 16) == 16) { } else { printk(KERN_INFO "%s: Failed to write back 2k start!\n", eeprom_name); } eeprom.size = EEPROM_2KB; } } if(!success) { if( eeprom_read_buf(LOC2, loc2, 1) == 1) { D(printk("0 loc1: (%i) '%4.4s' loc2 (%i) '%4.4s'\n", LOC1, loc1, LOC2, loc2)); if (memcmp(loc1, loc2, 4) == 0 ) { /* Data the same, must be mirrored -> 2k */ /* Restore data */ printk(KERN_INFO "%s: 2k detected in step 2\n", eeprom_name); loc1[0] = ~loc1[0]; if (eeprom_write_buf(LOC1, loc1, 1) == 1) { success = 1; } else { printk(KERN_INFO "%s: Restore 2k failed during probe," " EEPROM might be corrupt!\n", eeprom_name); } eeprom.size = EEPROM_2KB; } else { printk(KERN_INFO "%s: 16k detected in step 2\n", eeprom_name); loc1[0] = ~loc1[0]; /* Data differs, assume 16k */ /* Restore data */ if (eeprom_write_buf(LOC1, loc1, 1) == 1) { success = 1; } else { printk(KERN_INFO "%s: Restore 16k failed during probe," " EEPROM might be corrupt!\n", eeprom_name); } eeprom.size = EEPROM_16KB; } } } } } /* read LOC1 */ } /* address LOC1 */ if (!success) { printk(KERN_INFO "%s: Probing failed!, using 2KB!\n", eeprom_name); eeprom.size = EEPROM_2KB; } } /* read */ } } else { i2c_outbyte(0x00); if(!i2c_getack()) { /* No 8k */ eeprom.size = EEPROM_2KB; } else { i2c_start(); i2c_outbyte(0x81); if (!i2c_getack()) { eeprom.size = EEPROM_2KB; } else { /* It's a 8kB */ i2c_inbyte(); eeprom.size = EEPROM_8KB; } } } i2c_stop(); #elif defined(CONFIG_ETRAX_I2C_EEPROM_16KB) eeprom.size = EEPROM_16KB; #elif defined(CONFIG_ETRAX_I2C_EEPROM_8KB) eeprom.size = EEPROM_8KB; #elif defined(CONFIG_ETRAX_I2C_EEPROM_2KB) 
eeprom.size = EEPROM_2KB; #endif switch(eeprom.size) { case (EEPROM_2KB): printk("%s: " EETEXT " i2c compatible 2kB eeprom.\n", eeprom_name); eeprom.sequential_write_pagesize = 16; eeprom.select_cmd = 0xA0; break; case (EEPROM_8KB): printk("%s: " EETEXT " i2c compatible 8kB eeprom.\n", eeprom_name); eeprom.sequential_write_pagesize = 16; eeprom.select_cmd = 0x80; break; case (EEPROM_16KB): printk("%s: " EETEXT " i2c compatible 16kB eeprom.\n", eeprom_name); eeprom.sequential_write_pagesize = 64; eeprom.select_cmd = 0xA0; break; default: eeprom.size = 0; printk("%s: Did not find a supported eeprom\n", eeprom_name); break; } eeprom_disable_write_protect(); return 0; } /* Opens the device. */ static int eeprom_open(struct inode * inode, struct file * file) { if(iminor(inode) != EEPROM_MINOR_NR) return -ENXIO; if(imajor(inode) != EEPROM_MAJOR_NR) return -ENXIO; if( eeprom.size > 0 ) { /* OK */ return 0; } /* No EEprom found */ return -EFAULT; } /* Changes the current file position. */ static loff_t eeprom_lseek(struct file * file, loff_t offset, int orig) { /* * orig 0: position from begning of eeprom * orig 1: relative from current position * orig 2: position from last eeprom address */ switch (orig) { case 0: file->f_pos = offset; break; case 1: file->f_pos += offset; break; case 2: file->f_pos = eeprom.size - offset; break; default: return -EINVAL; } /* truncate position */ if (file->f_pos < 0) { file->f_pos = 0; return(-EOVERFLOW); } if (file->f_pos >= eeprom.size) { file->f_pos = eeprom.size - 1; return(-EOVERFLOW); } return ( file->f_pos ); } /* Reads data from eeprom. */ static int eeprom_read_buf(loff_t addr, char * buf, int count) { return eeprom_read(NULL, buf, count, &addr); } /* Reads data from eeprom. 
*/ static ssize_t eeprom_read(struct file * file, char * buf, size_t count, loff_t *off) { int read=0; unsigned long p = *off; unsigned char page; if(p >= eeprom.size) /* Address i 0 - (size-1) */ { return -EFAULT; } if (mutex_lock_interruptible(&eeprom.lock)) return -EINTR; page = (unsigned char) (p >> 8); if(!eeprom_address(p)) { printk(KERN_INFO "%s: Read failed to address the eeprom: " "0x%08X (%i) page: %i\n", eeprom_name, (int)p, (int)p, page); i2c_stop(); /* don't forget to wake them up */ mutex_unlock(&eeprom.lock); return -EFAULT; } if( (p + count) > eeprom.size) { /* truncate count */ count = eeprom.size - p; } /* stop dummy write op and initiate the read op */ i2c_start(); /* special case for small eeproms */ if(eeprom.size < EEPROM_16KB) { i2c_outbyte( eeprom.select_cmd | 1 | (page << 1) ); } /* go on with the actual read */ read = read_from_eeprom( buf, count); if(read > 0) { *off += read; } mutex_unlock(&eeprom.lock); return read; } /* Writes data to eeprom. */ static int eeprom_write_buf(loff_t addr, const char * buf, int count) { return eeprom_write(NULL, buf, count, &addr); } /* Writes data to eeprom. 
*/ static ssize_t eeprom_write(struct file * file, const char * buf, size_t count, loff_t *off) { int i, written, restart=1; unsigned long p; if (!access_ok(VERIFY_READ, buf, count)) { return -EFAULT; } /* bail out if we get interrupted */ if (mutex_lock_interruptible(&eeprom.lock)) return -EINTR; for(i = 0; (i < EEPROM_RETRIES) && (restart > 0); i++) { restart = 0; written = 0; p = *off; while( (written < count) && (p < eeprom.size)) { /* address the eeprom */ if(!eeprom_address(p)) { printk(KERN_INFO "%s: Write failed to address the eeprom: " "0x%08X (%i) \n", eeprom_name, (int)p, (int)p); i2c_stop(); /* don't forget to wake them up */ mutex_unlock(&eeprom.lock); return -EFAULT; } #ifdef EEPROM_ADAPTIVE_TIMING /* Adaptive algorithm to adjust timing */ if (eeprom.retry_cnt_addr > 0) { /* To Low now */ D(printk(">D=%i d=%i\n", eeprom.usec_delay_writecycles, eeprom.usec_delay_step)); if (eeprom.usec_delay_step < 4) { eeprom.usec_delay_step++; eeprom.usec_delay_writecycles += eeprom.usec_delay_step; } else { if (eeprom.adapt_state > 0) { /* To Low before */ eeprom.usec_delay_step *= 2; if (eeprom.usec_delay_step > 2) { eeprom.usec_delay_step--; } eeprom.usec_delay_writecycles += eeprom.usec_delay_step; } else if (eeprom.adapt_state < 0) { /* To High before (toggle dir) */ eeprom.usec_delay_writecycles += eeprom.usec_delay_step; if (eeprom.usec_delay_step > 1) { eeprom.usec_delay_step /= 2; eeprom.usec_delay_step--; } } } eeprom.adapt_state = 1; } else { /* To High (or good) now */ D(printk("<D=%i d=%i\n", eeprom.usec_delay_writecycles, eeprom.usec_delay_step)); if (eeprom.adapt_state < 0) { /* To High before */ if (eeprom.usec_delay_step > 1) { eeprom.usec_delay_step *= 2; eeprom.usec_delay_step--; if (eeprom.usec_delay_writecycles > eeprom.usec_delay_step) { eeprom.usec_delay_writecycles -= eeprom.usec_delay_step; } } } else if (eeprom.adapt_state > 0) { /* To Low before (toggle dir) */ if (eeprom.usec_delay_writecycles > eeprom.usec_delay_step) { 
eeprom.usec_delay_writecycles -= eeprom.usec_delay_step; } if (eeprom.usec_delay_step > 1) { eeprom.usec_delay_step /= 2; eeprom.usec_delay_step--; } eeprom.adapt_state = -1; } if (eeprom.adapt_state > -100) { eeprom.adapt_state--; } else { /* Restart adaption */ D(printk("#Restart\n")); eeprom.usec_delay_step++; } } #endif /* EEPROM_ADAPTIVE_TIMING */ /* write until we hit a page boundary or count */ do { i2c_outbyte(buf[written]); if(!i2c_getack()) { restart=1; printk(KERN_INFO "%s: write error, retrying. %d\n", eeprom_name, i); i2c_stop(); break; } written++; p++; } while( written < count && ( p % eeprom.sequential_write_pagesize )); /* end write cycle */ i2c_stop(); i2c_delay(eeprom.usec_delay_writecycles); } /* while */ } /* for */ mutex_unlock(&eeprom.lock); if (written == 0 && p >= eeprom.size){ return -ENOSPC; } *off = p; return written; } /* Closes the device. */ static int eeprom_close(struct inode * inode, struct file * file) { /* do nothing for now */ return 0; } /* Sets the current address of the eeprom. */ static int eeprom_address(unsigned long addr) { int i; unsigned char page, offset; page = (unsigned char) (addr >> 8); offset = (unsigned char) addr; for(i = 0; i < EEPROM_RETRIES; i++) { /* start a dummy write for addressing */ i2c_start(); if(eeprom.size == EEPROM_16KB) { i2c_outbyte( eeprom.select_cmd ); i2c_getack(); i2c_outbyte(page); } else { i2c_outbyte( eeprom.select_cmd | (page << 1) ); } if(!i2c_getack()) { /* retry */ i2c_stop(); /* Must have a delay here.. 500 works, >50, 100->works 5th time*/ i2c_delay(MAX_WRITEDELAY_US / EEPROM_RETRIES * i); /* The chip needs up to 10 ms from write stop to next start */ } else { i2c_outbyte(offset); if(!i2c_getack()) { /* retry */ i2c_stop(); } else break; } } eeprom.retry_cnt_addr = i; D(printk("%i\n", eeprom.retry_cnt_addr)); if(eeprom.retry_cnt_addr == EEPROM_RETRIES) { /* failed */ return 0; } return 1; } /* Reads from current address. 
*/ static int read_from_eeprom(char * buf, int count) { int i, read=0; for(i = 0; i < EEPROM_RETRIES; i++) { if(eeprom.size == EEPROM_16KB) { i2c_outbyte( eeprom.select_cmd | 1 ); } if(i2c_getack()) { break; } } if(i == EEPROM_RETRIES) { printk(KERN_INFO "%s: failed to read from eeprom\n", eeprom_name); i2c_stop(); return -EFAULT; } while( (read < count)) { if (put_user(i2c_inbyte(), &buf[read++])) { i2c_stop(); return -EFAULT; } /* * make sure we don't ack last byte or you will get very strange * results! */ if(read < count) { i2c_sendack(); } } /* stop the operation */ i2c_stop(); return read; } /* Disables write protection if applicable. */ #define DBP_SAVE(x) #define ax_printf printk static void eeprom_disable_write_protect(void) { /* Disable write protect */ if (eeprom.size == EEPROM_8KB) { /* Step 1 Set WEL = 1 (write 00000010 to address 1FFFh */ i2c_start(); i2c_outbyte(0xbe); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false\n")); } i2c_outbyte(0xFF); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 2\n")); } i2c_outbyte(0x02); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 3\n")); } i2c_stop(); i2c_delay(1000); /* Step 2 Set RWEL = 1 (write 00000110 to address 1FFFh */ i2c_start(); i2c_outbyte(0xbe); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 55\n")); } i2c_outbyte(0xFF); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 52\n")); } i2c_outbyte(0x06); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 53\n")); } i2c_stop(); /* Step 3 Set BP1, BP0, and/or WPEN bits (write 00000110 to address 1FFFh */ i2c_start(); i2c_outbyte(0xbe); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 56\n")); } i2c_outbyte(0xFF); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 57\n")); } i2c_outbyte(0x06); if(!i2c_getack()) { DBP_SAVE(ax_printf("Get ack returns false 58\n")); } i2c_stop(); /* Write protect disabled */ } } module_init(eeprom_init);
gpl-2.0
ishtob/qmk_firmware
keyboards/basekeys/trifecta/trifecta.c
48
2960
/* Copyright 2020 Swiftrax and Basekeys.com
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "trifecta.h"

#ifdef RGB_MATRIX_ENABLE
/*
 * RGB matrix configuration: 80 LEDs total — indices 0-13 are underglow,
 * 14-79 sit under keys (see the flags array: 2 = underglow, 4 = per-key).
 */
led_config_t g_led_config = { {
	/* Key-matrix position -> LED index (NO_LED = no LED at that slot). */
	{ NO_LED, 70,     61,     52,     43,     34,     24,     14     },
	{ 79,     69,     60,     51,     42,     33,     23,     NO_LED },
	{ NO_LED, 71,     62,     53,     44,     35,     25,     15     },
	{ 78,     68,     59,     50,     41,     32,     22,     NO_LED },
	{ 77,     67,     58,     49,     40,     31,     NO_LED, 16     },
	{ NO_LED, 72,     63,     54,     45,     36,     26,     NO_LED },
	{ NO_LED, 73,     64,     55,     46,     37,     27,     17     },
	{ 76,     66,     57,     48,     39,     30,     21,     NO_LED },
	{ NO_LED, 74,     56,     NO_LED, NO_LED, 38,     28,     18     },
	{ 75,     65,     NO_LED, NO_LED, 47,     29,     20,     19     }
}, {
	/* Physical { x, y } position of each LED on the 224x64 grid. */
	// Underglow
	{ 218, 7 }, { 214, 45 }, { 180, 47 }, { 147, 50 }, { 94, 62 },
	{ 37, 51 }, { 4, 8 }, { 36, 15 }, { 62, 18 }, { 78, 5 },
	{ 119, 7 }, { 145, 16 }, { 166, 3 }, { 200, 16 },
	//Per Key
	{ 185, 11 }, { 191, 22 }, { 188, 33 }, { 200, 46 }, { 200, 57 },
	{ 212, 57 }, { 188, 57 }, { 181, 44 }, { 176, 22 }, { 168, 11 },
	{ 155, 11 }, { 164, 22 }, { 169, 32 }, { 165, 44 }, { 172, 55 },
	{ 157, 54 }, { 153, 44 }, { 156, 33 }, { 153, 22 }, { 142, 12 },
	{ 130, 13 }, { 138, 23 }, { 143, 33 }, { 140, 45 }, { 137, 56 },
	{ 128, 47 }, { 132, 35 }, { 127, 25 }, { 119, 16 }, { 107, 17 },
	{ 115, 27 }, { 120, 37 }, { 116, 48 }, { 117, 59 }, { 104, 51 },
	{ 109, 39 }, { 104, 29 }, { 86, 19 }, { 74, 16 }, { 78, 28 },
	{ 79, 39 }, { 82, 50 }, { 73, 60 }, { 71, 48 }, { 67, 37 },
	{ 66, 26 }, { 63, 15 }, { 50, 12 }, { 55, 24 }, { 55, 35 },
	{ 59, 47 }, { 50, 56 }, { 47, 45 }, { 43, 33 }, { 43, 22 },
	{ 39, 10 }, { 25, 11 }, { 29, 22 }, { 30, 33 }, { 34, 44 },
	{ 24, 54 }, { 9, 55 }, { 15, 44 }, { 14, 33 }, { 14, 22 },
	{ 14, 11 }
}, {
	/* LED flags: 2 = underglow (14 LEDs), 4 = per-key (66 LEDs). */
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	4, 4, 4, 4, 4, 4
} };
#endif
gpl-2.0
ahmedammar/linux-xlnx
fs/xfs/xfs_iops.c
48
29209
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_acl.h" #include "xfs_log.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_alloc.h" #include "xfs_quota.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_itable.h" #include "xfs_attr.h" #include "xfs_buf_item.h" #include "xfs_utils.h" #include "xfs_vnodeops.h" #include "xfs_inode_item.h" #include "xfs_trace.h" #include <linux/capability.h> #include <linux/xattr.h> #include <linux/namei.h> #include <linux/posix_acl.h> #include <linux/security.h> #include <linux/fiemap.h> #include <linux/slab.h> static int xfs_initxattrs( struct inode *inode, const struct xattr *xattr_array, void *fs_info) { const struct xattr *xattr; struct xfs_inode *ip = XFS_I(inode); int error = 0; for (xattr = xattr_array; xattr->name != NULL; xattr++) { error = xfs_attr_set(ip, xattr->name, xattr->value, xattr->value_len, ATTR_SECURE); if (error < 0) break; } return error; } /* * Hook in SELinux. 
This is not quite correct yet, what we really need * here (as we do for default ACLs) is a mechanism by which creation of * these attrs can be journalled at inode creation time (along with the * inode, of course, such that log replay can't cause these to be lost). */ STATIC int xfs_init_security( struct inode *inode, struct inode *dir, const struct qstr *qstr) { return security_inode_init_security(inode, dir, qstr, &xfs_initxattrs, NULL); } static void xfs_dentry_to_name( struct xfs_name *namep, struct dentry *dentry) { namep->name = dentry->d_name.name; namep->len = dentry->d_name.len; } STATIC void xfs_cleanup_inode( struct inode *dir, struct inode *inode, struct dentry *dentry) { struct xfs_name teardown; /* Oh, the horror. * If we can't add the ACL or we fail in * xfs_init_security we must back out. * ENOSPC can hit here, among other things. */ xfs_dentry_to_name(&teardown, dentry); xfs_remove(XFS_I(dir), &teardown, XFS_I(inode)); iput(inode); } STATIC int xfs_vn_mknod( struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct inode *inode; struct xfs_inode *ip = NULL; struct posix_acl *default_acl = NULL; struct xfs_name name; int error; /* * Irix uses Missed'em'V split, but doesn't want to see * the upper 5 bits of (14bit) major. 
*/ if (S_ISCHR(mode) || S_ISBLK(mode)) { if (unlikely(!sysv_valid_dev(rdev) || MAJOR(rdev) & ~0x1ff)) return -EINVAL; rdev = sysv_encode_dev(rdev); } else { rdev = 0; } if (IS_POSIXACL(dir)) { default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT); if (IS_ERR(default_acl)) return PTR_ERR(default_acl); if (!default_acl) mode &= ~current_umask(); } xfs_dentry_to_name(&name, dentry); error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip); if (unlikely(error)) goto out_free_acl; inode = VFS_I(ip); error = xfs_init_security(inode, dir, &dentry->d_name); if (unlikely(error)) goto out_cleanup_inode; if (default_acl) { error = -xfs_inherit_acl(inode, default_acl); default_acl = NULL; if (unlikely(error)) goto out_cleanup_inode; } d_instantiate(dentry, inode); return -error; out_cleanup_inode: xfs_cleanup_inode(dir, inode, dentry); out_free_acl: posix_acl_release(default_acl); return -error; } STATIC int xfs_vn_create( struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd) { return xfs_vn_mknod(dir, dentry, mode, 0); } STATIC int xfs_vn_mkdir( struct inode *dir, struct dentry *dentry, umode_t mode) { return xfs_vn_mknod(dir, dentry, mode|S_IFDIR, 0); } STATIC struct dentry * xfs_vn_lookup( struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct xfs_inode *cip; struct xfs_name name; int error; if (dentry->d_name.len >= MAXNAMELEN) return ERR_PTR(-ENAMETOOLONG); xfs_dentry_to_name(&name, dentry); error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); if (unlikely(error)) { if (unlikely(error != ENOENT)) return ERR_PTR(-error); d_add(dentry, NULL); return NULL; } return d_splice_alias(VFS_I(cip), dentry); } STATIC struct dentry * xfs_vn_ci_lookup( struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct xfs_inode *ip; struct xfs_name xname; struct xfs_name ci_name; struct qstr dname; int error; if (dentry->d_name.len >= MAXNAMELEN) return ERR_PTR(-ENAMETOOLONG); xfs_dentry_to_name(&xname, dentry); error = xfs_lookup(XFS_I(dir), 
&xname, &ip, &ci_name); if (unlikely(error)) { if (unlikely(error != ENOENT)) return ERR_PTR(-error); /* * call d_add(dentry, NULL) here when d_drop_negative_children * is called in xfs_vn_mknod (ie. allow negative dentries * with CI filesystems). */ return NULL; } /* if exact match, just splice and exit */ if (!ci_name.name) return d_splice_alias(VFS_I(ip), dentry); /* else case-insensitive match... */ dname.name = ci_name.name; dname.len = ci_name.len; dentry = d_add_ci(dentry, VFS_I(ip), &dname); kmem_free(ci_name.name); return dentry; } STATIC int xfs_vn_link( struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; struct xfs_name name; int error; xfs_dentry_to_name(&name, dentry); error = xfs_link(XFS_I(dir), XFS_I(inode), &name); if (unlikely(error)) return -error; ihold(inode); d_instantiate(dentry, inode); return 0; } STATIC int xfs_vn_unlink( struct inode *dir, struct dentry *dentry) { struct xfs_name name; int error; xfs_dentry_to_name(&name, dentry); error = -xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode)); if (error) return error; /* * With unlink, the VFS makes the dentry "negative": no inode, * but still hashed. This is incompatible with case-insensitive * mode, so invalidate (unhash) the dentry in CI-mode. */ if (xfs_sb_version_hasasciici(&XFS_M(dir->i_sb)->m_sb)) d_invalidate(dentry); return 0; } STATIC int xfs_vn_symlink( struct inode *dir, struct dentry *dentry, const char *symname) { struct inode *inode; struct xfs_inode *cip = NULL; struct xfs_name name; int error; umode_t mode; mode = S_IFLNK | (irix_symlink_mode ? 
0777 & ~current_umask() : S_IRWXUGO); xfs_dentry_to_name(&name, dentry); error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip); if (unlikely(error)) goto out; inode = VFS_I(cip); error = xfs_init_security(inode, dir, &dentry->d_name); if (unlikely(error)) goto out_cleanup_inode; d_instantiate(dentry, inode); return 0; out_cleanup_inode: xfs_cleanup_inode(dir, inode, dentry); out: return -error; } STATIC int xfs_vn_rename( struct inode *odir, struct dentry *odentry, struct inode *ndir, struct dentry *ndentry) { struct inode *new_inode = ndentry->d_inode; struct xfs_name oname; struct xfs_name nname; xfs_dentry_to_name(&oname, odentry); xfs_dentry_to_name(&nname, ndentry); return -xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), XFS_I(ndir), &nname, new_inode ? XFS_I(new_inode) : NULL); } /* * careful here - this function can get called recursively, so * we need to be very careful about how much stack we use. * uio is kmalloced for this reason... */ STATIC void * xfs_vn_follow_link( struct dentry *dentry, struct nameidata *nd) { char *link; int error = -ENOMEM; link = kmalloc(MAXPATHLEN+1, GFP_KERNEL); if (!link) goto out_err; error = -xfs_readlink(XFS_I(dentry->d_inode), link); if (unlikely(error)) goto out_kfree; nd_set_link(nd, link); return NULL; out_kfree: kfree(link); out_err: nd_set_link(nd, ERR_PTR(error)); return NULL; } STATIC void xfs_vn_put_link( struct dentry *dentry, struct nameidata *nd, void *p) { char *s = nd_get_link(nd); if (!IS_ERR(s)) kfree(s); } STATIC int xfs_vn_getattr( struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; trace_xfs_getattr(ip); if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); stat->size = XFS_ISIZE(ip); stat->dev = inode->i_sb->s_dev; stat->mode = ip->i_d.di_mode; stat->nlink = ip->i_d.di_nlink; stat->uid = ip->i_d.di_uid; stat->gid = ip->i_d.di_gid; stat->ino = ip->i_ino; stat->atime 
= inode->i_atime; stat->mtime = inode->i_mtime; stat->ctime = inode->i_ctime; stat->blocks = XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); switch (inode->i_mode & S_IFMT) { case S_IFBLK: case S_IFCHR: stat->blksize = BLKDEV_IOSIZE; stat->rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, sysv_minor(ip->i_df.if_u2.if_rdev)); break; default: if (XFS_IS_REALTIME_INODE(ip)) { /* * If the file blocks are being allocated from a * realtime volume, then return the inode's realtime * extent size or the realtime volume's extent size. */ stat->blksize = xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog; } else stat->blksize = xfs_preferred_iosize(mp); stat->rdev = 0; break; } return 0; } int xfs_setattr_nonsize( struct xfs_inode *ip, struct iattr *iattr, int flags) { xfs_mount_t *mp = ip->i_mount; struct inode *inode = VFS_I(ip); int mask = iattr->ia_valid; xfs_trans_t *tp; int error; uid_t uid = 0, iuid = 0; gid_t gid = 0, igid = 0; struct xfs_dquot *udqp = NULL, *gdqp = NULL; struct xfs_dquot *olddquot1 = NULL, *olddquot2 = NULL; trace_xfs_setattr(ip); if (mp->m_flags & XFS_MOUNT_RDONLY) return XFS_ERROR(EROFS); if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); error = -inode_change_ok(inode, iattr); if (error) return XFS_ERROR(error); ASSERT((mask & ATTR_SIZE) == 0); /* * If disk quotas is on, we make sure that the dquots do exist on disk, * before we start any other transactions. Trying to do this later * is messy. We don't care to take a readlock to look at the ids * in inode here, because we can't hold it across the trans_reserve. * If the IDs do change before we take the ilock, we're covered * because the i_*dquot fields will get updated anyway. 
*/ if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) { uint qflags = 0; if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) { uid = iattr->ia_uid; qflags |= XFS_QMOPT_UQUOTA; } else { uid = ip->i_d.di_uid; } if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) { gid = iattr->ia_gid; qflags |= XFS_QMOPT_GQUOTA; } else { gid = ip->i_d.di_gid; } /* * We take a reference when we initialize udqp and gdqp, * so it is important that we never blindly double trip on * the same variable. See xfs_create() for an example. */ ASSERT(udqp == NULL); ASSERT(gdqp == NULL); error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip), qflags, &udqp, &gdqp); if (error) return error; } tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); if (error) goto out_dqrele; xfs_ilock(ip, XFS_ILOCK_EXCL); /* * Change file ownership. Must be the owner or privileged. */ if (mask & (ATTR_UID|ATTR_GID)) { /* * These IDs could have changed since we last looked at them. * But, we're assured that if the ownership did change * while we didn't have the inode locked, inode's dquot(s) * would have changed also. */ iuid = ip->i_d.di_uid; igid = ip->i_d.di_gid; gid = (mask & ATTR_GID) ? iattr->ia_gid : igid; uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid; /* * Do a quota reservation only if uid/gid is actually * going to change. */ if (XFS_IS_QUOTA_RUNNING(mp) && ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || (XFS_IS_GQUOTA_ON(mp) && igid != gid))) { ASSERT(tp); error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0); if (error) /* out of quota */ goto out_trans_cancel; } } xfs_trans_ijoin(tp, ip, 0); /* * Change file ownership. Must be the owner or privileged. 
*/ if (mask & (ATTR_UID|ATTR_GID)) { /* * CAP_FSETID overrides the following restrictions: * * The set-user-ID and set-group-ID bits of a file will be * cleared upon successful return from chown() */ if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) && !capable(CAP_FSETID)) ip->i_d.di_mode &= ~(S_ISUID|S_ISGID); /* * Change the ownerships and register quota modifications * in the transaction. */ if (iuid != uid) { if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) { ASSERT(mask & ATTR_UID); ASSERT(udqp); olddquot1 = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp); } ip->i_d.di_uid = uid; inode->i_uid = uid; } if (igid != gid) { if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { ASSERT(!XFS_IS_PQUOTA_ON(mp)); ASSERT(mask & ATTR_GID); ASSERT(gdqp); olddquot2 = xfs_qm_vop_chown(tp, ip, &ip->i_gdquot, gdqp); } ip->i_d.di_gid = gid; inode->i_gid = gid; } } /* * Change file access modes. */ if (mask & ATTR_MODE) { umode_t mode = iattr->ia_mode; if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) mode &= ~S_ISGID; ip->i_d.di_mode &= S_IFMT; ip->i_d.di_mode |= mode & ~S_IFMT; inode->i_mode &= S_IFMT; inode->i_mode |= mode & ~S_IFMT; } /* * Change file access or modified times. */ if (mask & ATTR_ATIME) { inode->i_atime = iattr->ia_atime; ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; } if (mask & ATTR_CTIME) { inode->i_ctime = iattr->ia_ctime; ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; } if (mask & ATTR_MTIME) { inode->i_mtime = iattr->ia_mtime; ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; } xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); XFS_STATS_INC(xs_ig_attrchg); if (mp->m_flags & XFS_MOUNT_WSYNC) xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, 0); xfs_iunlock(ip, XFS_ILOCK_EXCL); /* * Release any dquot(s) the inode had kept before chown. 
*/ xfs_qm_dqrele(olddquot1); xfs_qm_dqrele(olddquot2); xfs_qm_dqrele(udqp); xfs_qm_dqrele(gdqp); if (error) return XFS_ERROR(error); /* * XXX(hch): Updating the ACL entries is not atomic vs the i_mode * update. We could avoid this with linked transactions * and passing down the transaction pointer all the way * to attr_set. No previous user of the generic * Posix ACL code seems to care about this issue either. */ if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) { error = -xfs_acl_chmod(inode); if (error) return XFS_ERROR(error); } return 0; out_trans_cancel: xfs_trans_cancel(tp, 0); xfs_iunlock(ip, XFS_ILOCK_EXCL); out_dqrele: xfs_qm_dqrele(udqp); xfs_qm_dqrele(gdqp); return error; } /* * Truncate file. Must have write permission and not be a directory. */ int xfs_setattr_size( struct xfs_inode *ip, struct iattr *iattr, int flags) { struct xfs_mount *mp = ip->i_mount; struct inode *inode = VFS_I(ip); int mask = iattr->ia_valid; xfs_off_t oldsize, newsize; struct xfs_trans *tp; int error; uint lock_flags = 0; uint commit_flags = 0; trace_xfs_setattr(ip); if (mp->m_flags & XFS_MOUNT_RDONLY) return XFS_ERROR(EROFS); if (XFS_FORCED_SHUTDOWN(mp)) return XFS_ERROR(EIO); error = -inode_change_ok(inode, iattr); if (error) return XFS_ERROR(error); ASSERT(S_ISREG(ip->i_d.di_mode)); ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID| ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); if (!(flags & XFS_ATTR_NOLOCK)) { lock_flags |= XFS_IOLOCK_EXCL; xfs_ilock(ip, lock_flags); } oldsize = inode->i_size; newsize = iattr->ia_size; /* * Short circuit the truncate case for zero length files. */ if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) { if (!(mask & (ATTR_CTIME|ATTR_MTIME))) goto out_unlock; /* * Use the regular setattr path to update the timestamps. 
*/ xfs_iunlock(ip, lock_flags); iattr->ia_valid &= ~ATTR_SIZE; return xfs_setattr_nonsize(ip, iattr, 0); } /* * Make sure that the dquots are attached to the inode. */ error = xfs_qm_dqattach(ip, 0); if (error) goto out_unlock; /* * Now we can make the changes. Before we join the inode to the * transaction, take care of the part of the truncation that must be * done without the inode lock. This needs to be done before joining * the inode to the transaction, because the inode cannot be unlocked * once it is a part of the transaction. */ if (newsize > oldsize) { /* * Do the first part of growing a file: zero any data in the * last block that is beyond the old EOF. We need to do this * before the inode is joined to the transaction to modify * i_size. */ error = xfs_zero_eof(ip, newsize, oldsize); if (error) goto out_unlock; } /* * We are going to log the inode size change in this transaction so * any previous writes that are beyond the on disk EOF and the new * EOF that have not been written out need to be written here. If we * do not write the data out, we expose ourselves to the null files * problem. * * Only flush from the on disk size to the smaller of the in memory * file size or the new size as that's the range we really care about * here and prevents waiting for other data not within the range we * care about here. */ if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) { error = xfs_flush_pages(ip, ip->i_d.di_size, newsize, 0, FI_NONE); if (error) goto out_unlock; } /* * Wait for all direct I/O to complete. 
*/ inode_dio_wait(inode); error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks); if (error) goto out_unlock; tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE); error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT); if (error) goto out_trans_cancel; truncate_setsize(inode, newsize); commit_flags = XFS_TRANS_RELEASE_LOG_RES; lock_flags |= XFS_ILOCK_EXCL; xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); /* * Only change the c/mtime if we are changing the size or we are * explicitly asked to change it. This handles the semantic difference * between truncate() and ftruncate() as implemented in the VFS. * * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a * special case where we need to update the times despite not having * these flags set. For all other operations the VFS set these flags * explicitly if it wants a timestamp update. */ if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) { iattr->ia_ctime = iattr->ia_mtime = current_fs_time(inode->i_sb); mask |= ATTR_CTIME | ATTR_MTIME; } /* * The first thing we do is set the size to new_size permanently on * disk. This way we don't have to worry about anyone ever being able * to look at the data being freed even in the face of a crash. * What we're getting around here is the case where we free a block, it * is allocated to another file, it is written to, and then we crash. * If the new data gets written to the file but the log buffers * containing the free and reallocation don't, then we'd end up with * garbage in the blocks being freed. As long as we make the new size * permanent before actually freeing any blocks it doesn't matter if * they get written to. 
*/ ip->i_d.di_size = newsize; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); if (newsize <= oldsize) { error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, newsize); if (error) goto out_trans_abort; /* * Truncated "down", so we're removing references to old data * here - if we delay flushing for a long time, we expose * ourselves unduly to the notorious NULL files problem. So, * we mark this inode and flush it when the file is closed, * and do not wait the usual (long) time for writeout. */ xfs_iflags_set(ip, XFS_ITRUNCATED); } if (mask & ATTR_CTIME) { inode->i_ctime = iattr->ia_ctime; ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; } if (mask & ATTR_MTIME) { inode->i_mtime = iattr->ia_mtime; ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; } xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); XFS_STATS_INC(xs_ig_attrchg); if (mp->m_flags & XFS_MOUNT_WSYNC) xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); out_unlock: if (lock_flags) xfs_iunlock(ip, lock_flags); return error; out_trans_abort: commit_flags |= XFS_TRANS_ABORT; out_trans_cancel: xfs_trans_cancel(tp, commit_flags); goto out_unlock; } STATIC int xfs_vn_setattr( struct dentry *dentry, struct iattr *iattr) { if (iattr->ia_valid & ATTR_SIZE) return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0); return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0); } #define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) /* * Call fiemap helper to fill in user data. * Returns positive errors to xfs_getbmap. 
*/ STATIC int xfs_fiemap_format( void **arg, struct getbmapx *bmv, int *full) { int error; struct fiemap_extent_info *fieinfo = *arg; u32 fiemap_flags = 0; u64 logical, physical, length; /* Do nothing for a hole */ if (bmv->bmv_block == -1LL) return 0; logical = BBTOB(bmv->bmv_offset); physical = BBTOB(bmv->bmv_block); length = BBTOB(bmv->bmv_length); if (bmv->bmv_oflags & BMV_OF_PREALLOC) fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN; else if (bmv->bmv_oflags & BMV_OF_DELALLOC) { fiemap_flags |= FIEMAP_EXTENT_DELALLOC; physical = 0; /* no block yet */ } if (bmv->bmv_oflags & BMV_OF_LAST) fiemap_flags |= FIEMAP_EXTENT_LAST; error = fiemap_fill_next_extent(fieinfo, logical, physical, length, fiemap_flags); if (error > 0) { error = 0; *full = 1; /* user array now full */ } return -error; } STATIC int xfs_vn_fiemap( struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 length) { xfs_inode_t *ip = XFS_I(inode); struct getbmapx bm; int error; error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS); if (error) return error; /* Set up bmap header for xfs internal routine */ bm.bmv_offset = BTOBB(start); /* Special case for whole file */ if (length == FIEMAP_MAX_OFFSET) bm.bmv_length = -1LL; else bm.bmv_length = BTOBB(length); /* We add one because in getbmap world count includes the header */ bm.bmv_count = !fieinfo->fi_extents_max ? 
MAXEXTNUM : fieinfo->fi_extents_max + 1; bm.bmv_count = min_t(__s32, bm.bmv_count, (PAGE_SIZE * 16 / sizeof(struct getbmapx))); bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES; if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) bm.bmv_iflags |= BMV_IF_ATTRFORK; if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC)) bm.bmv_iflags |= BMV_IF_DELALLOC; error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo); if (error) return -error; return 0; } static const struct inode_operations xfs_inode_operations = { .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, .fiemap = xfs_vn_fiemap, }; static const struct inode_operations xfs_dir_inode_operations = { .create = xfs_vn_create, .lookup = xfs_vn_lookup, .link = xfs_vn_link, .unlink = xfs_vn_unlink, .symlink = xfs_vn_symlink, .mkdir = xfs_vn_mkdir, /* * Yes, XFS uses the same method for rmdir and unlink. * * There are some subtile differences deeper in the code, * but we use S_ISDIR to check for those. */ .rmdir = xfs_vn_unlink, .mknod = xfs_vn_mknod, .rename = xfs_vn_rename, .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, }; static const struct inode_operations xfs_dir_ci_inode_operations = { .create = xfs_vn_create, .lookup = xfs_vn_ci_lookup, .link = xfs_vn_link, .unlink = xfs_vn_unlink, .symlink = xfs_vn_symlink, .mkdir = xfs_vn_mkdir, /* * Yes, XFS uses the same method for rmdir and unlink. * * There are some subtile differences deeper in the code, * but we use S_ISDIR to check for those. 
*/ .rmdir = xfs_vn_unlink, .mknod = xfs_vn_mknod, .rename = xfs_vn_rename, .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, }; static const struct inode_operations xfs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = xfs_vn_follow_link, .put_link = xfs_vn_put_link, .get_acl = xfs_get_acl, .getattr = xfs_vn_getattr, .setattr = xfs_vn_setattr, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, }; STATIC void xfs_diflags_to_iflags( struct inode *inode, struct xfs_inode *ip) { if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE) inode->i_flags |= S_IMMUTABLE; else inode->i_flags &= ~S_IMMUTABLE; if (ip->i_d.di_flags & XFS_DIFLAG_APPEND) inode->i_flags |= S_APPEND; else inode->i_flags &= ~S_APPEND; if (ip->i_d.di_flags & XFS_DIFLAG_SYNC) inode->i_flags |= S_SYNC; else inode->i_flags &= ~S_SYNC; if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME) inode->i_flags |= S_NOATIME; else inode->i_flags &= ~S_NOATIME; } /* * Initialize the Linux inode, set up the operation vectors and * unlock the inode. * * When reading existing inodes from disk this is called directly * from xfs_iget, when creating a new inode it is called from * xfs_ialloc after setting up the inode. * * We are always called with an uninitialised linux inode here. * We need to initialise the necessary fields and take a reference * on it. 
*/ void xfs_setup_inode( struct xfs_inode *ip) { struct inode *inode = &ip->i_vnode; inode->i_ino = ip->i_ino; inode->i_state = I_NEW; inode_sb_list_add(inode); /* make the inode look hashed for the writeback code */ hlist_add_fake(&inode->i_hash); inode->i_mode = ip->i_d.di_mode; set_nlink(inode, ip->i_d.di_nlink); inode->i_uid = ip->i_d.di_uid; inode->i_gid = ip->i_d.di_gid; switch (inode->i_mode & S_IFMT) { case S_IFBLK: case S_IFCHR: inode->i_rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, sysv_minor(ip->i_df.if_u2.if_rdev)); break; default: inode->i_rdev = 0; break; } inode->i_generation = ip->i_d.di_gen; i_size_write(inode, ip->i_d.di_size); inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; xfs_diflags_to_iflags(inode, ip); switch (inode->i_mode & S_IFMT) { case S_IFREG: inode->i_op = &xfs_inode_operations; inode->i_fop = &xfs_file_operations; inode->i_mapping->a_ops = &xfs_address_space_operations; break; case S_IFDIR: if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb)) inode->i_op = &xfs_dir_ci_inode_operations; else inode->i_op = &xfs_dir_inode_operations; inode->i_fop = &xfs_dir_file_operations; break; case S_IFLNK: inode->i_op = &xfs_symlink_inode_operations; if (!(ip->i_df.if_flags & XFS_IFINLINE)) inode->i_mapping->a_ops = &xfs_address_space_operations; break; default: inode->i_op = &xfs_inode_operations; init_special_inode(inode, inode->i_mode, inode->i_rdev); break; } /* * If there is no attribute fork no ACL can exist on this inode, * and it can't have any file capabilities attached to it either. */ if (!XFS_IFORK_Q(ip)) { inode_has_no_xattr(inode); cache_no_acl(inode); } xfs_iflags_clear(ip, XFS_INEW); barrier(); unlock_new_inode(inode); }
gpl-2.0
huangyukun2012/linux-2.4.21
arch/s390x/mm/ioremap.c
48
3386
/*
 *  arch/s390/mm/ioremap.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/extable.c"
 *    (C) Copyright 1995 1996 Linus Torvalds
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 */

#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>

/*
 * Fill one PTE-level table: map [address, address+size) within a single
 * pmd entry to the physical range starting at phys_addr. The pages must
 * not already be mapped.
 */
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, mk_pte_phys(phys_addr,
					 __pgprot(_PAGE_PRESENT | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}

/*
 * Walk one pmd-level table, allocating PTE tables as needed.
 * Returns 0 on success, -ENOMEM if a PTE table could not be allocated.
 */
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	/* Bias phys_addr so that "address + phys_addr" below stays correct
	 * as address advances pmd by pmd. */
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

/*
 * Install the kernel page-table entries mapping the virtual range
 * [address, address+size) onto the physical range at phys_addr.
 * Returns 0 on success or -ENOMEM on page-table allocation failure.
 */
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				 unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	/*
	 * BUG FIX: previously this returned 0 unconditionally, so an
	 * allocation failure (error == -ENOMEM after the break above) was
	 * swallowed and __ioremap() handed out a partially mapped region.
	 * Propagate the error as the i386 original does.
	 */
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;

	/* Physical memory below high_memory is already permanently mapped. */
	if (phys_addr < virt_to_phys(high_memory))
		return phys_to_virt(phys_addr);
	/* Mappings have to be page-aligned. */
	if (phys_addr & ~PAGE_MASK)
		return NULL;
	size = PAGE_ALIGN(size);
	/* Reject empty and wrapping ranges. */
	if (!size || size > phys_addr + size)
		return NULL;
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}
	return addr;
}

void iounmap(void *addr)
{
	/* Addresses below high_memory came from the identity mapping in
	 * __ioremap() and were never vmalloc'ed; nothing to free. */
	if (addr > high_memory)
		return vfree(addr);
}
gpl-2.0
lshain-android-source/external-qemu
distrib/jpeg-6b/jddctmgr.c
48
10779
/* * jddctmgr.c * * Copyright (C) 1994-1996, Thomas G. Lane. * This file is part of the Independent JPEG Group's software. * For conditions of distribution and use, see the accompanying README file. * * This file contains the inverse-DCT management logic. * This code selects a particular IDCT implementation to be used, * and it performs related housekeeping chores. No code in this file * is executed per IDCT step, only during output pass setup. * * Note that the IDCT routines are responsible for performing coefficient * dequantization as well as the IDCT proper. This module sets up the * dequantization multiplier table needed by the IDCT routine. */ #define JPEG_INTERNALS #include "jinclude.h" #include "jpeglib.h" #include "jdct.h" /* Private declarations for DCT subsystem */ #ifdef ANDROID_ARMV6_IDCT #undef ANDROID_ARMV6_IDCT #ifdef __arm__ #include <machine/cpu-features.h> #if __ARM_ARCH__ >= 6 #define ANDROID_ARMV6_IDCT #else #warning "ANDROID_ARMV6_IDCT is disabled" #endif #endif #endif #ifdef ANDROID_ARMV6_IDCT /* Intentionally declare the prototype with arguments of primitive types instead * of type-defined ones. This will at least generate some warnings if jmorecfg.h * is changed and becomes incompatible with the assembly code. 
*/ extern void armv6_idct(short *coefs, int *quans, unsigned char **rows, int col); void jpeg_idct_armv6 (j_decompress_ptr cinfo, jpeg_component_info * compptr, JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col) { IFAST_MULT_TYPE *dct_table = (IFAST_MULT_TYPE *)compptr->dct_table; armv6_idct(coef_block, dct_table, output_buf, output_col); } #endif #ifdef ANDROID_INTELSSE2_IDCT extern short __attribute__((aligned(16))) quantptrSSE[DCTSIZE2]; extern void jpeg_idct_intelsse (j_decompress_ptr cinfo, jpeg_component_info * compptr, JCOEFPTR coef_block, JSAMPARRAY output_buf, JDIMENSION output_col); #endif /* * The decompressor input side (jdinput.c) saves away the appropriate * quantization table for each component at the start of the first scan * involving that component. (This is necessary in order to correctly * decode files that reuse Q-table slots.) * When we are ready to make an output pass, the saved Q-table is converted * to a multiplier table that will actually be used by the IDCT routine. * The multiplier table contents are IDCT-method-dependent. To support * application changes in IDCT method between scans, we can remake the * multiplier tables if necessary. * In buffered-image mode, the first output pass may occur before any data * has been seen for some components, and thus before their Q-tables have * been saved away. To handle this case, multiplier tables are preset * to zeroes; the result of the IDCT will be a neutral gray level. */ /* Private subobject for this module */ typedef struct { struct jpeg_inverse_dct pub; /* public fields */ /* This array contains the IDCT method code that each multiplier table * is currently set up for, or -1 if it's not yet set up. * The actual multiplier tables are pointed to by dct_table in the * per-component comp_info structures. 
*/ int cur_method[MAX_COMPONENTS]; } my_idct_controller; typedef my_idct_controller * my_idct_ptr; /* Allocated multiplier tables: big enough for any supported variant */ typedef union { ISLOW_MULT_TYPE islow_array[DCTSIZE2]; #ifdef DCT_IFAST_SUPPORTED IFAST_MULT_TYPE ifast_array[DCTSIZE2]; #endif #ifdef DCT_FLOAT_SUPPORTED FLOAT_MULT_TYPE float_array[DCTSIZE2]; #endif } multiplier_table; /* The current scaled-IDCT routines require ISLOW-style multiplier tables, * so be sure to compile that code if either ISLOW or SCALING is requested. */ #ifdef DCT_ISLOW_SUPPORTED #define PROVIDE_ISLOW_TABLES #else #ifdef IDCT_SCALING_SUPPORTED #define PROVIDE_ISLOW_TABLES #endif #endif /* * Prepare for an output pass. * Here we select the proper IDCT routine for each component and build * a matching multiplier table. */ METHODDEF(void) start_pass (j_decompress_ptr cinfo) { my_idct_ptr idct = (my_idct_ptr) cinfo->idct; int ci, i; jpeg_component_info *compptr; int method = 0; inverse_DCT_method_ptr method_ptr = NULL; JQUANT_TBL * qtbl; for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { /* Select the proper IDCT routine for this component's scaling */ switch (compptr->DCT_scaled_size) { #ifdef IDCT_SCALING_SUPPORTED case 1: method_ptr = jpeg_idct_1x1; method = JDCT_ISLOW; /* jidctred uses islow-style table */ break; case 2: method_ptr = jpeg_idct_2x2; method = JDCT_ISLOW; /* jidctred uses islow-style table */ break; case 4: method_ptr = jpeg_idct_4x4; method = JDCT_ISLOW; /* jidctred uses islow-style table */ break; #endif case DCTSIZE: switch (cinfo->dct_method) { #ifdef ANDROID_ARMV6_IDCT case JDCT_ISLOW: case JDCT_IFAST: method_ptr = jpeg_idct_armv6; method = JDCT_IFAST; break; #else /* ANDROID_ARMV6_IDCT */ #ifdef ANDROID_INTELSSE2_IDCT case JDCT_ISLOW: case JDCT_IFAST: method_ptr = jpeg_idct_intelsse; method = JDCT_ISLOW; /* Use quant table of ISLOW.*/ break; #else #ifdef DCT_ISLOW_SUPPORTED case JDCT_ISLOW: method_ptr = jpeg_idct_islow; 
method = JDCT_ISLOW; break; #endif #ifdef DCT_IFAST_SUPPORTED case JDCT_IFAST: method_ptr = jpeg_idct_ifast; method = JDCT_IFAST; break; #endif #endif #endif /* ANDROID_ARMV6_IDCT */ #ifdef DCT_FLOAT_SUPPORTED case JDCT_FLOAT: method_ptr = jpeg_idct_float; method = JDCT_FLOAT; break; #endif default: ERREXIT(cinfo, JERR_NOT_COMPILED); break; } break; default: ERREXIT1(cinfo, JERR_BAD_DCTSIZE, compptr->DCT_scaled_size); break; } idct->pub.inverse_DCT[ci] = method_ptr; /* Create multiplier table from quant table. * However, we can skip this if the component is uninteresting * or if we already built the table. Also, if no quant table * has yet been saved for the component, we leave the * multiplier table all-zero; we'll be reading zeroes from the * coefficient controller's buffer anyway. */ if (! compptr->component_needed || idct->cur_method[ci] == method) continue; qtbl = compptr->quant_table; if (qtbl == NULL) /* happens if no data yet for component */ continue; idct->cur_method[ci] = method; switch (method) { #ifdef PROVIDE_ISLOW_TABLES case JDCT_ISLOW: { /* For LL&M IDCT method, multipliers are equal to raw quantization * coefficients, but are stored as ints to ensure access efficiency. */ ISLOW_MULT_TYPE * ismtbl = (ISLOW_MULT_TYPE *) compptr->dct_table; for (i = 0; i < DCTSIZE2; i++) { ismtbl[i] = (ISLOW_MULT_TYPE) qtbl->quantval[i]; } } break; #endif #ifdef DCT_IFAST_SUPPORTED case JDCT_IFAST: { /* For AA&N IDCT method, multipliers are equal to quantization * coefficients scaled by scalefactor[row]*scalefactor[col], where * scalefactor[0] = 1 * scalefactor[k] = cos(k*PI/16) * sqrt(2) for k=1..7 * For integer operation, the multiplier table is to be scaled by * IFAST_SCALE_BITS. */ IFAST_MULT_TYPE * ifmtbl = (IFAST_MULT_TYPE *) compptr->dct_table; #ifdef ANDROID_ARMV6_IDCT /* Precomputed values scaled up by 15 bits. 
*/ static const unsigned short scales[DCTSIZE2] = { 32768, 45451, 42813, 38531, 32768, 25746, 17734, 9041, 45451, 63042, 59384, 53444, 45451, 35710, 24598, 12540, 42813, 59384, 55938, 50343, 42813, 33638, 23170, 11812, 38531, 53444, 50343, 45308, 38531, 30274, 20853, 10631, 32768, 45451, 42813, 38531, 32768, 25746, 17734, 9041, 25746, 35710, 33638, 30274, 25746, 20228, 13933, 7103, 17734, 24598, 23170, 20853, 17734, 13933, 9598, 4893, 9041, 12540, 11812, 10631, 9041, 7103, 4893, 2494, }; /* Inverse map of [7, 5, 1, 3, 0, 2, 4, 6]. */ static const char orders[DCTSIZE] = {4, 2, 5, 3, 6, 1, 7, 0}; /* Reorder the columns after transposing. */ for (i = 0; i < DCTSIZE2; ++i) { int j = ((i & 7) << 3) + orders[i >> 3]; ifmtbl[j] = (qtbl->quantval[i] * scales[i] + 2) >> 2; } #else /* ANDROID_ARMV6_IDCT */ #define CONST_BITS 14 static const INT16 aanscales[DCTSIZE2] = { /* precomputed values scaled up by 14 bits */ 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, 22725, 31521, 29692, 26722, 22725, 17855, 12299, 6270, 21407, 29692, 27969, 25172, 21407, 16819, 11585, 5906, 19266, 26722, 25172, 22654, 19266, 15137, 10426, 5315, 16384, 22725, 21407, 19266, 16384, 12873, 8867, 4520, 12873, 17855, 16819, 15137, 12873, 10114, 6967, 3552, 8867, 12299, 11585, 10426, 8867, 6967, 4799, 2446, 4520, 6270, 5906, 5315, 4520, 3552, 2446, 1247 }; SHIFT_TEMPS for (i = 0; i < DCTSIZE2; i++) { ifmtbl[i] = (IFAST_MULT_TYPE) DESCALE(MULTIPLY16V16((INT32) qtbl->quantval[i], (INT32) aanscales[i]), CONST_BITS-IFAST_SCALE_BITS); } #endif /* ANDROID_ARMV6_IDCT */ } break; #endif #ifdef DCT_FLOAT_SUPPORTED case JDCT_FLOAT: { /* For float AA&N IDCT method, multipliers are equal to quantization * coefficients scaled by scalefactor[row]*scalefactor[col], where * scalefactor[0] = 1 * scalefactor[k] = cos(k*PI/16) * sqrt(2) for k=1..7 */ FLOAT_MULT_TYPE * fmtbl = (FLOAT_MULT_TYPE *) compptr->dct_table; int row, col; static const double aanscalefactor[DCTSIZE] = { 1.0, 1.387039845, 1.306562965, 
1.175875602, 1.0, 0.785694958, 0.541196100, 0.275899379 }; i = 0; for (row = 0; row < DCTSIZE; row++) { for (col = 0; col < DCTSIZE; col++) { fmtbl[i] = (FLOAT_MULT_TYPE) ((double) qtbl->quantval[i] * aanscalefactor[row] * aanscalefactor[col]); i++; } } } break; #endif default: ERREXIT(cinfo, JERR_NOT_COMPILED); break; } } } /* * Initialize IDCT manager. */ GLOBAL(void) jinit_inverse_dct (j_decompress_ptr cinfo) { my_idct_ptr idct; int ci; jpeg_component_info *compptr; idct = (my_idct_ptr) (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, SIZEOF(my_idct_controller)); cinfo->idct = (struct jpeg_inverse_dct *) idct; idct->pub.start_pass = start_pass; for (ci = 0, compptr = cinfo->comp_info; ci < cinfo->num_components; ci++, compptr++) { /* Allocate and pre-zero a multiplier table for each component */ compptr->dct_table = (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE, SIZEOF(multiplier_table)); MEMZERO(compptr->dct_table, SIZEOF(multiplier_table)); /* Mark multiplier table not yet set up for any method */ idct->cur_method[ci] = -1; } }
gpl-2.0
119/aircam-openwrt
build_dir/toolchain-arm_v5te_gcc-linaro_uClibc-0.9.32_eabi/linux-2.6.28.fa2/crypto/md4.c
48
6363
/* * Cryptographic API. * * MD4 Message Digest Algorithm (RFC1320). * * Implementation derived from Andrew Tridgell and Steve French's * CIFS MD4 implementation, and the cryptoapi implementation * originally based on the public domain implementation written * by Colin Plumb in 1993. * * Copyright (c) Andrew Tridgell 1997-1998. * Modified by Steve French (sfrench@us.ibm.com) 2002 * Copyright (c) Cryptoapi developers. * Copyright (c) 2002 David S. Miller (davem@redhat.com) * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/init.h> #include <linux/crypto.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/types.h> #include <asm/byteorder.h> #define MD4_DIGEST_SIZE 16 #define MD4_HMAC_BLOCK_SIZE 64 #define MD4_BLOCK_WORDS 16 #define MD4_HASH_WORDS 4 struct md4_ctx { u32 hash[MD4_HASH_WORDS]; u32 block[MD4_BLOCK_WORDS]; u64 byte_count; }; static inline u32 lshift(u32 x, unsigned int s) { x &= 0xFFFFFFFF; return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s)); } static inline u32 F(u32 x, u32 y, u32 z) { return (x & y) | ((~x) & z); } static inline u32 G(u32 x, u32 y, u32 z) { return (x & y) | (x & z) | (y & z); } static inline u32 H(u32 x, u32 y, u32 z) { return x ^ y ^ z; } #define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s)) #define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s)) #define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s)) /* XXX: this stuff can be optimized */ static inline void le32_to_cpu_array(u32 *buf, unsigned int words) { while (words--) { __le32_to_cpus(buf); buf++; } } static inline void cpu_to_le32_array(u32 *buf, unsigned int words) { while (words--) { __cpu_to_le32s(buf); buf++; } } static void 
md4_transform(u32 *hash, u32 const *in) { u32 a, b, c, d; a = hash[0]; b = hash[1]; c = hash[2]; d = hash[3]; ROUND1(a, b, c, d, in[0], 3); ROUND1(d, a, b, c, in[1], 7); ROUND1(c, d, a, b, in[2], 11); ROUND1(b, c, d, a, in[3], 19); ROUND1(a, b, c, d, in[4], 3); ROUND1(d, a, b, c, in[5], 7); ROUND1(c, d, a, b, in[6], 11); ROUND1(b, c, d, a, in[7], 19); ROUND1(a, b, c, d, in[8], 3); ROUND1(d, a, b, c, in[9], 7); ROUND1(c, d, a, b, in[10], 11); ROUND1(b, c, d, a, in[11], 19); ROUND1(a, b, c, d, in[12], 3); ROUND1(d, a, b, c, in[13], 7); ROUND1(c, d, a, b, in[14], 11); ROUND1(b, c, d, a, in[15], 19); ROUND2(a, b, c, d,in[ 0], 3); ROUND2(d, a, b, c, in[4], 5); ROUND2(c, d, a, b, in[8], 9); ROUND2(b, c, d, a, in[12], 13); ROUND2(a, b, c, d, in[1], 3); ROUND2(d, a, b, c, in[5], 5); ROUND2(c, d, a, b, in[9], 9); ROUND2(b, c, d, a, in[13], 13); ROUND2(a, b, c, d, in[2], 3); ROUND2(d, a, b, c, in[6], 5); ROUND2(c, d, a, b, in[10], 9); ROUND2(b, c, d, a, in[14], 13); ROUND2(a, b, c, d, in[3], 3); ROUND2(d, a, b, c, in[7], 5); ROUND2(c, d, a, b, in[11], 9); ROUND2(b, c, d, a, in[15], 13); ROUND3(a, b, c, d,in[ 0], 3); ROUND3(d, a, b, c, in[8], 9); ROUND3(c, d, a, b, in[4], 11); ROUND3(b, c, d, a, in[12], 15); ROUND3(a, b, c, d, in[2], 3); ROUND3(d, a, b, c, in[10], 9); ROUND3(c, d, a, b, in[6], 11); ROUND3(b, c, d, a, in[14], 15); ROUND3(a, b, c, d, in[1], 3); ROUND3(d, a, b, c, in[9], 9); ROUND3(c, d, a, b, in[5], 11); ROUND3(b, c, d, a, in[13], 15); ROUND3(a, b, c, d, in[3], 3); ROUND3(d, a, b, c, in[11], 9); ROUND3(c, d, a, b, in[7], 11); ROUND3(b, c, d, a, in[15], 15); hash[0] += a; hash[1] += b; hash[2] += c; hash[3] += d; } static inline void md4_transform_helper(struct md4_ctx *ctx) { le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); md4_transform(ctx->hash, ctx->block); } static void md4_init(struct crypto_tfm *tfm) { struct md4_ctx *mctx = crypto_tfm_ctx(tfm); mctx->hash[0] = 0x67452301; mctx->hash[1] = 0xefcdab89; mctx->hash[2] = 0x98badcfe; 
mctx->hash[3] = 0x10325476; mctx->byte_count = 0; } static void md4_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len) { struct md4_ctx *mctx = crypto_tfm_ctx(tfm); const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); mctx->byte_count += len; if (avail > len) { memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, len); return; } memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, avail); md4_transform_helper(mctx); data += avail; len -= avail; while (len >= sizeof(mctx->block)) { memcpy(mctx->block, data, sizeof(mctx->block)); md4_transform_helper(mctx); data += sizeof(mctx->block); len -= sizeof(mctx->block); } memcpy(mctx->block, data, len); } static void md4_final(struct crypto_tfm *tfm, u8 *out) { struct md4_ctx *mctx = crypto_tfm_ctx(tfm); const unsigned int offset = mctx->byte_count & 0x3f; char *p = (char *)mctx->block + offset; int padding = 56 - (offset + 1); *p++ = 0x80; if (padding < 0) { memset(p, 0x00, padding + sizeof (u64)); md4_transform_helper(mctx); p = (char *)mctx->block; padding = 56; } memset(p, 0, padding); mctx->block[14] = mctx->byte_count << 3; mctx->block[15] = mctx->byte_count >> 29; le32_to_cpu_array(mctx->block, (sizeof(mctx->block) - sizeof(u64)) / sizeof(u32)); md4_transform(mctx->hash, mctx->block); cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(u32)); memcpy(out, mctx->hash, sizeof(mctx->hash)); memset(mctx, 0, sizeof(*mctx)); } static struct crypto_alg alg = { .cra_name = "md4", .cra_flags = CRYPTO_ALG_TYPE_DIGEST, .cra_blocksize = MD4_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct md4_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(alg.cra_list), .cra_u = { .digest = { .dia_digestsize = MD4_DIGEST_SIZE, .dia_init = md4_init, .dia_update = md4_update, .dia_final = md4_final } } }; static int __init md4_mod_init(void) { return crypto_register_alg(&alg); } static void __exit md4_mod_fini(void) { crypto_unregister_alg(&alg); } module_init(md4_mod_init); 
module_exit(md4_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD4 Message Digest Algorithm");
gpl-2.0
lnfamous/Kernel_Htc_Pico_Stock
drivers/scsi/tmscsim.c
304
77543
/************************************************************************ * FILE NAME : TMSCSIM.C * * BY : C.L. Huang, ching@tekram.com.tw * * Description: Device Driver for Tekram DC-390(T) PCI SCSI * * Bus Master Host Adapter * * (C)Copyright 1995-1996 Tekram Technology Co., Ltd. * ************************************************************************ * (C) Copyright: put under GNU GPL in 10/96 * * (see Documentation/scsi/tmscsim.txt) * ************************************************************************ * $Id: tmscsim.c,v 2.60.2.30 2000/12/20 01:07:12 garloff Exp $ * * Enhancements and bugfixes by * * Kurt Garloff <kurt@garloff.de> <garloff@suse.de> * ************************************************************************ * HISTORY: * * * * REV# DATE NAME DESCRIPTION * * 1.00 96/04/24 CLH First release * * 1.01 96/06/12 CLH Fixed bug of Media Change for Removable * * Device, scan all LUN. Support Pre2.0.10 * * 1.02 96/06/18 CLH Fixed bug of Command timeout ... * * 1.03 96/09/25 KG Added tmscsim_proc_info() * * 1.04 96/10/11 CLH Updating for support KV 2.0.x * * 1.05 96/10/18 KG Fixed bug in DC390_abort(null ptr deref)* * 1.06 96/10/25 KG Fixed module support * * 1.07 96/11/09 KG Fixed tmscsim_proc_info() * * 1.08 96/11/18 KG Fixed null ptr in DC390_Disconnect() * * 1.09 96/11/30 KG Added register the allocated IO space * * 1.10 96/12/05 CLH Modified tmscsim_proc_info(), and reset * * pending interrupt in DC390_detect() * * 1.11 97/02/05 KG/CLH Fixeds problem with partitions greater * * than 1GB * * 1.12 98/02/15 MJ Rewritten PCI probing * * 1.13 98/04/08 KG Support for non DC390, __initfunc decls,* * changed max devs from 10 to 16 * * 1.14a 98/05/05 KG Dynamic DCB allocation, add-single-dev * * for LUNs if LUN_SCAN (BIOS) not set * * runtime config using /proc interface * * 1.14b 98/05/06 KG eliminated cli (); sti (); spinlocks * * 1.14c 98/05/07 KG 2.0.x compatibility * * 1.20a 98/05/07 KG changed names of funcs to be consistent * * DC390_ (entry 
points), dc390_ (internal)* * reworked locking * * 1.20b 98/05/12 KG bugs: version, kfree, _ctmp * * debug output * * 1.20c 98/05/12 KG bugs: kfree, parsing, EEpromDefaults * * 1.20d 98/05/14 KG bugs: list linkage, clear flag after * * reset on startup, code cleanup * * 1.20e 98/05/15 KG spinlock comments, name space cleanup * * pLastDCB now part of ACB structure * * added stats, timeout for 2.1, TagQ bug * * RESET and INQUIRY interface commands * * 1.20f 98/05/18 KG spinlocks fixes, max_lun fix, free DCBs * * for missing LUNs, pending int * * 1.20g 98/05/19 KG Clean up: Avoid short * * 1.20h 98/05/21 KG Remove AdaptSCSIID, max_lun ... * * 1.20i 98/05/21 KG Aiiie: Bug with TagQMask * * 1.20j 98/05/24 KG Handle STAT_BUSY, handle pACB->pLinkDCB * * == 0 in remove_dev and DoingSRB_Done * * 1.20k 98/05/25 KG DMA_INT (experimental) * * 1.20l 98/05/27 KG remove DMA_INT; DMA_IDLE cmds added; * * 1.20m 98/06/10 KG glitch configurable; made some global * * vars part of ACB; use DC390_readX * * 1.20n 98/06/11 KG startup params * * 1.20o 98/06/15 KG added TagMaxNum to boot/module params * * Device Nr -> Idx, TagMaxNum power of 2 * * 1.20p 98/06/17 KG Docu updates. Reset depends on settings * * pci_set_master added; 2.0.xx: pcibios_* * * used instead of MechNum things ... * * 1.20q 98/06/23 KG Changed defaults. Added debug code for * * removable media and fixed it. TagMaxNum * * fixed for DC390. Locking: ACB, DRV for * * better IRQ sharing. Spelling: Queueing * * Parsing and glitch_cfg changes. Display * * real SyncSpeed value. Made DisConn * * functional (!) * * 1.20r 98/06/30 KG Debug macros, allow disabling DsCn, set * * BIT4 in CtrlR4, EN_PAGE_INT, 2.0 module * * param -1 fixed. * * 1.20s 98/08/20 KG Debug info on abort(), try to check PCI,* * phys_to_bus instead of phys_to_virt, * * fixed sel. 
process, fixed locking, * * added MODULE_XXX infos, changed IRQ * * request flags, disable DMA_INT * * 1.20t 98/09/07 KG TagQ report fixed; Write Erase DMA Stat;* * initfunc -> __init; better abort; * * Timeout for XFER_DONE & BLAST_COMPLETE; * * Allow up to 33 commands being processed * * 2.0a 98/10/14 KG Max Cmnds back to 17. DMA_Stat clearing * * all flags. Clear within while() loops * * in DataIn_0/Out_0. Null ptr in dumpinfo * * for pSRB==0. Better locking during init.* * bios_param() now respects part. table. * * 2.0b 98/10/24 KG Docu fixes. Timeout Msg in DMA Blast. * * Disallow illegal idx in INQUIRY/REMOVE * * 2.0c 98/11/19 KG Cleaned up detect/init for SMP boxes, * * Write Erase DMA (1.20t) caused problems * * 2.0d 98/12/25 KG Christmas release ;-) Message handling * * completely reworked. Handle target ini- * * tiated SDTR correctly. * * 2.0d1 99/01/25 KG Try to handle RESTORE_PTR * * 2.0d2 99/02/08 KG Check for failure of kmalloc, correct * * inclusion of scsicam.h, DelayReset * * 2.0d3 99/05/31 KG DRIVER_OK -> DID_OK, DID_NO_CONNECT, * * detect Target mode and warn. * * pcmd->result handling cleaned up. * * 2.0d4 99/06/01 KG Cleaned selection process. Found bug * * which prevented more than 16 tags. Now: * * 24. SDTR cleanup. Cleaner multi-LUN * * handling. Don't modify ControlRegs/FIFO * * when connected. * * 2.0d5 99/06/01 KG Clear DevID, Fix INQUIRY after cfg chg. * * 2.0d6 99/06/02 KG Added ADD special command to allow cfg. * * before detection. Reset SYNC_NEGO_DONE * * after a bus reset. * * 2.0d7 99/06/03 KG Fixed bugs wrt add,remove commands * * 2.0d8 99/06/04 KG Removed copying of cmnd into CmdBlock. * * Fixed Oops in _release(). * * 2.0d9 99/06/06 KG Also tag queue INQUIRY, T_U_R, ... * * Allow arb. no. of Tagged Cmnds. Max 32 * * 2.0d1099/06/20 KG TagMaxNo changes now honoured! Queueing * * clearified (renamed ..) TagMask handling* * cleaned. 
* * 2.0d1199/06/28 KG cmd->result now identical to 2.0d2 * * 2.0d1299/07/04 KG Changed order of processing in IRQ * * 2.0d1399/07/05 KG Don't update DCB fields if removed * * 2.0d1499/07/05 KG remove_dev: Move kfree() to the end * * 2.0d1599/07/12 KG use_new_eh_code: 0, ULONG -> UINT where * * appropriate * * 2.0d1699/07/13 KG Reenable StartSCSI interrupt, Retry msg * * 2.0d1799/07/15 KG Remove debug msg. Disable recfg. when * * there are queued cmnds * * 2.0d1899/07/18 KG Selection timeout: Don't requeue * * 2.0d1999/07/18 KG Abort: Only call scsi_done if dequeued * * 2.0d2099/07/19 KG Rst_Detect: DoingSRB_Done * * 2.0d2199/08/15 KG dev_id for request/free_irq, cmnd[0] for* * RETRY, SRBdone does DID_ABORT for the * * cmd passed by DC390_reset() * * 2.0d2299/08/25 KG dev_id fixed. can_queue: 42 * * 2.0d2399/08/25 KG Removed some debugging code. dev_id * * now is set to pACB. Use u8,u16,u32. * * 2.0d2499/11/14 KG Unreg. I/O if failed IRQ alloc. Call * * done () w/ DID_BAD_TARGET in case of * * missing DCB. We are old EH!! * * 2.0d2500/01/15 KG 2.3.3x compat from Andreas Schultz * * set unique_id. Disable RETRY message. * * 2.0d2600/01/29 KG Go to new EH. * * 2.0d2700/01/31 KG ... but maintain 2.0 compat. * * and fix DCB freeing * * 2.0d2800/02/14 KG Queue statistics fixed, dump special cmd* * Waiting_Timer for failed StartSCSI * * New EH: Don't return cmnds to ML on RST * * Use old EH (don't have new EH fns yet) * * Reset: Unlock, but refuse to queue * * 2.3 __setup function * * 2.0e 00/05/22 KG Return residual for 2.3 * * 2.0e1 00/05/25 KG Compile fixes for 2.3.99 * * 2.0e2 00/05/27 KG Jeff Garzik's pci_enable_device() * * 2.0e3 00/09/29 KG Some 2.4 changes. Don't try Sync Nego * * before INQUIRY has reported ability. * * Recognise INQUIRY as scanning command. 
* * 2.0e4 00/10/13 KG Allow compilation into 2.4 kernel * * 2.0e5 00/11/17 KG Store Inq.flags in DCB * * 2.0e6 00/11/22 KG 2.4 init function (Thx to O.Schumann) * * 2.4 PCI device table (Thx to A.Richter) * * 2.0e7 00/11/28 KG Allow overriding of BIOS settings * * 2.0f 00/12/20 KG Handle failed INQUIRYs during scan * * 2.1a 03/11/29 GL, KG Initial fixing for 2.6. Convert to * * use the current PCI-mapping API, update * * command-queuing. * * 2.1b 04/04/13 GL Fix for 64-bit platforms * * 2.1b1 04/01/31 GL (applied 05.04) Remove internal * * command-queuing. * * 2.1b2 04/02/01 CH (applied 05.04) Fix error-handling * * 2.1c 04/05/23 GL Update to use the new pci_driver API, * * some scsi EH updates, more cleanup. * * 2.1d 04/05/27 GL Moved setting of scan_devices to * * slave_alloc/_configure/_destroy, as * * suggested by CH. * ***********************************************************************/ /* DEBUG options */ //#define DC390_DEBUG0 //#define DC390_DEBUG1 //#define DC390_DCBDEBUG //#define DC390_PARSEDEBUG //#define DC390_REMOVABLEDEBUG //#define DC390_LOCKDEBUG //#define NOP do{}while(0) #define C_NOP /* Debug definitions */ #ifdef DC390_DEBUG0 # define DEBUG0(x) x #else # define DEBUG0(x) C_NOP #endif #ifdef DC390_DEBUG1 # define DEBUG1(x) x #else # define DEBUG1(x) C_NOP #endif #ifdef DC390_DCBDEBUG # define DCBDEBUG(x) x #else # define DCBDEBUG(x) C_NOP #endif #ifdef DC390_PARSEDEBUG # define PARSEDEBUG(x) x #else # define PARSEDEBUG(x) C_NOP #endif #ifdef DC390_REMOVABLEDEBUG # define REMOVABLEDEBUG(x) x #else # define REMOVABLEDEBUG(x) C_NOP #endif #define DCBDEBUG1(x) C_NOP #include <linux/module.h> #include <linux/delay.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/blkdev.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/spinlock.h> 
#include <linux/slab.h>
#include <asm/io.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_tcq.h>


#define DC390_BANNER "Tekram DC390/AM53C974"
#define DC390_VERSION "2.1d 2004-05-27"

#define PCI_DEVICE_ID_AMD53C974 	PCI_DEVICE_ID_AMD_SCSI

#include "tmscsim.h"

/*
 * Forward declarations.
 *
 * The dc390_*_0 functions handle completion of a bus phase (indexed by
 * the phase code through dc390_phase0[] below); the dc390_*Phase
 * functions set up the chip for the next phase (dc390_phase1[]).
 * All share the signature (pACB, pSRB, psstatus).
 */
static void dc390_DataOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_DataIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_Command_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_Status_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_MsgOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_DataOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_DataInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_StatusPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_MsgOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_MsgInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_Nop_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);
static void dc390_Nop_1( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus);

static void dc390_SetXferRate( struct dc390_acb* pACB, struct dc390_dcb* pDCB );
static void dc390_Disconnect( struct dc390_acb* pACB );
static void dc390_Reselect( struct dc390_acb* pACB );
static void dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB );
static void dc390_ScsiRstDetect( struct dc390_acb* pACB );
static void dc390_EnableMsgOut_Abort(struct dc390_acb*, struct dc390_srb*);
static void dc390_dumpinfo(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB);
static void dc390_ResetDevParam(struct dc390_acb* pACB);

/* Last raw (dstatus<<24)|(sstatus<<16)|(istate<<8)|istatus snapshot,
 * updated from the interrupt handler for diagnostics. */
static u32	dc390_laststatus;
/* Number of adapters found so far (indexes dc390_eepromBuf). */
static u8	dc390_adapterCnt;

static int disable_clustering;
module_param(disable_clustering, int, S_IRUGO);
MODULE_PARM_DESC(disable_clustering, "If you experience problems with your devices, try setting to 1");

/* Startup values, to be overridden on the commandline */
static int tmscsim[] = {-2, -2, -2, -2, -2, -2};

module_param_array(tmscsim, int, NULL, 0);
MODULE_PARM_DESC(tmscsim, "Host SCSI ID, Speed (0=10MHz), Device Flags, Adapter Flags, Max Tags (log2(tags)-1), DelayReset (s)");
MODULE_AUTHOR("C.L. Huang / Kurt Garloff");
MODULE_DESCRIPTION("SCSI host adapter driver for Tekram DC390 and other AMD53C974A based PCI SCSI adapters");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("sd,sr,sg,st");

/*
 * Phase dispatch tables, indexed by the 3-bit SCSI phase code.
 * Properly typed function-pointer arrays (instead of void *[]) so the
 * compiler checks every entry against the common handler signature;
 * storing function pointers in void * objects is not portable C.
 */
static void (*dc390_phase0[])(struct dc390_acb *, struct dc390_srb *, u8 *) = {
	dc390_DataOut_0,
	dc390_DataIn_0,
	dc390_Command_0,
	dc390_Status_0,
	dc390_Nop_0,
	dc390_Nop_0,
	dc390_MsgOut_0,
	dc390_MsgIn_0,
	dc390_Nop_1
};

static void (*dc390_phase1[])(struct dc390_acb *, struct dc390_srb *, u8 *) = {
	dc390_DataOutPhase,
	dc390_DataInPhase,
	dc390_CommandPhase,
	dc390_StatusPhase,
	dc390_Nop_0,
	dc390_Nop_0,
	dc390_MsgOutPhase,
	dc390_MsgInPhase,
	dc390_Nop_1
};

#ifdef DC390_DEBUG1
/* Human-readable names matching dc390_phase0[]/dc390_phase1[], for traces. */
static const char *dc390_p0_str[] = {
	"dc390_DataOut_0",
	"dc390_DataIn_0",
	"dc390_Command_0",
	"dc390_Status_0",
	"dc390_Nop_0",
	"dc390_Nop_0",
	"dc390_MsgOut_0",
	"dc390_MsgIn_0",
	"dc390_Nop_1"
};
static const char *dc390_p1_str[] = {
	"dc390_DataOutPhase",
	"dc390_DataInPhase",
	"dc390_CommandPhase",
	"dc390_StatusPhase",
	"dc390_Nop_0",
	"dc390_Nop_0",
	"dc390_MsgOutPhase",
	"dc390_MsgInPhase",
	"dc390_Nop_1"
};
#endif

/* Per-adapter copy of the EEPROM configuration. */
static u8  dc390_eepromBuf[MAX_ADAPTER_NUM][EE_LEN];
/* Sync period steps (chip clock units) and the matching bus speeds
 * in 100 kHz units (100 == 10.0 MHz). */
static u8  dc390_clock_period1[] = {4, 5, 6, 7, 8, 10, 13, 20};
static u8  dc390_clock_speed[] = {100,80,67,57,50, 40, 31, 20};
/***********************************************************************
 * Functions for the management of the internal structures
 * (DCBs, SRBs, Queueing)
 *
 **********************************************************************/

/* Load bus address and length of the current scatterlist entry into the SRB. */
static void inline dc390_start_segment(struct dc390_srb* pSRB)
{
	struct scatterlist *psgl = pSRB->pSegmentList;

	/* start new sg segment */
	pSRB->SGBusAddr = sg_dma_address(psgl);
	pSRB->SGToBeXferLen = sg_dma_len(psgl);
}

/* Account for (segment length - residue) bytes transferred in the current
 * segment; returns the number of bytes just accounted. */
static unsigned long inline dc390_advance_segment(struct dc390_srb* pSRB, u32 residue)
{
	unsigned long xfer = pSRB->SGToBeXferLen - residue;

	/* xfer more bytes transferred */
	pSRB->SGBusAddr += xfer;
	pSRB->TotalXferredLen += xfer;
	pSRB->SGToBeXferLen = residue;

	return xfer;
}

/* Walk the circular DCB list for the device (id, lun); NULL if not found. */
static struct dc390_dcb __inline__ *dc390_findDCB ( struct dc390_acb* pACB, u8 id, u8 lun)
{
	struct dc390_dcb* pDCB = pACB->pLinkDCB;
	if (!pDCB) return NULL;
	while (pDCB->TargetID != id || pDCB->TargetLUN != lun)
	{
		pDCB = pDCB->pNextDCB;
		if (pDCB == pACB->pLinkDCB)	/* wrapped around: not present */
			return NULL;
	}
	DCBDEBUG1( printk (KERN_DEBUG "DCB %p (%02x,%02x) found.\n",	\
			   pDCB, pDCB->TargetID, pDCB->TargetLUN));
	return pDCB;
}

/* Insert SRB on top of free list */
static __inline__ void dc390_Free_insert (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
    DEBUG0(printk ("DC390: Free SRB %p\n", pSRB));
    pSRB->pNextSRB = pACB->pFreeSRB;
    pACB->pFreeSRB = pSRB;
}

/* Append the SRB to the DCB's singly-linked list of commands on the bus. */
static __inline__ void dc390_Going_append (struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
{
    pDCB->GoingSRBCnt++;
    DEBUG0(printk("DC390: Append SRB %p to Going\n", pSRB));
    /* Append to the list of Going commands */
    if( pDCB->pGoingSRB )
	pDCB->pGoingLast->pNextSRB = pSRB;
    else
	pDCB->pGoingSRB = pSRB;

    pDCB->pGoingLast = pSRB;
    /* No next one in sent list */
    pSRB->pNextSRB = NULL;
}

/* Unlink the SRB from the DCB's Going list; complains (and returns without
 * touching the count) if the SRB is not actually on the list. */
static __inline__ void dc390_Going_remove (struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
{
	DEBUG0(printk("DC390: Remove SRB %p from Going\n", pSRB));
   if (pSRB == pDCB->pGoingSRB)
	pDCB->pGoingSRB = pSRB->pNextSRB;
   else
   {
	struct dc390_srb* psrb = pDCB->pGoingSRB;
	while (psrb && psrb->pNextSRB != pSRB)
	    psrb = psrb->pNextSRB;
	if (!psrb)
	    { printk (KERN_ERR "DC390: Remove non-ex. SRB %p from Going!\n", pSRB); return; }
	psrb->pNextSRB = pSRB->pNextSRB;
	if (pSRB == pDCB->pGoingLast)
	    pDCB->pGoingLast = psrb;
   }
   pDCB->GoingSRBCnt--;
}

/* Initialize a one-entry scatterlist over (addr, length) and return it. */
static struct scatterlist* dc390_sg_build_single(struct scatterlist *sg,
						 void *addr, unsigned int length)
{
	sg_init_one(sg, addr, length);
	return sg;
}

/* Create pci mapping
 *
 * Maps either the sense buffer (AUTO_REQSENSE), the command's SG list,
 * or nothing (zero-length transfer) for DMA.  Returns 1 on mapping
 * failure, 0 on success — but note the caller currently ignores it.
 */
static int dc390_pci_map (struct dc390_srb* pSRB)
{
	int error = 0;
	struct scsi_cmnd *pcmd = pSRB->pcmd;
	struct pci_dev *pdev = pSRB->pSRBDCB->pDCBACB->pdev;
	dc390_cmd_scp_t* cmdp = ((dc390_cmd_scp_t*)(&pcmd->SCp));

	/* Map sense buffer */
	if (pSRB->SRBFlag & AUTO_REQSENSE) {
		pSRB->pSegmentList	= dc390_sg_build_single(&pSRB->Segmentx, pcmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
		pSRB->SGcount		= pci_map_sg(pdev, pSRB->pSegmentList, 1,
						     DMA_FROM_DEVICE);
		cmdp->saved_dma_handle	= sg_dma_address(pSRB->pSegmentList);

		/* TODO: error handling */
		if (pSRB->SGcount != 1)
			error = 1;
		DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __func__, pcmd->sense_buffer, cmdp->saved_dma_handle));
	/* Map SG list */
	} else if (scsi_sg_count(pcmd)) {
		int nseg;

		nseg = scsi_dma_map(pcmd);

		pSRB->pSegmentList	= scsi_sglist(pcmd);
		pSRB->SGcount		= nseg;

		/* TODO: error handling */
		if (nseg < 0)
			error = 1;
		DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\
			      __func__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
	/* Map single segment */
	} else
		pSRB->SGcount = 0;

	return error;
}

/* Remove pci mapping
 * Counterpart of dc390_pci_map(); SRBFlag != 0 is taken to mean the
 * sense buffer was mapped (AUTO_REQSENSE path).
 */
static void dc390_pci_unmap (struct dc390_srb* pSRB)
{
	struct scsi_cmnd *pcmd = pSRB->pcmd;
	struct pci_dev *pdev = pSRB->pSRBDCB->pDCBACB->pdev;
	DEBUG1(dc390_cmd_scp_t* cmdp = ((dc390_cmd_scp_t*)(&pcmd->SCp)));

	if (pSRB->SRBFlag) {
		pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE);
		DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __func__, cmdp->saved_dma_handle));
	} else {
		scsi_dma_unmap(pcmd);
		DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n",
			      __func__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
	}
}

/* Return the SRB's tag number to the DCB's tag bitmap, if one was assigned. */
static void __inline__
dc390_freetag (struct dc390_dcb* pDCB, struct dc390_srb* pSRB)
{
	if (pSRB->TagNumber != SCSI_NO_TAG) {
		pDCB->TagMask &= ~(1 << pSRB->TagNumber);   /* free tag mask */
		pSRB->TagNumber = SCSI_NO_TAG;
	}
}

/* Start selection for the given SRB on the chip: program target/sync
 * registers, push IDENTIFY (+ optional tag message) into the SCSI FIFO,
 * optionally arm an SDTR negotiation, then issue the select command.
 * Returns 0 when the select was started, 1 when the command must be
 * retried later (already connected, just after reset, or an interrupt
 * is pending on the chip). Called with the host lock held.
 */
static int
dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB )
{
    struct scsi_cmnd *scmd = pSRB->pcmd;
    struct scsi_device *sdev = scmd->device;
    u8 cmd, disc_allowed, try_sync_nego;
    char tag[2];

    pSRB->ScsiPhase = SCSI_NOP0;

    if (pACB->Connected)
    {
	// Should not happen normally
	printk (KERN_WARNING "DC390: Can't select when connected! (%08x,%02x)\n",
		pSRB->SRBState, pSRB->SRBFlag);
	pSRB->SRBState = SRB_READY;
	pACB->SelConn++;
	return 1;
    }
    if (time_before (jiffies, pACB->pScsiHost->last_reset))
    {
	DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n"));
	return 1;
    }
    /* KG: Moved pci mapping here */
    dc390_pci_map(pSRB);
    /* TODO: error handling */
    DC390_write8 (Scsi_Dest_ID, pDCB->TargetID);
    DC390_write8 (Sync_Period, pDCB->SyncPeriod);
    DC390_write8 (Sync_Offset, pDCB->SyncOffset);
    DC390_write8 (CtrlReg1, pDCB->CtrlR1);
    DC390_write8 (CtrlReg3, pDCB->CtrlR3);
    DC390_write8 (CtrlReg4, pDCB->CtrlR4);
    DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);		/* Flush FIFO */
    DEBUG1(printk (KERN_INFO "DC390: Start SCSI command: %02x (Sync:%02x)\n",\
	    scmd->cmnd[0], pDCB->SyncMode));

    /* Don't disconnect on AUTO_REQSENSE, cause it might be an
     * Contingent Allegiance Condition (6.6), where no tags should be used.
     * All other have to be allowed to disconnect to prevent Incorrect
     * Initiator Connection (6.8.2/6.5.2) */
    /* Changed KG, 99/06/06 */
    if (! (pSRB->SRBFlag & AUTO_REQSENSE))
	disc_allowed = pDCB->DevMode & EN_DISCONNECT_;
    else
	disc_allowed = 0;

    /* Negotiate sync only on LUN 0, when the device advertises SDTR, and
     * only on REQUEST_SENSE/auto-sense (before nego done) or INQUIRY. */
    if ((pDCB->SyncMode & SYNC_ENABLE) && pDCB->TargetLUN == 0 && sdev->sdtr &&
	(((scmd->cmnd[0] == REQUEST_SENSE || (pSRB->SRBFlag & AUTO_REQSENSE)) &&
	  !(pDCB->SyncMode & SYNC_NEGO_DONE)) || scmd->cmnd[0] == INQUIRY))
      try_sync_nego = 1;
    else
      try_sync_nego = 0;

    pSRB->MsgCnt = 0;
    cmd = SEL_W_ATN;
    DC390_write8 (ScsiFifo, IDENTIFY(disc_allowed, pDCB->TargetLUN));
    /* Change 99/05/31: Don't use tags when not disconnecting (BUSY) */
    if ((pDCB->SyncMode & EN_TAG_QUEUEING) && disc_allowed && scsi_populate_tag_msg(scmd, tag)) {
	DC390_write8(ScsiFifo, tag[0]);
	pDCB->TagMask |= 1 << tag[1];
	pSRB->TagNumber = tag[1];
	DC390_write8(ScsiFifo, tag[1]);
	DEBUG1(printk(KERN_INFO "DC390: Select w/DisCn for Cmd %li (SRB %p), block tag %02x\n",
		      scmd->serial_number, pSRB, tag[1]));
	cmd = SEL_W_ATN3;	/* select with ATN, 3 message bytes queued */
    } else {
	/* No TagQ */
//no_tag:
	DEBUG1(printk(KERN_INFO "DC390: Select w%s/DisCn for Cmd %li (SRB %p), No TagQ\n",
		      disc_allowed ? "" : "o", scmd->serial_number, pSRB));
    }

    pSRB->SRBState = SRB_START_;

    if (try_sync_nego)
      {
	u8 Sync_Off = pDCB->SyncOffset;
        DEBUG0(printk (KERN_INFO "DC390: NEW Sync Nego code triggered (%i %i)\n", pDCB->TargetID, pDCB->TargetLUN));
	/* Queue an SDTR extended message; sent later from MsgOut phase. */
	pSRB->MsgOutBuf[0] = EXTENDED_MESSAGE;
	pSRB->MsgOutBuf[1] = 3;
	pSRB->MsgOutBuf[2] = EXTENDED_SDTR;
	pSRB->MsgOutBuf[3] = pDCB->NegoPeriod;
	if (!(Sync_Off & 0x0f)) Sync_Off = SYNC_NEGO_OFFSET;
	pSRB->MsgOutBuf[4] = Sync_Off;
	pSRB->MsgCnt = 5;
	//pSRB->SRBState = SRB_MSGOUT_;
	pSRB->SRBState |= DO_SYNC_NEGO;
	cmd = SEL_W_ATN_STOP;	/* stop after message out, before command */
      }

    /* Command is written in CommandPhase, if SEL_W_ATN_STOP ... */
    if (cmd != SEL_W_ATN_STOP)
      {
	if( pSRB->SRBFlag & AUTO_REQSENSE )
	  {
	    /* Hand-built 6-byte REQUEST SENSE CDB. */
	    DC390_write8 (ScsiFifo, REQUEST_SENSE);
	    DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5);
	    DC390_write8 (ScsiFifo, 0);
	    DC390_write8 (ScsiFifo, 0);
	    DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE);
	    DC390_write8 (ScsiFifo, 0);
	    DEBUG1(printk (KERN_DEBUG "DC390: AutoReqSense !\n"));
	  }
	else	/* write cmnd to bus */
	  {
	    u8 *ptr; u8 i;
	    ptr = (u8 *)scmd->cmnd;
	    for (i = 0; i < scmd->cmd_len; i++)
	      DC390_write8 (ScsiFifo, *(ptr++));
	  }
      }
    DEBUG0(if (pACB->pActiveDCB)	\
	   printk (KERN_WARNING "DC390: ActiveDCB != 0\n"));
    DEBUG0(if (pDCB->pActiveSRB)	\
	   printk (KERN_WARNING "DC390: ActiveSRB != 0\n"));
    //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
    if (DC390_read8 (Scsi_Status) & INTERRUPT)
    {
	/* Chip raised an interrupt (e.g. reselection) while we were
	 * setting up: back out and let the ISR win. */
	dc390_freetag (pDCB, pSRB);
	DEBUG0(printk ("DC390: Interrupt during Start SCSI (pid %li, target %02i-%02i)\n",
		scmd->serial_number, scmd->device->id, scmd->device->lun));
	pSRB->SRBState = SRB_READY;
	//DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
	pACB->SelLost++;
	return 1;
    }
    DC390_write8 (ScsiCmd, cmd);
    pACB->pActiveDCB = pDCB;
    pDCB->pActiveSRB = pSRB;
    pACB->Connected = 1;
    pSRB->ScsiPhase = SCSI_NOP1;
    return 0;
}

/* Recovery for an INVALID_CMD chip interrupt: flush the FIFO if we were
 * in the middle of a select/message-out. */
static void __inline__
dc390_InvalidCmd(struct dc390_acb* pACB)
{
	if (pACB->pActiveDCB->pActiveSRB->SRBState & (SRB_START_ | SRB_MSGOUT))
		DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);
}

/* Main interrupt handler body. Reads and decodes the chip status
 * registers, dispatches bus events (reset, disconnect, reselect), and
 * for normal progress runs the phase0 (completed phase) handler followed
 * by the phase1 (next phase setup) handler. Takes the host lock itself.
 */
static irqreturn_t __inline__
DC390_Interrupt(void *dev_id)
{
    struct dc390_acb *pACB = dev_id;
    struct dc390_dcb *pDCB;
    struct dc390_srb *pSRB;
    u8  sstatus=0;
    u8  phase;
    void   (*stateV)( struct dc390_acb*, struct dc390_srb*, u8 *);
    u8  istate, istatus;

    sstatus = DC390_read8 (Scsi_Status);
    if( !(sstatus & INTERRUPT) )
	return IRQ_NONE;	/* not ours (shared IRQ line) */

    DEBUG1(printk (KERN_DEBUG "sstatus=%02x,", sstatus));

    //DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT);
    //dstatus = DC390_read8 (DMA_Status);
    //DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT);

    spin_lock_irq(pACB->pScsiHost->host_lock);

    istate = DC390_read8 (Intern_State);
    istatus = DC390_read8 (INT_Status); /* This clears Scsi_Status, Intern_State and INT_Status ! */

    DEBUG1(printk (KERN_INFO "Istatus(Res,Inv,Dis,Serv,Succ,ReS,SelA,Sel)=%02x,",istatus));
    dc390_laststatus &= ~0x00ffffff;
    dc390_laststatus |= /* dstatus<<24 | */ sstatus<<16 | istate<<8 | istatus;

    if (sstatus & ILLEGAL_OP_ERR)
    {
	printk ("DC390: Illegal Operation detected (%08x)!\n", dc390_laststatus);
	dc390_dumpinfo (pACB, pACB->pActiveDCB, pACB->pActiveDCB->pActiveSRB);
    }

    else if (istatus &  INVALID_CMD)
    {
	printk ("DC390: Invalid Command detected (%08x)!\n", dc390_laststatus);
	dc390_InvalidCmd( pACB );
	goto unlock;
    }

    if (istatus &  SCSI_RESET)
    {
	dc390_ScsiRstDetect( pACB );
	goto unlock;
    }

    if (istatus &  DISCONNECTED)
    {
	dc390_Disconnect( pACB );
	goto unlock;
    }

    if (istatus &  RESELECTED)
    {
	dc390_Reselect( pACB );
	goto unlock;
    }

    else if (istatus & (SELECTED | SEL_ATTENTION))
    {
	printk (KERN_ERR "DC390: Target mode not supported!\n");
	goto unlock;
    }

    if (istatus & (SUCCESSFUL_OP|SERVICE_REQUEST) )
    {
	pDCB = pACB->pActiveDCB;
	if (!pDCB)
	{
		printk (KERN_ERR "DC390: Suc. op/ Serv. req: pActiveDCB = 0!\n");
		goto unlock;
	}
	pSRB = pDCB->pActiveSRB;
	if( pDCB->DCBFlag & ABORT_DEV_ )
	  dc390_EnableMsgOut_Abort (pACB, pSRB);

	/* First finish the phase that just completed ... */
	phase = pSRB->ScsiPhase;
	DEBUG1(printk (KERN_INFO "DC390: [%i]%s(0) (%02x)\n", phase, dc390_p0_str[phase], sstatus));
	stateV = (void *) dc390_phase0[phase];
	( *stateV )( pACB, pSRB, &sstatus );

	/* ... then set up the new phase reported in sstatus bits 0-2. */
	pSRB->ScsiPhase = sstatus & 7;
	phase = (u8) sstatus & 7;
	DEBUG1(printk (KERN_INFO "DC390: [%i]%s(1) (%02x)\n", phase, dc390_p1_str[phase], sstatus));
	stateV = (void *) dc390_phase1[phase];
	( *stateV )( pACB, pSRB, &sstatus );
    }

 unlock:
    spin_unlock_irq(pACB->pScsiHost->host_lock);
    return IRQ_HANDLED;
}

/* request_irq() entry point; thin wrapper around DC390_Interrupt(). */
static irqreturn_t do_DC390_Interrupt(int irq, void *dev_id)
{
    irqreturn_t ret;
    DEBUG1(printk (KERN_INFO "DC390: Irq (%i) caught: ", irq));
    /* Locking is done in DC390_Interrupt */
    ret = DC390_Interrupt(dev_id);
    DEBUG1(printk (".. IRQ returned\n"));
    return ret;
}

/* Data-out phase completed: wait for the DMA engine to drain, then
 * either advance to the next SG segment (counter hit zero) or account
 * the residual still reported by the chip's FIFO + transfer counter. */
static void
dc390_DataOut_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    u8   sstatus;
    u32  ResidCnt;
    u8   dstate = 0;

    sstatus = *psstatus;

    if( !(pSRB->SRBState & SRB_XFERPAD) )
    {
	if( sstatus & (PARITY_ERR | ILLEGAL_OP_ERR) )
	    pSRB->SRBStatus |= PARITY_ERROR;

	if( sstatus & COUNT_2_ZERO )
	{
	    unsigned long timeout = jiffies + HZ;

	    /* Function called from the ISR with the host_lock held and interrupts disabled */
	    if (pSRB->SGToBeXferLen)
		while (time_before(jiffies, timeout) && !((dstate = DC390_read8 (DMA_Status)) & DMA_XFER_DONE)) {
		    /* drop the lock while busy-waiting for DMA_XFER_DONE */
		    spin_unlock_irq(pACB->pScsiHost->host_lock);
		    udelay(50);
		    spin_lock_irq(pACB->pScsiHost->host_lock);
		}
	    if (!time_before(jiffies, timeout))
		printk (KERN_CRIT "DC390: Deadlock in DataOut_0: DMA aborted unfinished: %06x bytes remain!!\n",
			DC390_read32 (DMA_Wk_ByteCntr));
	    dc390_laststatus &= ~0xff000000;
	    dc390_laststatus |= dstate << 24;
	    pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
	    pSRB->SGIndex++;
	    if( pSRB->SGIndex < pSRB->SGcount )
	    {
		pSRB->pSegmentList++;

		dc390_start_segment(pSRB);
	    }
	    else
		pSRB->SGToBeXferLen = 0;
	}
	else
	{
	    /* residual = bytes left in the chip FIFO + 24-bit transfer counter */
	    ResidCnt = ((u32) DC390_read8 (Current_Fifo) & 0x1f) +
		    (((u32) DC390_read8 (CtcReg_High) << 16) |
		     ((u32) DC390_read8 (CtcReg_Mid) << 8) |
		     (u32) DC390_read8 (CtcReg_Low));

	    dc390_advance_segment(pSRB, ResidCnt);
	}
    }
    if ((*psstatus & 7) != SCSI_DATA_OUT)
    {
	    DC390_write8 (DMA_Cmd, WRITE_DIRECTION+DMA_IDLE_CMD);
	    DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
    }
}

/* Data-in phase completed: analogous to dc390_DataOut_0, but on a phase
 * change it must also flush ("blast") bytes still buffered in the chip
 * FIFO into memory and pick up a possible single residual byte by hand. */
static void
dc390_DataIn_0(struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    u8   sstatus, residual, bval;
    u32  ResidCnt, i;
    unsigned long   xferCnt;

    sstatus = *psstatus;

    if( !(pSRB->SRBState & SRB_XFERPAD) )
    {
	if( sstatus & (PARITY_ERR | ILLEGAL_OP_ERR))
	    pSRB->SRBStatus |= PARITY_ERROR;

	if( sstatus & COUNT_2_ZERO )
	{
	    int dstate = 0;
	    unsigned long timeout = jiffies + HZ;

	    /* Function called from the ISR with the host_lock held and interrupts disabled */
	    if (pSRB->SGToBeXferLen)
		while (time_before(jiffies, timeout) && !((dstate = DC390_read8 (DMA_Status)) & DMA_XFER_DONE)) {
		    spin_unlock_irq(pACB->pScsiHost->host_lock);
		    udelay(50);
		    spin_lock_irq(pACB->pScsiHost->host_lock);
		}
	    if (!time_before(jiffies, timeout)) {
		printk (KERN_CRIT "DC390: Deadlock in DataIn_0: DMA aborted unfinished: %06x bytes remain!!\n",
			DC390_read32 (DMA_Wk_ByteCntr));
		printk (KERN_CRIT "DC390: DataIn_0: DMA State: %i\n", dstate);
	    }
	    dc390_laststatus &= ~0xff000000;
	    dc390_laststatus |= dstate << 24;
	    DEBUG1(ResidCnt = ((unsigned long) DC390_read8 (CtcReg_High) << 16)	\
		+ ((unsigned long) DC390_read8 (CtcReg_Mid) << 8)		\
		+ ((unsigned long) DC390_read8 (CtcReg_Low)));
	    DEBUG1(printk (KERN_DEBUG "Count_2_Zero (ResidCnt=%u,ToBeXfer=%lu),", ResidCnt, pSRB->SGToBeXferLen));

	    DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);

	    pSRB->TotalXferredLen += pSRB->SGToBeXferLen;
	    pSRB->SGIndex++;
	    if( pSRB->SGIndex < pSRB->SGcount )
	    {
		pSRB->pSegmentList++;

		dc390_start_segment(pSRB);
	    }
	    else
		pSRB->SGToBeXferLen = 0;
	}
	else	/* phase changed */
	{
	    residual = 0;
	    bval = DC390_read8 (Current_Fifo);
	    while( bval & 0x1f )	/* low 5 bits = bytes left in FIFO */
	    {
		DEBUG1(printk (KERN_DEBUG "Check for residuals,"));
		if( (bval & 0x1f) == 1 )
		{
		    /* exactly one byte left: re-read up to 0x100 times to see
		     * whether it drains; if not, treat it as a residual byte */
		    for(i=0; i < 0x100; i++)
		    {
			bval = DC390_read8 (Current_Fifo);
			if( !(bval & 0x1f) )
			    goto din_1;
			else if( i == 0x0ff )
			{
			    residual = 1;   /* ;1 residual byte */
			    goto din_1;
			}
		    }
		}
		else
		    bval = DC390_read8 (Current_Fifo);
	    }
din_1:
	    /* flush remaining FIFO contents to memory via DMA "blast" */
	    DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_BLAST_CMD);
	    for (i = 0xa000; i; i--)
	    {
		bval = DC390_read8 (DMA_Status);
		if (bval & BLAST_COMPLETE)
		    break;
	    }
	    /* It seems a DMA Blast abort isn't that bad ... */
	    if (!i) printk (KERN_ERR "DC390: DMA Blast aborted unfinished!\n");
	    //DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
	    dc390_laststatus &= ~0xff000000;
	    dc390_laststatus |= bval << 24;

	    DEBUG1(printk (KERN_DEBUG "Blast: Read %i times DMA_Status %02x", 0xa000-i, bval));

	    ResidCnt = (((u32) DC390_read8 (CtcReg_High) << 16) |
			((u32) DC390_read8 (CtcReg_Mid) << 8)) |
		    (u32) DC390_read8 (CtcReg_Low);

	    xferCnt = dc390_advance_segment(pSRB, ResidCnt);

	    if (residual) {
		size_t count = 1;
		size_t offset = pSRB->SGBusAddr - sg_dma_address(pSRB->pSegmentList);
		unsigned long flags;
		u8 *ptr;

		bval = DC390_read8 (ScsiFifo);	    /* get one residual byte */

		/* store it directly into the SG buffer via a kmap */
		local_irq_save(flags);
		ptr = scsi_kmap_atomic_sg(pSRB->pSegmentList, pSRB->SGcount, &offset, &count);
		if (likely(ptr)) {
		    *(ptr + offset) = bval;
		    scsi_kunmap_atomic_sg(ptr);
		}
		local_irq_restore(flags);
		WARN_ON(!ptr);

		/* 1 more byte read */
		xferCnt += dc390_advance_segment(pSRB, pSRB->SGToBeXferLen - 1);
	    }
	    DEBUG1(printk (KERN_DEBUG "Xfered: %lu, Total: %lu, Remaining: %lu\n", xferCnt,\
			   pSRB->TotalXferredLen, pSRB->SGToBeXferLen));
	}
    }
    if ((*psstatus & 7) != SCSI_DATA_IN)
    {
	    DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
	    DC390_write8 (DMA_Cmd, READ_DIRECTION+DMA_IDLE_CMD);
    }
}

/* Command phase completion: nothing to do. */
static void
dc390_Command_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
}

/* Status phase completed: read status byte + trailing message byte from
 * the FIFO, mark the SRB complete and ACK the message. */
static void
dc390_Status_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{

    pSRB->TargetStatus = DC390_read8 (ScsiFifo);
    //udelay (1);
    pSRB->EndMessage = DC390_read8 (ScsiFifo);	/* get message */

    *psstatus = SCSI_NOP0;
    pSRB->SRBState = SRB_COMPLETED;
    DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD);
}

/* Message-out phase completed: clear the phase for the special
 * unexpected-reselect / abort-sent cases. */
static void
dc390_MsgOut_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    if( pSRB->SRBState & (SRB_UNEXPECT_RESEL+SRB_ABORT_SENT) )
	*psstatus = SCSI_NOP0;
    //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
}

/* Re-program the chip's sync/control registers from the DCB and
 * propagate the settings to all LUNs of the target. */
static void __inline__
dc390_reprog (struct dc390_acb* pACB, struct dc390_dcb* pDCB)
{
  DC390_write8 (Sync_Period, pDCB->SyncPeriod);
  DC390_write8 (Sync_Offset, pDCB->SyncOffset);
  DC390_write8 (CtrlReg3, pDCB->CtrlR3);
  DC390_write8 (CtrlReg4, pDCB->CtrlR4);
  dc390_SetXferRate (pACB, pDCB);
}

#ifdef DC390_DEBUG0
/* Hex-dump a SCSI message buffer (debug builds only). */
static void
dc390_printMsg (u8 *MsgBuf, u8 len)
{
    int i;
    printk (" %02x", MsgBuf[0]);
    for (i = 1; i < len; i++)
	printk (" %02x", MsgBuf[i]);
    printk ("\n");
}
#endif

/* Assert ATN so the target enters message-out phase for our queued message. */
#define DC390_ENABLE_MSGOUT DC390_write8 (ScsiCmd, SET_ATN_CMD)

/* reject_msg */
static void __inline__
dc390_MsgIn_reject (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
  pSRB->MsgOutBuf[0] = MESSAGE_REJECT;
  pSRB->MsgCnt = 1;
  DC390_ENABLE_MSGOUT;
  DEBUG0 (printk (KERN_INFO "DC390: Reject message\n"));
}

/* abort command: queue an ABORT message and clear the DCB's abort flag. */
static void
dc390_EnableMsgOut_Abort ( struct dc390_acb* pACB, struct dc390_srb* pSRB )
{
    pSRB->MsgOutBuf[0] = ABORT;
    pSRB->MsgCnt = 1; DC390_ENABLE_MSGOUT;
    pSRB->pSRBDCB->DCBFlag &= ~ABORT_DEV_;
}

/* On reselection with a queue tag message: look up the SRB belonging to
 * the tag and make it active again; fall back to the temporary SRB with
 * an ABORT_TAG reply when the tag cannot be matched.
 * NOTE(review): scsi_find_tag() may return NULL; scmd->host_scribble is
 * dereferenced without a check — verify against possible bogus tags. */
static struct dc390_srb*
dc390_MsgIn_QTag (struct dc390_acb* pACB, struct dc390_dcb* pDCB, s8 tag)
{
  struct dc390_srb* pSRB = pDCB->pGoingSRB;

  if (pSRB)
    {
	struct scsi_cmnd *scmd = scsi_find_tag(pSRB->pcmd->device, tag);
	pSRB = (struct dc390_srb *)scmd->host_scribble;

	if (pDCB->DCBFlag & ABORT_DEV_)
	{
	  pSRB->SRBState = SRB_ABORT_SENT;
	  dc390_EnableMsgOut_Abort( pACB, pSRB );
	}

	if (!(pSRB->SRBState & SRB_DISCONNECT))
		goto mingx0;

	pDCB->pActiveSRB = pSRB;
	pSRB->SRBState = SRB_DATA_XFER;
    }
  else
    {
    mingx0:
      /* unknown/unexpected tag: answer with ABORT TAG via the temp SRB */
      pSRB = pACB->pTmpSRB;
      pSRB->SRBState = SRB_UNEXPECT_RESEL;
      pDCB->pActiveSRB = pSRB;
      pSRB->MsgOutBuf[0] = ABORT_TAG;
      pSRB->MsgCnt = 1; DC390_ENABLE_MSGOUT;
    }
  return pSRB;
}

/* set async transfer mode: clear all sync state and re-program the chip. */
static void
dc390_MsgIn_set_async (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
  struct dc390_dcb* pDCB = pSRB->pSRBDCB;
  if (!(pSRB->SRBState & DO_SYNC_NEGO))
    printk (KERN_INFO "DC390: Target %i initiates Non-Sync?\n", pDCB->TargetID);
  pSRB->SRBState &= ~DO_SYNC_NEGO;
  pDCB->SyncMode &= ~(SYNC_ENABLE+SYNC_NEGO_DONE);
  pDCB->SyncPeriod = 0;
  pDCB->SyncOffset = 0;
  //pDCB->NegoPeriod = 50; /* 200ns <=> 5 MHz */
  pDCB->CtrlR3 = FAST_CLK;	/* fast clock / normal scsi */
  pDCB->CtrlR4 &= 0x3f;
  pDCB->CtrlR4 |= pACB->glitch_cfg;	/* glitch eater */
  dc390_reprog (pACB, pDCB);
}

/* set sync transfer mode
 * Handle an incoming SDTR message: if the target initiated it, reply
 * with a corrected SDTR; then compute and store the chip's sync period,
 * offset and glitch-eater settings, and re-program the chip. */
static void
dc390_MsgIn_set_sync (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
  u8 bval;
  u16 wval, wval1;
  struct dc390_dcb* pDCB = pSRB->pSRBDCB;
  u8 oldsyncperiod = pDCB->SyncPeriod;
  u8 oldsyncoffset = pDCB->SyncOffset;

  if (!(pSRB->SRBState & DO_SYNC_NEGO))
    {
      printk (KERN_INFO "DC390: Target %i initiates Sync: %ins %i ... answer ...\n",
	      pDCB->TargetID, pSRB->MsgInBuf[3]<<2, pSRB->MsgInBuf[4]);

      /* reject */
      //dc390_MsgIn_reject (pACB, pSRB);
      //return dc390_MsgIn_set_async (pACB, pSRB);

      /* Reply with corrected SDTR Message */
      if (pSRB->MsgInBuf[4] > 15)
	{
	  printk (KERN_INFO "DC390: Lower Sync Offset to 15\n");
	  pSRB->MsgInBuf[4] = 15;
	}
      if (pSRB->MsgInBuf[3] < pDCB->NegoPeriod)
	{
	  printk (KERN_INFO "DC390: Set sync nego period to %ins\n", pDCB->NegoPeriod << 2);
	  pSRB->MsgInBuf[3] = pDCB->NegoPeriod;
	}
      memcpy (pSRB->MsgOutBuf, pSRB->MsgInBuf, 5);
      pSRB->MsgCnt = 5;
      DC390_ENABLE_MSGOUT;
    }

  pSRB->SRBState &= ~DO_SYNC_NEGO;
  pDCB->SyncMode |= SYNC_ENABLE+SYNC_NEGO_DONE;
  pDCB->SyncOffset &= 0x0f0;
  pDCB->SyncOffset |= pSRB->MsgInBuf[4];
  pDCB->NegoPeriod = pSRB->MsgInBuf[3];

  /* period is in 4ns units; round up to a multiple of 25ns clock ticks */
  wval = (u16) pSRB->MsgInBuf[3];
  wval = wval << 2; wval -= 3; wval1 = wval / 25;	/* compute speed */
  if( (wval1 * 25) != wval) wval1++;
  bval = FAST_CLK+FAST_SCSI;	/* fast clock / fast scsi */

  pDCB->CtrlR4 &= 0x3f;		/* Glitch eater: 12ns less than normal */
  if (pACB->glitch_cfg != NS_TO_GLITCH(0))
    pDCB->CtrlR4 |= NS_TO_GLITCH(((GLITCH_TO_NS(pACB->glitch_cfg)) - 1));
  else
    pDCB->CtrlR4 |= NS_TO_GLITCH(0);
  if (wval1 < 4) pDCB->CtrlR4 |= NS_TO_GLITCH(0); /* Ultra */

  if (wval1 >= 8)
    {
      wval1--;	/* Timing computation differs by 1 from FAST_SCSI */
      bval = FAST_CLK;		/* fast clock / normal scsi */
      pDCB->CtrlR4 |= pACB->glitch_cfg; 	/* glitch eater */
    }

  pDCB->CtrlR3 = bval;
  pDCB->SyncPeriod = (u8)wval1;

  if ((oldsyncperiod != wval1 || oldsyncoffset != pDCB->SyncOffset) && pDCB->TargetLUN == 0)
    {
      if (! (bval & FAST_SCSI)) wval1++;
      printk (KERN_INFO "DC390: Target %i: Sync transfer %i.%1i MHz, Offset %i\n", pDCB->TargetID,
	      40/wval1, ((40%wval1)*10+wval1/2)/wval1, pDCB->SyncOffset & 0x0f);
    }

  dc390_reprog (pACB, pDCB);
}


/* handle RESTORE_PTR
 * Rewind the SG bookkeeping to the position recorded in Saved_Ptr by
 * walking the scatterlist from the start. */
/* This doesn't look very healthy... to-be-fixed (KG, 98/11/28) */
static void
dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
{
    struct scsi_cmnd *pcmd = pSRB->pcmd;
    struct scatterlist *psgl;
    pSRB->TotalXferredLen = 0;
    pSRB->SGIndex = 0;
    if (scsi_sg_count(pcmd)) {
	size_t saved;
	pSRB->pSegmentList = scsi_sglist(pcmd);
	psgl = pSRB->pSegmentList;
	//dc390_pci_sync(pSRB);

	/* skip whole segments that lie entirely before Saved_Ptr */
	while (pSRB->TotalXferredLen + (unsigned long) sg_dma_len(psgl) < pSRB->Saved_Ptr)
	{
	    pSRB->TotalXferredLen += (unsigned long) sg_dma_len(psgl);
	    pSRB->SGIndex++;
	    if( pSRB->SGIndex < pSRB->SGcount )
	    {
		pSRB->pSegmentList++;

		dc390_start_segment(pSRB);
	    }
	    else
		pSRB->SGToBeXferLen = 0;
	}

	/* then advance partway into the current segment */
	saved = pSRB->Saved_Ptr - pSRB->TotalXferredLen;
	pSRB->SGToBeXferLen -= saved;
	pSRB->SGBusAddr += saved;
	printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n",
		pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr);

    } else {
	 pSRB->SGcount = 0;
	 printk (KERN_INFO "DC390: RESTORE_PTR message for Transfer without Scatter-Gather ??\n");
    }

    pSRB->TotalXferredLen = pSRB->Saved_Ptr;
}


/* According to the docs, the AM53C974 reads the message and
 * generates a Successful Operation IRQ before asserting ACK for
 * the last byte (how does it know whether it's the last ?) */
/* The old code handled it in another way, indicating, that on
 * every message byte an IRQ is generated and every byte has to
 * be manually ACKed. Hmmm ?  (KG, 98/11/28) */
/* The old implementation was correct. Sigh!
 */

/* Check if the message is complete */
static u8 __inline__
dc390_MsgIn_complete (u8 *msgbuf, u32 len)
{
  if (*msgbuf == EXTENDED_MESSAGE)
  {
	/* extended message: byte 1 carries the remaining length */
	if (len < 2) return 0;
	if (len < msgbuf[1] + 2) return 0;
  }
  else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) // two byte messages
	if (len < 2) return 0;
  return 1;
}

/* read and eval received messages
 * Accumulates one message byte per call into MsgInBuf; once the message
 * is complete it is decoded and acted upon, then the counter is reset.
 * Always finishes by ACKing the byte just read. */
static void
dc390_MsgIn_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    struct dc390_dcb*   pDCB = pACB->pActiveDCB;

    /* Read the msg */

    pSRB->MsgInBuf[pACB->MsgLen++] = DC390_read8 (ScsiFifo);
    //pSRB->SRBState = 0;

    /* Msg complete ? */
    if (dc390_MsgIn_complete (pSRB->MsgInBuf, pACB->MsgLen))
      {
	DEBUG0 (printk (KERN_INFO "DC390: MsgIn:"); dc390_printMsg (pSRB->MsgInBuf, pACB->MsgLen));
	/* Now eval the msg */
	switch (pSRB->MsgInBuf[0])
	  {
	  case DISCONNECT:
	    pSRB->SRBState = SRB_DISCONNECT; break;

	  case SIMPLE_QUEUE_TAG:
	  case HEAD_OF_QUEUE_TAG:
	  case ORDERED_QUEUE_TAG:
	    pSRB = dc390_MsgIn_QTag (pACB, pDCB, pSRB->MsgInBuf[1]);
	    break;

	  case MESSAGE_REJECT:
	    DC390_write8 (ScsiCmd, RESET_ATN_CMD);
	    pDCB->NegoPeriod = 50; /* 200ns <=> 5 MHz */
	    if( pSRB->SRBState & DO_SYNC_NEGO)
	      dc390_MsgIn_set_async (pACB, pSRB);
	    break;

	  case EXTENDED_MESSAGE:
	    /* reject every extended msg but SDTR */
	    if (pSRB->MsgInBuf[1] != 3 || pSRB->MsgInBuf[2] != EXTENDED_SDTR)
	      dc390_MsgIn_reject (pACB, pSRB);
	    else
	      {
		if (pSRB->MsgInBuf[3] == 0 || pSRB->MsgInBuf[4] == 0)
		  dc390_MsgIn_set_async (pACB, pSRB);
		else
		  dc390_MsgIn_set_sync (pACB, pSRB);
	      }

	    /* falls through into COMMAND_COMPLETE */
	    // nothing has to be done
	  case COMMAND_COMPLETE: break;

	    // SAVE POINTER may be ignored as we have the struct dc390_srb* associated with the
	    // scsi command. Thanks, Gerard, for pointing it out.
	  case SAVE_POINTERS:
	    pSRB->Saved_Ptr = pSRB->TotalXferredLen;
	    break;
	    // The device might want to restart transfer with a RESTORE
	  case RESTORE_POINTERS:
	    DEBUG0(printk ("DC390: RESTORE POINTER message received ... try to handle\n"));
	    dc390_restore_ptr (pACB, pSRB);
	    break;

	    // reject unknown messages
	  default: dc390_MsgIn_reject (pACB, pSRB);
	  }

	/* Clear counter and MsgIn state */
	pSRB->SRBState &= ~SRB_MSGIN;
	pACB->MsgLen = 0;
      }

    *psstatus = SCSI_NOP0;
    DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD);
    //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
}

/* Common setup for entering a data phase (ioDir selects read or write):
 * program the transfer counters and DMA address for the current SG
 * segment, or — when all segments are done — pad the transfer. Also
 * contains recovery for the "active SRB is the temporary SRB" tag-queue
 * error case. */
static void
dc390_DataIO_Comm( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 ioDir)
{
    unsigned long  lval;
    struct dc390_dcb*   pDCB = pACB->pActiveDCB;

    if (pSRB == pACB->pTmpSRB)
    {
	if (pDCB)
		printk(KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) (%02i-%i)\n", pDCB->TargetID, pDCB->TargetLUN);
	else
		printk(KERN_ERR "DC390: pSRB == pTmpSRB! (TagQ Error?) (DCB 0!)\n");

	/* Try to recover - some broken disks react badly to tagged INQUIRY */
	if (pDCB && pACB->scan_devices && pDCB->GoingSRBCnt == 1) {
		pSRB = pDCB->pGoingSRB;
		pDCB->pActiveSRB = pSRB;
	} else {
		pSRB->pSRBDCB = pDCB;
		dc390_EnableMsgOut_Abort(pACB, pSRB);
		if (pDCB)
			pDCB->DCBFlag |= ABORT_DEV;	/* NOTE(review): elsewhere ABORT_DEV_ is used — verify intended flag */
		return;
	}
    }

    if( pSRB->SGIndex < pSRB->SGcount )
    {
	DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
	if( !pSRB->SGToBeXferLen )
	{
	    dc390_start_segment(pSRB);

	    DEBUG1(printk (KERN_DEBUG " DC390: Next SG segment."));
	}
	/* program the chip's 24-bit transfer counter, low byte first */
	lval = pSRB->SGToBeXferLen;
	DEBUG1(printk (KERN_DEBUG " DC390: Start transfer: %li bytes (address %08lx)\n", lval, pSRB->SGBusAddr));
	DC390_write8 (CtcReg_Low, (u8) lval);
	lval >>= 8;
	DC390_write8 (CtcReg_Mid, (u8) lval);
	lval >>= 8;
	DC390_write8 (CtcReg_High, (u8) lval);

	DC390_write32 (DMA_XferCnt, pSRB->SGToBeXferLen);
	DC390_write32 (DMA_XferAddr, pSRB->SGBusAddr);

	//DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
	pSRB->SRBState = SRB_DATA_XFER;

	DC390_write8 (ScsiCmd, DMA_COMMAND+INFO_XFER_CMD);

	DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir);
	//DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT));
	//DEBUG1(printk (KERN_DEBUG "DC390: DMA_Status: %02x\n", DC390_read8 (DMA_Status)));
	//DEBUG1(DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT));
    }
    else    /* xfer pad */
    {
	if( pSRB->SGcount )
	{
	    /* target wants more/less data than we have: over/underrun */
	    pSRB->AdaptStatus = H_OVER_UNDER_RUN;
	    pSRB->SRBStatus |= OVER_RUN;
	    DEBUG0(printk (KERN_WARNING " DC390: Overrun -"));
	}
	DEBUG0(printk (KERN_WARNING " Clear transfer pad \n"));
	DC390_write8 (CtcReg_Low, 0);
	DC390_write8 (CtcReg_Mid, 0);
	DC390_write8 (CtcReg_High, 0);

	pSRB->SRBState |= SRB_XFERPAD;
	DC390_write8 (ScsiCmd, DMA_COMMAND+XFER_PAD_BYTE);
/*
	DC390_write8 (DMA_Cmd, DMA_IDLE_CMD | ioDir);
	DC390_write8 (DMA_Cmd, DMA_START_CMD | ioDir);
*/
    }
}

/* Enter data-out phase. */
static void
dc390_DataOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    dc390_DataIO_Comm (pACB, pSRB, WRITE_DIRECTION);
}

/* Enter data-in phase. */
static void
dc390_DataInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    dc390_DataIO_Comm (pACB, pSRB, READ_DIRECTION);
}

/* Enter command phase: write the CDB (or a hand-built REQUEST SENSE for
 * auto-sense) into the SCSI FIFO and start the information transfer. */
static void
dc390_CommandPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    struct dc390_dcb*   pDCB;
    u8  i, cnt;
    u8     *ptr;

    DC390_write8 (ScsiCmd, RESET_ATN_CMD);
    DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
    if( !(pSRB->SRBFlag & AUTO_REQSENSE) )
    {
	cnt = (u8) pSRB->pcmd->cmd_len;
	ptr = (u8 *) pSRB->pcmd->cmnd;
	for(i=0; i < cnt; i++)
	    DC390_write8 (ScsiFifo, *(ptr++));
    }
    else
    {
	DC390_write8 (ScsiFifo, REQUEST_SENSE);
	pDCB = pACB->pActiveDCB;
	DC390_write8 (ScsiFifo, pDCB->TargetLUN << 5);
	DC390_write8 (ScsiFifo, 0);
	DC390_write8 (ScsiFifo, 0);
	DC390_write8 (ScsiFifo, SCSI_SENSE_BUFFERSIZE);
	DC390_write8 (ScsiFifo, 0);
	DEBUG0(printk(KERN_DEBUG "DC390: AutoReqSense (CmndPhase)!\n"));
    }
    pSRB->SRBState = SRB_COMMAND;
    DC390_write8 (ScsiCmd, INFO_XFER_CMD);
}

/* Enter status phase: expect status byte + message, completed by the
 * INITIATOR_CMD_CMPLTE chip command. */
static void
dc390_StatusPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
    pSRB->SRBState = SRB_STATUS;
    DC390_write8 (ScsiCmd, INITIATOR_CMD_CMPLTE);
    //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
}

/* Enter message-out phase: send the queued message bytes; with nothing
 * queued, send ABORT (or fall into the legacy inline SDTR path for
 * INQUIRY/REQUEST SENSE with sync enabled). */
static void
dc390_MsgOutPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    u8   bval, i, cnt;
    u8     *ptr;
    struct dc390_dcb*    pDCB;

    DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
    pDCB = pACB->pActiveDCB;
    if( !(pSRB->SRBState & SRB_MSGOUT) )
    {
	cnt = pSRB->MsgCnt;
	if( cnt )
	{
	    ptr = (u8 *) pSRB->MsgOutBuf;
	    for(i=0; i < cnt; i++)
		DC390_write8 (ScsiFifo, *(ptr++));
	    pSRB->MsgCnt = 0;
	    if( (pDCB->DCBFlag & ABORT_DEV_) &&
		(pSRB->MsgOutBuf[0] == ABORT) )
		pSRB->SRBState = SRB_ABORT_SENT;
	}
	else
	{
	    bval = ABORT;	/* ??? MSG_NOP */
	    if( (pSRB->pcmd->cmnd[0] == INQUIRY ) ||
		(pSRB->pcmd->cmnd[0] == REQUEST_SENSE) ||
		(pSRB->SRBFlag & AUTO_REQSENSE) )
	    {
		if( pDCB->SyncMode & SYNC_ENABLE )
		    goto  mop1;
	    }
	    DC390_write8 (ScsiFifo, bval);
	}
	DC390_write8 (ScsiCmd, INFO_XFER_CMD);
    }
    else
    {
  mop1:
	printk (KERN_ERR "DC390: OLD Sync Nego code triggered! (%i %i)\n", pDCB->TargetID, pDCB->TargetLUN);
	DC390_write8 (ScsiFifo, EXTENDED_MESSAGE);
	DC390_write8 (ScsiFifo, 3);	/*    ;length of extended msg */
	DC390_write8 (ScsiFifo, EXTENDED_SDTR);	/*    ; sync nego */
	DC390_write8 (ScsiFifo, pDCB->NegoPeriod);
	if (pDCB->SyncOffset & 0x0f)
		    DC390_write8 (ScsiFifo, pDCB->SyncOffset);
	else
		    DC390_write8 (ScsiFifo, SYNC_NEGO_OFFSET);
	pSRB->SRBState |= DO_SYNC_NEGO;
	DC390_write8 (ScsiCmd, INFO_XFER_CMD);
    }
}

/* Enter message-in phase: arm SRB_MSGIN so dc390_MsgIn_0 collects bytes. */
static void
dc390_MsgInPhase( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
    DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
    if( !(pSRB->SRBState & SRB_MSGIN) )
    {
	pSRB->SRBState &= ~SRB_DISCONNECT;
	pSRB->SRBState |= SRB_MSGIN;
    }
    DC390_write8 (ScsiCmd, INFO_XFER_CMD);
    //DC390_write8 (DMA_Cmd, DMA_IDLE_CMD);
}

/* Phase table filler: nothing to do for this phase. */
static void
dc390_Nop_0( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
}

/* Phase table filler (invalid phase slot): nothing to do. */
static void
dc390_Nop_1( struct dc390_acb* pACB, struct dc390_srb* pSRB, u8 *psstatus)
{
}

/* Copy the sync settings negotiated on LUN 0 of a target to the DCBs of
 * all its other LUNs (skipped while scanning). */
static void
dc390_SetXferRate( struct dc390_acb* pACB, struct dc390_dcb* pDCB )
{
    u8  bval, i, cnt;
    struct dc390_dcb*   ptr;

    if( !(pDCB->TargetLUN) )
    {
	if( !pACB->scan_devices )
	{
	    ptr = pACB->pLinkDCB;
	    cnt = pACB->DCBCnt;
	    bval = pDCB->TargetID;
	    for(i=0; i<cnt; i++)
	    {
		if( ptr->TargetID == bval )
		{
		    ptr->SyncPeriod = pDCB->SyncPeriod;
		    ptr->SyncOffset = pDCB->SyncOffset;
		    ptr->CtrlR3 = pDCB->CtrlR3;
		    ptr->CtrlR4 = pDCB->CtrlR4;
ptr->SyncMode = pDCB->SyncMode; } ptr = ptr->pNextDCB; } } } return; } static void dc390_Disconnect( struct dc390_acb* pACB ) { struct dc390_dcb *pDCB; struct dc390_srb *pSRB, *psrb; u8 i, cnt; DEBUG0(printk(KERN_INFO "DISC,")); if (!pACB->Connected) printk(KERN_ERR "DC390: Disconnect not-connected bus?\n"); pACB->Connected = 0; pDCB = pACB->pActiveDCB; if (!pDCB) { DEBUG0(printk(KERN_ERR "ACB:%p->ActiveDCB:%p IOPort:%04x IRQ:%02x !\n",\ pACB, pDCB, pACB->IOPortBase, pACB->IRQLevel)); mdelay(400); DC390_read8 (INT_Status); /* Reset Pending INT */ DC390_write8 (ScsiCmd, EN_SEL_RESEL); return; } DC390_write8 (ScsiCmd, EN_SEL_RESEL); pSRB = pDCB->pActiveSRB; pACB->pActiveDCB = NULL; pSRB->ScsiPhase = SCSI_NOP0; if( pSRB->SRBState & SRB_UNEXPECT_RESEL ) pSRB->SRBState = 0; else if( pSRB->SRBState & SRB_ABORT_SENT ) { pDCB->TagMask = 0; pDCB->DCBFlag = 0; cnt = pDCB->GoingSRBCnt; pDCB->GoingSRBCnt = 0; pSRB = pDCB->pGoingSRB; for( i=0; i < cnt; i++) { psrb = pSRB->pNextSRB; dc390_Free_insert (pACB, pSRB); pSRB = psrb; } pDCB->pGoingSRB = NULL; } else { if( (pSRB->SRBState & (SRB_START_+SRB_MSGOUT)) || !(pSRB->SRBState & (SRB_DISCONNECT+SRB_COMPLETED)) ) { /* Selection time out */ pSRB->AdaptStatus = H_SEL_TIMEOUT; pSRB->TargetStatus = 0; goto disc1; } else if (!(pSRB->SRBState & SRB_DISCONNECT) && (pSRB->SRBState & SRB_COMPLETED)) { disc1: dc390_freetag (pDCB, pSRB); pDCB->pActiveSRB = NULL; pSRB->SRBState = SRB_FREE; dc390_SRBdone( pACB, pDCB, pSRB); } } pACB->MsgLen = 0; } static void dc390_Reselect( struct dc390_acb* pACB ) { struct dc390_dcb* pDCB; struct dc390_srb* pSRB; u8 id, lun; DEBUG0(printk(KERN_INFO "RSEL,")); pACB->Connected = 1; pDCB = pACB->pActiveDCB; if( pDCB ) { /* Arbitration lost but Reselection won */ DEBUG0(printk ("DC390: (ActiveDCB != 0: Arb. lost but resel. 
won)!\n")); pSRB = pDCB->pActiveSRB; if( !( pACB->scan_devices ) ) { struct scsi_cmnd *pcmd = pSRB->pcmd; scsi_set_resid(pcmd, scsi_bufflen(pcmd)); SET_RES_DID(pcmd->result, DID_SOFT_ERROR); dc390_Going_remove(pDCB, pSRB); dc390_Free_insert(pACB, pSRB); pcmd->scsi_done (pcmd); DEBUG0(printk(KERN_DEBUG"DC390: Return SRB %p to free\n", pSRB)); } } /* Get ID */ lun = DC390_read8 (ScsiFifo); DEBUG0(printk ("Dev %02x,", lun)); if (!(lun & (1 << pACB->pScsiHost->this_id))) printk (KERN_ERR "DC390: Reselection must select host adapter: %02x!\n", lun); else lun ^= 1 << pACB->pScsiHost->this_id; /* Mask AdapterID */ id = 0; while (lun >>= 1) id++; /* Get LUN */ lun = DC390_read8 (ScsiFifo); if (!(lun & IDENTIFY_BASE)) printk (KERN_ERR "DC390: Resel: Expect identify message!\n"); lun &= 7; DEBUG0(printk ("(%02i-%i),", id, lun)); pDCB = dc390_findDCB (pACB, id, lun); if (!pDCB) { printk (KERN_ERR "DC390: Reselect from non existing device (%02i-%i)\n", id, lun); return; } pACB->pActiveDCB = pDCB; /* TagQ: We expect a message soon, so never mind the exact SRB */ if( pDCB->SyncMode & EN_TAG_QUEUEING ) { pSRB = pACB->pTmpSRB; pDCB->pActiveSRB = pSRB; } else { pSRB = pDCB->pActiveSRB; if( !pSRB || !(pSRB->SRBState & SRB_DISCONNECT) ) { pSRB= pACB->pTmpSRB; pSRB->SRBState = SRB_UNEXPECT_RESEL; printk (KERN_ERR "DC390: Reselect without outstanding cmnd (%02i-%i)\n", id, lun); pDCB->pActiveSRB = pSRB; dc390_EnableMsgOut_Abort ( pACB, pSRB ); } else { if( pDCB->DCBFlag & ABORT_DEV_ ) { pSRB->SRBState = SRB_ABORT_SENT; printk (KERN_INFO "DC390: Reselect: Abort (%02i-%i)\n", id, lun); dc390_EnableMsgOut_Abort( pACB, pSRB ); } else pSRB->SRBState = SRB_DATA_XFER; } } DEBUG1(printk (KERN_DEBUG "Resel SRB(%p): TagNum (%02x)\n", pSRB, pSRB->TagNumber)); pSRB->ScsiPhase = SCSI_NOP0; DC390_write8 (Scsi_Dest_ID, pDCB->TargetID); DC390_write8 (Sync_Period, pDCB->SyncPeriod); DC390_write8 (Sync_Offset, pDCB->SyncOffset); DC390_write8 (CtrlReg1, pDCB->CtrlR1); DC390_write8 (CtrlReg3, 
pDCB->CtrlR3); DC390_write8 (CtrlReg4, pDCB->CtrlR4); /* ; Glitch eater */ DC390_write8 (ScsiCmd, MSG_ACCEPTED_CMD); /* ;to release the /ACK signal */ } static int __inline__ dc390_RequestSense(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB) { struct scsi_cmnd *pcmd; pcmd = pSRB->pcmd; REMOVABLEDEBUG(printk(KERN_INFO "DC390: RequestSense(Cmd %02x, Id %02x, LUN %02x)\n",\ pcmd->cmnd[0], pDCB->TargetID, pDCB->TargetLUN)); pSRB->SRBFlag |= AUTO_REQSENSE; pSRB->SavedTotXLen = pSRB->TotalXferredLen; pSRB->AdaptStatus = 0; pSRB->TargetStatus = 0; /* CHECK_CONDITION<<1; */ /* We are called from SRBdone, original PCI mapping has been removed * already, new one is set up from StartSCSI */ pSRB->SGIndex = 0; pSRB->TotalXferredLen = 0; pSRB->SGToBeXferLen = 0; return dc390_StartSCSI(pACB, pDCB, pSRB); } static void dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB ) { u8 status; struct scsi_cmnd *pcmd; pcmd = pSRB->pcmd; /* KG: Moved pci_unmap here */ dc390_pci_unmap(pSRB); status = pSRB->TargetStatus; DEBUG0(printk (" SRBdone (%02x,%08x), SRB %p, pid %li\n", status, pcmd->result,\ pSRB, pcmd->serial_number)); if(pSRB->SRBFlag & AUTO_REQSENSE) { /* Last command was a Request Sense */ pSRB->SRBFlag &= ~AUTO_REQSENSE; pSRB->AdaptStatus = 0; pSRB->TargetStatus = SAM_STAT_CHECK_CONDITION; //pcmd->result = MK_RES(DRIVER_SENSE,DID_OK,0,status); if (status == SAM_STAT_CHECK_CONDITION) pcmd->result = MK_RES_LNX(0, DID_BAD_TARGET, 0, /*CHECK_CONDITION*/0); else /* Retry */ { if( pSRB->pcmd->cmnd[0] == TEST_UNIT_READY /* || pSRB->pcmd->cmnd[0] == START_STOP */) { /* Don't retry on TEST_UNIT_READY */ pcmd->result = MK_RES_LNX(DRIVER_SENSE, DID_OK, 0, SAM_STAT_CHECK_CONDITION); REMOVABLEDEBUG(printk(KERN_INFO "Cmd=%02x, Result=%08x, XferL=%08x\n",pSRB->pcmd->cmnd[0],\ (u32) pcmd->result, (u32) pSRB->TotalXferredLen)); } else { SET_RES_DRV(pcmd->result, DRIVER_SENSE); //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8); DEBUG0 
(printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->serial_number, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); pSRB->TotalXferredLen = 0; SET_RES_DID(pcmd->result, DID_SOFT_ERROR); } } goto cmd_done; } if( status ) { if (status == SAM_STAT_CHECK_CONDITION) { if (dc390_RequestSense(pACB, pDCB, pSRB)) { SET_RES_DID(pcmd->result, DID_ERROR); goto cmd_done; } return; } else if (status == SAM_STAT_TASK_SET_FULL) { scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1); DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->serial_number, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); pSRB->TotalXferredLen = 0; SET_RES_DID(pcmd->result, DID_SOFT_ERROR); } else if (status == SAM_STAT_BUSY && (pcmd->cmnd[0] == TEST_UNIT_READY || pcmd->cmnd[0] == INQUIRY) && pACB->scan_devices) { pSRB->AdaptStatus = 0; pSRB->TargetStatus = status; pcmd->result = MK_RES(0,0,pSRB->EndMessage,/*status*/0); } else { /* Another error */ pSRB->TotalXferredLen = 0; SET_RES_DID(pcmd->result, DID_SOFT_ERROR); goto cmd_done; } } else { /* Target status == 0 */ status = pSRB->AdaptStatus; if (status == H_OVER_UNDER_RUN) { pSRB->TargetStatus = 0; SET_RES_DID(pcmd->result,DID_OK); SET_RES_MSG(pcmd->result,pSRB->EndMessage); } else if (status == H_SEL_TIMEOUT) { pcmd->result = MK_RES(0, DID_NO_CONNECT, 0, 0); /* Devices are removed below ... 
*/ } else if( pSRB->SRBStatus & PARITY_ERROR) { //pcmd->result = MK_RES(0,DID_PARITY,pSRB->EndMessage,0); SET_RES_DID(pcmd->result,DID_PARITY); SET_RES_MSG(pcmd->result,pSRB->EndMessage); } else /* No error */ { pSRB->AdaptStatus = 0; pSRB->TargetStatus = 0; SET_RES_DID(pcmd->result,DID_OK); } } cmd_done: scsi_set_resid(pcmd, scsi_bufflen(pcmd) - pSRB->TotalXferredLen); dc390_Going_remove (pDCB, pSRB); /* Add to free list */ dc390_Free_insert (pACB, pSRB); DEBUG0(printk (KERN_DEBUG "DC390: SRBdone: done pid %li\n", pcmd->serial_number)); pcmd->scsi_done (pcmd); return; } /* Remove all SRBs from Going list and inform midlevel */ static void dc390_DoingSRB_Done(struct dc390_acb* pACB, struct scsi_cmnd *cmd) { struct dc390_dcb *pDCB, *pdcb; struct dc390_srb *psrb, *psrb2; int i; struct scsi_cmnd *pcmd; pDCB = pACB->pLinkDCB; pdcb = pDCB; if (! pdcb) return; do { psrb = pdcb->pGoingSRB; for (i = 0; i < pdcb->GoingSRBCnt; i++) { psrb2 = psrb->pNextSRB; pcmd = psrb->pcmd; dc390_Free_insert (pACB, psrb); psrb = psrb2; } pdcb->GoingSRBCnt = 0; pdcb->pGoingSRB = NULL; pdcb->TagMask = 0; pdcb = pdcb->pNextDCB; } while( pdcb != pDCB ); } static void dc390_ResetSCSIBus( struct dc390_acb* pACB ) { //DC390_write8 (ScsiCmd, RST_DEVICE_CMD); //udelay (250); //DC390_write8 (ScsiCmd, NOP_CMD); DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); DC390_write8 (ScsiCmd, RST_SCSI_BUS_CMD); pACB->Connected = 0; return; } static void dc390_ScsiRstDetect( struct dc390_acb* pACB ) { printk ("DC390: Rst_Detect: laststat = %08x\n", dc390_laststatus); //DEBUG0(printk(KERN_INFO "RST_DETECT,")); DC390_write8 (DMA_Cmd, DMA_IDLE_CMD); /* Unlock before ? 
*/ /* delay half a second */ udelay (1000); DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); pACB->pScsiHost->last_reset = jiffies + 5*HZ/2 + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; pACB->Connected = 0; if( pACB->ACBFlag & RESET_DEV ) pACB->ACBFlag |= RESET_DONE; else { /* Reset was issued by sb else */ pACB->ACBFlag |= RESET_DETECT; dc390_ResetDevParam( pACB ); dc390_DoingSRB_Done( pACB, NULL); //dc390_RecoverSRB( pACB ); pACB->pActiveDCB = NULL; pACB->ACBFlag = 0; } return; } static int DC390_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct scsi_device *sdev = cmd->device; struct dc390_acb *acb = (struct dc390_acb *)sdev->host->hostdata; struct dc390_dcb *dcb = sdev->hostdata; struct dc390_srb *srb; if (sdev->queue_depth <= dcb->GoingSRBCnt) goto device_busy; if (acb->pActiveDCB) goto host_busy; if (acb->ACBFlag & (RESET_DETECT|RESET_DONE|RESET_DEV)) goto host_busy; srb = acb->pFreeSRB; if (unlikely(srb == NULL)) goto host_busy; cmd->scsi_done = done; cmd->result = 0; acb->Cmds++; acb->pFreeSRB = srb->pNextSRB; srb->pNextSRB = NULL; srb->pSRBDCB = dcb; srb->pcmd = cmd; cmd->host_scribble = (char *)srb; srb->SGIndex = 0; srb->AdaptStatus = 0; srb->TargetStatus = 0; srb->MsgCnt = 0; srb->SRBStatus = 0; srb->SRBFlag = 0; srb->SRBState = 0; srb->TotalXferredLen = 0; srb->SGBusAddr = 0; srb->SGToBeXferLen = 0; srb->ScsiPhase = 0; srb->EndMessage = 0; srb->TagNumber = SCSI_NO_TAG; if (dc390_StartSCSI(acb, dcb, srb)) { dc390_Free_insert(acb, srb); goto host_busy; } dc390_Going_append(dcb, srb); return 0; host_busy: return SCSI_MLQUEUE_HOST_BUSY; device_busy: return SCSI_MLQUEUE_DEVICE_BUSY; } static DEF_SCSI_QCMD(DC390_queuecommand) static void dc390_dumpinfo (struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB) { struct pci_dev *pdev; u16 pstat; if (!pDCB) pDCB = pACB->pActiveDCB; if (!pSRB && pDCB) pSRB = pDCB->pActiveSRB; if (pSRB) { printk ("DC390: SRB: Xferred %08lx, Remain %08lx, State %08x, Phase %02x\n", 
pSRB->TotalXferredLen, pSRB->SGToBeXferLen, pSRB->SRBState, pSRB->ScsiPhase); printk ("DC390: AdpaterStatus: %02x, SRB Status %02x\n", pSRB->AdaptStatus, pSRB->SRBStatus); } printk ("DC390: Status of last IRQ (DMA/SC/Int/IRQ): %08x\n", dc390_laststatus); printk ("DC390: Register dump: SCSI block:\n"); printk ("DC390: XferCnt Cmd Stat IntS IRQS FFIS Ctl1 Ctl2 Ctl3 Ctl4\n"); printk ("DC390: %06x %02x %02x %02x", DC390_read8(CtcReg_Low) + (DC390_read8(CtcReg_Mid) << 8) + (DC390_read8(CtcReg_High) << 16), DC390_read8(ScsiCmd), DC390_read8(Scsi_Status), DC390_read8(Intern_State)); printk (" %02x %02x %02x %02x %02x %02x\n", DC390_read8(INT_Status), DC390_read8(Current_Fifo), DC390_read8(CtrlReg1), DC390_read8(CtrlReg2), DC390_read8(CtrlReg3), DC390_read8(CtrlReg4)); DC390_write32 (DMA_ScsiBusCtrl, WRT_ERASE_DMA_STAT | EN_INT_ON_PCI_ABORT); if (DC390_read8(Current_Fifo) & 0x1f) { printk ("DC390: FIFO:"); while (DC390_read8(Current_Fifo) & 0x1f) printk (" %02x", DC390_read8(ScsiFifo)); printk ("\n"); } printk ("DC390: Register dump: DMA engine:\n"); printk ("DC390: Cmd STrCnt SBusA WrkBC WrkAC Stat SBusCtrl\n"); printk ("DC390: %02x %08x %08x %08x %08x %02x %08x\n", DC390_read8(DMA_Cmd), DC390_read32(DMA_XferCnt), DC390_read32(DMA_XferAddr), DC390_read32(DMA_Wk_ByteCntr), DC390_read32(DMA_Wk_AddrCntr), DC390_read8(DMA_Status), DC390_read32(DMA_ScsiBusCtrl)); DC390_write32 (DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT); pdev = pACB->pdev; pci_read_config_word(pdev, PCI_STATUS, &pstat); printk ("DC390: Register dump: PCI Status: %04x\n", pstat); printk ("DC390: In case of driver trouble read Documentation/scsi/tmscsim.txt\n"); } static int DC390_abort(struct scsi_cmnd *cmd) { struct dc390_acb *pACB = (struct dc390_acb*) cmd->device->host->hostdata; struct dc390_dcb *pDCB = (struct dc390_dcb*) cmd->device->hostdata; scmd_printk(KERN_WARNING, cmd, "DC390: Abort command (pid %li)\n", cmd->serial_number); /* abort() is too stupid for already sent commands at the moment. 
* If it's called we are in trouble anyway, so let's dump some info * into the syslog at least. (KG, 98/08/20,99/06/20) */ dc390_dumpinfo(pACB, pDCB, NULL); pDCB->DCBFlag |= ABORT_DEV_; printk(KERN_INFO "DC390: Aborted pid %li\n", cmd->serial_number); return FAILED; } static void dc390_ResetDevParam( struct dc390_acb* pACB ) { struct dc390_dcb *pDCB, *pdcb; pDCB = pACB->pLinkDCB; if (! pDCB) return; pdcb = pDCB; do { pDCB->SyncMode &= ~SYNC_NEGO_DONE; pDCB->SyncPeriod = 0; pDCB->SyncOffset = 0; pDCB->TagMask = 0; pDCB->CtrlR3 = FAST_CLK; pDCB->CtrlR4 &= NEGATE_REQACKDATA | CTRL4_RESERVED | NEGATE_REQACK; pDCB->CtrlR4 |= pACB->glitch_cfg; pDCB = pDCB->pNextDCB; } while( pdcb != pDCB ); pACB->ACBFlag &= ~(RESET_DEV | RESET_DONE | RESET_DETECT); } static int DC390_bus_reset (struct scsi_cmnd *cmd) { struct dc390_acb* pACB = (struct dc390_acb*) cmd->device->host->hostdata; u8 bval; spin_lock_irq(cmd->device->host->host_lock); bval = DC390_read8(CtrlReg1) | DIS_INT_ON_SCSI_RST; DC390_write8(CtrlReg1, bval); /* disable IRQ on bus reset */ pACB->ACBFlag |= RESET_DEV; dc390_ResetSCSIBus(pACB); dc390_ResetDevParam(pACB); mdelay(1); pACB->pScsiHost->last_reset = jiffies + 3*HZ/2 + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; DC390_write8(ScsiCmd, CLEAR_FIFO_CMD); DC390_read8(INT_Status); /* Reset Pending INT */ dc390_DoingSRB_Done(pACB, cmd); pACB->pActiveDCB = NULL; pACB->ACBFlag = 0; bval = DC390_read8(CtrlReg1) & ~DIS_INT_ON_SCSI_RST; DC390_write8(CtrlReg1, bval); /* re-enable interrupt */ spin_unlock_irq(cmd->device->host->host_lock); return SUCCESS; } /** * dc390_slave_alloc - Called by the scsi mid layer to tell us about a new * scsi device that we need to deal with. * * @scsi_device: The new scsi device that we need to handle. 
*/ static int dc390_slave_alloc(struct scsi_device *scsi_device) { struct dc390_acb *pACB = (struct dc390_acb*) scsi_device->host->hostdata; struct dc390_dcb *pDCB, *pDCB2 = NULL; uint id = scsi_device->id; uint lun = scsi_device->lun; pDCB = kzalloc(sizeof(struct dc390_dcb), GFP_KERNEL); if (!pDCB) return -ENOMEM; if (!pACB->DCBCnt++) { pACB->pLinkDCB = pDCB; pACB->pDCBRunRobin = pDCB; } else { pACB->pLastDCB->pNextDCB = pDCB; } pDCB->pNextDCB = pACB->pLinkDCB; pACB->pLastDCB = pDCB; pDCB->pDCBACB = pACB; pDCB->TargetID = id; pDCB->TargetLUN = lun; /* * Some values are for all LUNs: Copy them * In a clean way: We would have an own structure for a SCSI-ID */ if (lun && (pDCB2 = dc390_findDCB(pACB, id, 0))) { pDCB->DevMode = pDCB2->DevMode; pDCB->SyncMode = pDCB2->SyncMode & SYNC_NEGO_DONE; pDCB->SyncPeriod = pDCB2->SyncPeriod; pDCB->SyncOffset = pDCB2->SyncOffset; pDCB->NegoPeriod = pDCB2->NegoPeriod; pDCB->CtrlR3 = pDCB2->CtrlR3; pDCB->CtrlR4 = pDCB2->CtrlR4; } else { u8 index = pACB->AdapterIndex; PEEprom prom = (PEEprom) &dc390_eepromBuf[index][id << 2]; pDCB->DevMode = prom->EE_MODE1; pDCB->NegoPeriod = (dc390_clock_period1[prom->EE_SPEED] * 25) >> 2; pDCB->CtrlR3 = FAST_CLK; pDCB->CtrlR4 = pACB->glitch_cfg | CTRL4_RESERVED; if (dc390_eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION) pDCB->CtrlR4 |= NEGATE_REQACKDATA | NEGATE_REQACK; } if (pDCB->DevMode & SYNC_NEGO_) pDCB->SyncMode |= SYNC_ENABLE; else { pDCB->SyncMode = 0; pDCB->SyncOffset &= ~0x0f; } pDCB->CtrlR1 = pACB->pScsiHost->this_id; if (pDCB->DevMode & PARITY_CHK_) pDCB->CtrlR1 |= PARITY_ERR_REPO; pACB->scan_devices = 1; scsi_device->hostdata = pDCB; return 0; } /** * dc390_slave_destroy - Called by the scsi mid layer to tell us about a * device that is going away. * * @scsi_device: The scsi device that we need to remove. 
*/ static void dc390_slave_destroy(struct scsi_device *scsi_device) { struct dc390_acb* pACB = (struct dc390_acb*) scsi_device->host->hostdata; struct dc390_dcb* pDCB = (struct dc390_dcb*) scsi_device->hostdata; struct dc390_dcb* pPrevDCB = pACB->pLinkDCB; pACB->scan_devices = 0; BUG_ON(pDCB->GoingSRBCnt > 1); if (pDCB == pACB->pLinkDCB) { if (pACB->pLastDCB == pDCB) { pDCB->pNextDCB = NULL; pACB->pLastDCB = NULL; } pACB->pLinkDCB = pDCB->pNextDCB; } else { while (pPrevDCB->pNextDCB != pDCB) pPrevDCB = pPrevDCB->pNextDCB; pPrevDCB->pNextDCB = pDCB->pNextDCB; if (pDCB == pACB->pLastDCB) pACB->pLastDCB = pPrevDCB; } if (pDCB == pACB->pActiveDCB) pACB->pActiveDCB = NULL; if (pDCB == pACB->pLinkDCB) pACB->pLinkDCB = pDCB->pNextDCB; if (pDCB == pACB->pDCBRunRobin) pACB->pDCBRunRobin = pDCB->pNextDCB; kfree(pDCB); pACB->DCBCnt--; } static int dc390_slave_configure(struct scsi_device *sdev) { struct dc390_acb *acb = (struct dc390_acb *)sdev->host->hostdata; struct dc390_dcb *dcb = (struct dc390_dcb *)sdev->hostdata; acb->scan_devices = 0; if (sdev->tagged_supported && (dcb->DevMode & TAG_QUEUEING_)) { dcb->SyncMode |= EN_TAG_QUEUEING; scsi_activate_tcq(sdev, acb->TagMaxNum); } return 0; } static struct scsi_host_template driver_template = { .module = THIS_MODULE, .proc_name = "tmscsim", .name = DC390_BANNER " V" DC390_VERSION, .slave_alloc = dc390_slave_alloc, .slave_configure = dc390_slave_configure, .slave_destroy = dc390_slave_destroy, .queuecommand = DC390_queuecommand, .eh_abort_handler = DC390_abort, .eh_bus_reset_handler = DC390_bus_reset, .can_queue = 1, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = 1, .use_clustering = ENABLE_CLUSTERING, .max_sectors = 0x4000, /* 8MiB = 16 * 1024 * 512 */ }; /*********************************************************************** * Functions for access to DC390 EEPROM * and some to emulate it * **********************************************************************/ static void __devinit dc390_eeprom_prepare_read(struct 
pci_dev *pdev, u8 cmd) { u8 carryFlag = 1, j = 0x80, bval; int i; for (i = 0; i < 9; i++) { if (carryFlag) { pci_write_config_byte(pdev, 0x80, 0x40); bval = 0xc0; } else bval = 0x80; udelay(160); pci_write_config_byte(pdev, 0x80, bval); udelay(160); pci_write_config_byte(pdev, 0x80, 0); udelay(160); carryFlag = (cmd & j) ? 1 : 0; j >>= 1; } } static u16 __devinit dc390_eeprom_get_data(struct pci_dev *pdev) { int i; u16 wval = 0; u8 bval; for (i = 0; i < 16; i++) { wval <<= 1; pci_write_config_byte(pdev, 0x80, 0x80); udelay(160); pci_write_config_byte(pdev, 0x80, 0x40); udelay(160); pci_read_config_byte(pdev, 0x00, &bval); if (bval == 0x22) wval |= 1; } return wval; } static void __devinit dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr) { u8 cmd = EEPROM_READ, i; for (i = 0; i < 0x40; i++) { pci_write_config_byte(pdev, 0xc0, 0); udelay(160); dc390_eeprom_prepare_read(pdev, cmd++); *ptr++ = dc390_eeprom_get_data(pdev); pci_write_config_byte(pdev, 0x80, 0); pci_write_config_byte(pdev, 0x80, 0); udelay(160); } } /* Override EEprom values with explicitly set values */ static void __devinit dc390_eeprom_override(u8 index) { u8 *ptr = (u8 *) dc390_eepromBuf[index], id; /* Adapter Settings */ if (tmscsim[0] != -2) ptr[EE_ADAPT_SCSI_ID] = (u8)tmscsim[0]; /* Adapter ID */ if (tmscsim[3] != -2) ptr[EE_MODE2] = (u8)tmscsim[3]; if (tmscsim[5] != -2) ptr[EE_DELAY] = tmscsim[5]; /* Reset delay */ if (tmscsim[4] != -2) ptr[EE_TAG_CMD_NUM] = (u8)tmscsim[4]; /* Tagged Cmds */ /* Device Settings */ for (id = 0; id < MAX_SCSI_ID; id++) { if (tmscsim[2] != -2) ptr[id << 2] = (u8)tmscsim[2]; /* EE_MODE1 */ if (tmscsim[1] != -2) ptr[(id << 2) + 1] = (u8)tmscsim[1]; /* EE_Speed */ } } static int __devinitdata tmscsim_def[] = { 7, 0 /* 10MHz */, PARITY_CHK_ | SEND_START_ | EN_DISCONNECT_ | SYNC_NEGO_ | TAG_QUEUEING_, MORE2_DRV | GREATER_1G | RST_SCSI_BUS | ACTIVE_NEGATION | LUN_CHECK, 3 /* 16 Tags per LUN */, 1 /* s delay after Reset */, }; /* Copy defaults over set values where missing 
*/ static void __devinit dc390_fill_with_defaults (void) { int i; for (i = 0; i < 6; i++) { if (tmscsim[i] < 0 || tmscsim[i] > 255) tmscsim[i] = tmscsim_def[i]; } /* Sanity checks */ if (tmscsim[0] > 7) tmscsim[0] = 7; if (tmscsim[1] > 7) tmscsim[1] = 4; if (tmscsim[4] > 5) tmscsim[4] = 4; if (tmscsim[5] > 180) tmscsim[5] = 180; } static void __devinit dc390_check_eeprom(struct pci_dev *pdev, u8 index) { u8 interpd[] = {1, 3, 5, 10, 16, 30, 60, 120}; u8 EEbuf[128]; u16 *ptr = (u16 *)EEbuf, wval = 0; int i; dc390_read_eeprom(pdev, ptr); memcpy(dc390_eepromBuf[index], EEbuf, EE_ADAPT_SCSI_ID); memcpy(&dc390_eepromBuf[index][EE_ADAPT_SCSI_ID], &EEbuf[REAL_EE_ADAPT_SCSI_ID], EE_LEN - EE_ADAPT_SCSI_ID); dc390_eepromBuf[index][EE_DELAY] = interpd[dc390_eepromBuf[index][EE_DELAY]]; for (i = 0; i < 0x40; i++, ptr++) wval += *ptr; /* no Tekram EEprom found */ if (wval != 0x1234) { int speed; printk(KERN_INFO "DC390_init: No EEPROM found! Trying default settings ...\n"); /* * XXX(hch): bogus, because we might have tekram and * non-tekram hbas in a single machine. 
*/ dc390_fill_with_defaults(); speed = dc390_clock_speed[tmscsim[1]]; printk(KERN_INFO "DC390: Used defaults: AdaptID=%i, SpeedIdx=%i (%i.%i MHz), " "DevMode=0x%02x, AdaptMode=0x%02x, TaggedCmnds=%i (%i), DelayReset=%is\n", tmscsim[0], tmscsim[1], speed / 10, speed % 10, (u8)tmscsim[2], (u8)tmscsim[3], tmscsim[4], 2 << (tmscsim[4]), tmscsim[5]); } } static void __devinit dc390_init_hw(struct dc390_acb *pACB, u8 index) { struct Scsi_Host *shost = pACB->pScsiHost; u8 dstate; /* Disable SCSI bus reset interrupt */ DC390_write8(CtrlReg1, DIS_INT_ON_SCSI_RST | shost->this_id); if (pACB->Gmode2 & RST_SCSI_BUS) { dc390_ResetSCSIBus(pACB); udelay(1000); shost->last_reset = jiffies + HZ/2 + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; } pACB->ACBFlag = 0; /* Reset Pending INT */ DC390_read8(INT_Status); /* 250ms selection timeout */ DC390_write8(Scsi_TimeOut, SEL_TIMEOUT); /* Conversion factor = 0 , 40MHz clock */ DC390_write8(Clk_Factor, CLK_FREQ_40MHZ); /* NOP cmd - clear command register */ DC390_write8(ScsiCmd, NOP_CMD); /* Enable Feature and SCSI-2 */ DC390_write8(CtrlReg2, EN_FEATURE+EN_SCSI2_CMD); /* Fast clock */ DC390_write8(CtrlReg3, FAST_CLK); /* Negation */ DC390_write8(CtrlReg4, pACB->glitch_cfg | /* glitch eater */ (dc390_eepromBuf[index][EE_MODE2] & ACTIVE_NEGATION) ? 
NEGATE_REQACKDATA : 0); /* Clear Transfer Count High: ID */ DC390_write8(CtcReg_High, 0); DC390_write8(DMA_Cmd, DMA_IDLE_CMD); DC390_write8(ScsiCmd, CLEAR_FIFO_CMD); DC390_write32(DMA_ScsiBusCtrl, EN_INT_ON_PCI_ABORT); dstate = DC390_read8(DMA_Status); DC390_write8(DMA_Status, dstate); } static int __devinit dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct dc390_acb *pACB; struct Scsi_Host *shost; unsigned long io_port; int error = -ENODEV, i; if (pci_enable_device(pdev)) goto out; pci_set_master(pdev); error = -ENOMEM; if (disable_clustering) driver_template.use_clustering = DISABLE_CLUSTERING; shost = scsi_host_alloc(&driver_template, sizeof(struct dc390_acb)); if (!shost) goto out_disable_device; pACB = (struct dc390_acb *)shost->hostdata; memset(pACB, 0, sizeof(struct dc390_acb)); dc390_check_eeprom(pdev, dc390_adapterCnt); dc390_eeprom_override(dc390_adapterCnt); io_port = pci_resource_start(pdev, 0); shost->this_id = dc390_eepromBuf[dc390_adapterCnt][EE_ADAPT_SCSI_ID]; shost->io_port = io_port; shost->n_io_port = 0x80; shost->irq = pdev->irq; shost->base = io_port; shost->unique_id = io_port; shost->last_reset = jiffies; pACB->pScsiHost = shost; pACB->IOPortBase = (u16) io_port; pACB->IRQLevel = pdev->irq; shost->max_id = 8; if (shost->max_id - 1 == dc390_eepromBuf[dc390_adapterCnt][EE_ADAPT_SCSI_ID]) shost->max_id--; if (dc390_eepromBuf[dc390_adapterCnt][EE_MODE2] & LUN_CHECK) shost->max_lun = 8; else shost->max_lun = 1; pACB->pFreeSRB = pACB->SRB_array; pACB->SRBCount = MAX_SRB_CNT; pACB->AdapterIndex = dc390_adapterCnt; pACB->TagMaxNum = 2 << dc390_eepromBuf[dc390_adapterCnt][EE_TAG_CMD_NUM]; pACB->Gmode2 = dc390_eepromBuf[dc390_adapterCnt][EE_MODE2]; for (i = 0; i < pACB->SRBCount-1; i++) pACB->SRB_array[i].pNextSRB = &pACB->SRB_array[i+1]; pACB->SRB_array[pACB->SRBCount-1].pNextSRB = NULL; pACB->pTmpSRB = &pACB->TmpSRB; pACB->sel_timeout = SEL_TIMEOUT; pACB->glitch_cfg = EATER_25NS; pACB->pdev = pdev; if 
(!request_region(io_port, shost->n_io_port, "tmscsim")) { printk(KERN_ERR "DC390: register IO ports error!\n"); goto out_host_put; } /* Reset Pending INT */ DC390_read8_(INT_Status, io_port); if (request_irq(pdev->irq, do_DC390_Interrupt, IRQF_SHARED, "tmscsim", pACB)) { printk(KERN_ERR "DC390: register IRQ error!\n"); goto out_release_region; } dc390_init_hw(pACB, dc390_adapterCnt); dc390_adapterCnt++; pci_set_drvdata(pdev, shost); error = scsi_add_host(shost, &pdev->dev); if (error) goto out_free_irq; scsi_scan_host(shost); return 0; out_free_irq: free_irq(pdev->irq, pACB); out_release_region: release_region(io_port, shost->n_io_port); out_host_put: scsi_host_put(shost); out_disable_device: pci_disable_device(pdev); out: return error; } /** * dc390_remove_one - Called to remove a single instance of the adapter. * * @dev: The PCI device to remove. */ static void __devexit dc390_remove_one(struct pci_dev *dev) { struct Scsi_Host *scsi_host = pci_get_drvdata(dev); unsigned long iflags; struct dc390_acb* pACB = (struct dc390_acb*) scsi_host->hostdata; u8 bval; scsi_remove_host(scsi_host); spin_lock_irqsave(scsi_host->host_lock, iflags); pACB->ACBFlag = RESET_DEV; bval = DC390_read8(CtrlReg1) | DIS_INT_ON_SCSI_RST; DC390_write8 (CtrlReg1, bval); /* disable interrupt */ if (pACB->Gmode2 & RST_SCSI_BUS) dc390_ResetSCSIBus(pACB); spin_unlock_irqrestore(scsi_host->host_lock, iflags); free_irq(scsi_host->irq, pACB); release_region(scsi_host->io_port, scsi_host->n_io_port); pci_disable_device(dev); scsi_host_put(scsi_host); pci_set_drvdata(dev, NULL); } static struct pci_device_id tmscsim_pci_tbl[] = { { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD53C974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { } }; MODULE_DEVICE_TABLE(pci, tmscsim_pci_tbl); static struct pci_driver dc390_driver = { .name = "tmscsim", .id_table = tmscsim_pci_tbl, .probe = dc390_probe_one, .remove = __devexit_p(dc390_remove_one), }; static int __init dc390_module_init(void) { if (!disable_clustering) { printk(KERN_INFO 
"DC390: clustering now enabled by default. If you get problems load\n"); printk(KERN_INFO " with \"disable_clustering=1\" and report to maintainers\n"); } if (tmscsim[0] == -1 || tmscsim[0] > 15) { tmscsim[0] = 7; tmscsim[1] = 4; tmscsim[2] = PARITY_CHK_ | TAG_QUEUEING_; tmscsim[3] = MORE2_DRV | GREATER_1G | RST_SCSI_BUS | ACTIVE_NEGATION; tmscsim[4] = 2; tmscsim[5] = 10; printk (KERN_INFO "DC390: Using safe settings.\n"); } return pci_register_driver(&dc390_driver); } static void __exit dc390_module_exit(void) { pci_unregister_driver(&dc390_driver); } module_init(dc390_module_init); module_exit(dc390_module_exit); #ifndef MODULE static int __init dc390_setup (char *str) { int ints[8],i, im; get_options(str, ARRAY_SIZE(ints), ints); im = ints[0]; if (im > 6) { printk (KERN_NOTICE "DC390: ignore extra params!\n"); im = 6; } for (i = 0; i < im; i++) tmscsim[i] = ints[i+1]; /* dc390_checkparams (); */ return 1; } __setup("tmscsim=", dc390_setup); #endif
gpl-2.0
faux123/Endeavoru
fs/ceph/locks.c
304
7645
#include <linux/ceph/ceph_debug.h>

#include <linux/file.h>
#include <linux/namei.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/pagelist.h>

/**
 * Implement fcntl and flock locking functions.
 */

/*
 * ceph_lock_message - send a lock request (or query) to the MDS
 *
 * Builds a filelock_change MDS request from @fl, issues it synchronously,
 * and — for GETFILELOCK — copies the conflicting lock the MDS reported
 * back into @fl.  Returns 0 or a negative errno.
 */
static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
			     int cmd, u8 wait, struct file_lock *fl)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_mds_request *req;
	int err;
	u64 length = 0;

	req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = igrab(inode);

	/* mds requires start and length rather than start and end */
	if (LLONG_MAX == fl->fl_end)
		length = 0;
	else
		length = fl->fl_end - fl->fl_start + 1;

	dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
	     "length: %llu, wait: %d, type: %d", (int)lock_type,
	     (int)operation, (u64)fl->fl_pid, fl->fl_start,
	     length, wait, fl->fl_type);

	req->r_args.filelock_change.rule = lock_type;
	req->r_args.filelock_change.type = cmd;
	req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
	/* This should be adjusted, but I'm not sure if
	   namespaces actually get id numbers*/
	req->r_args.filelock_change.pid_namespace =
		cpu_to_le64((u64)(unsigned long)fl->fl_nspid);
	req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start);
	req->r_args.filelock_change.length = cpu_to_le64(length);
	req->r_args.filelock_change.wait = wait;

	err = ceph_mdsc_do_request(mdsc, inode, req);

	if (operation == CEPH_MDS_OP_GETFILELOCK) {
		/* translate the MDS reply back into a struct file_lock */
		fl->fl_pid = le64_to_cpu(req->r_reply_info.filelock_reply->pid);
		if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
			fl->fl_type = F_RDLCK;
		else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type)
			fl->fl_type = F_WRLCK;
		else
			fl->fl_type = F_UNLCK;

		fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start);
		length = le64_to_cpu(req->r_reply_info.filelock_reply->start) +
			le64_to_cpu(req->r_reply_info.filelock_reply->length);

		/* a zero total length means "to EOF"; map back to fl_end = 0 */
		if (length >= 1)
			fl->fl_end = length - 1;
		else
			fl->fl_end = 0;
	}
	ceph_mdsc_put_request(req);
	dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
	     "length: %llu, wait: %d, type: %d, err code %d", (int)lock_type,
	     (int)operation, (u64)fl->fl_pid, fl->fl_start,
	     length, wait, fl->fl_type, err);
	return err;
}

/**
 * Attempt to set an fcntl lock.
 * For now, this just goes away to the server. Later it may be more awesome.
 */
int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
{
	u8 lock_cmd;
	int err;
	u8 wait = 0;
	u16 op = CEPH_MDS_OP_SETFILELOCK;

	/* NOTE(review): get_pid() takes a reference on the pid that is never
	 * explicitly dropped here — presumably released with the lock's
	 * lifetime; confirm against the generic lock code. */
	fl->fl_nspid = get_pid(task_tgid(current));
	dout("ceph_lock, fl_pid:%d", fl->fl_pid);

	/* set wait bit as appropriate, then make command as Ceph expects it*/
	if (F_SETLKW == cmd)
		wait = 1;
	if (F_GETLK == cmd)
		op = CEPH_MDS_OP_GETFILELOCK;

	if (F_RDLCK == fl->fl_type)
		lock_cmd = CEPH_LOCK_SHARED;
	else if (F_WRLCK == fl->fl_type)
		lock_cmd = CEPH_LOCK_EXCL;
	else
		lock_cmd = CEPH_LOCK_UNLOCK;

	err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, lock_cmd, wait, fl);
	if (!err) {
		if (op != CEPH_MDS_OP_GETFILELOCK) {
			dout("mds locked, locking locally");
			err = posix_lock_file(file, fl, NULL);
			if (err && (CEPH_MDS_OP_SETFILELOCK == op)) {
				/* undo! This should only happen if the kernel
				 * detects local deadlock. */
				ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
						  CEPH_LOCK_UNLOCK, 0, fl);
				dout("got %d on posix_lock_file, undid lock",
				     err);
			}
		}
	} else {
		dout("mds returned error code %d", err);
	}
	return err;
}

/*
 * ceph_flock - set/clear a BSD-style flock lock, server first then locally.
 * Undoes the server-side lock if the local flock_lock_file_wait() fails.
 */
int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
{
	u8 lock_cmd;
	int err;
	u8 wait = 1;

	fl->fl_nspid = get_pid(task_tgid(current));
	dout("ceph_flock, fl_pid:%d", fl->fl_pid);

	/* set wait bit, then clear it out of cmd*/
	if (cmd & LOCK_NB)
		wait = 0;
	cmd = cmd & (LOCK_SH | LOCK_EX | LOCK_UN);
	/* set command sequence that Ceph wants to see:
	   shared lock, exclusive lock, or unlock */
	if (LOCK_SH == cmd)
		lock_cmd = CEPH_LOCK_SHARED;
	else if (LOCK_EX == cmd)
		lock_cmd = CEPH_LOCK_EXCL;
	else
		lock_cmd = CEPH_LOCK_UNLOCK;

	err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
				file, lock_cmd, wait, fl);
	if (!err) {
		err = flock_lock_file_wait(file, fl);
		if (err) {
			ceph_lock_message(CEPH_LOCK_FLOCK,
					  CEPH_MDS_OP_SETFILELOCK,
					  file, CEPH_LOCK_UNLOCK, 0, fl);
			dout("got %d on flock_lock_file_wait, undid lock", err);
		}
	} else {
		dout("mds error code %d", err);
	}
	return err;
}

/**
 * Must be called with BKL already held. Fills in the passed
 * counter variables, so you can prepare pagelist metadata before calling
 * ceph_encode_locks.
 */
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
	struct file_lock *lock;

	*fcntl_count = 0;
	*flock_count = 0;

	for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
		if (lock->fl_flags & FL_POSIX)
			++(*fcntl_count);
		else if (lock->fl_flags & FL_FLOCK)
			++(*flock_count);
	}
	dout("counted %d flock locks and %d fcntl locks",
	     *flock_count, *fcntl_count);
}

/**
 * Encode the flock and fcntl locks for the given inode into the pagelist.
 * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
 * sequential flock locks.
 * Must be called with lock_flocks() already held.
 * If we encounter more of a specific lock type than expected,
 * we return the value 1.
 */
int ceph_encode_locks(struct inode *inode, struct ceph_pagelist *pagelist,
		      int num_fcntl_locks, int num_flock_locks)
{
	struct file_lock *lock;
	struct ceph_filelock cephlock;
	int err = 0;
	int seen_fcntl = 0;
	int seen_flock = 0;

	dout("encoding %d flock and %d fcntl locks", num_flock_locks,
	     num_fcntl_locks);
	/* NOTE(review): the counts are appended in host byte order while the
	 * per-lock fields are little-endian; whether this matters depends on
	 * what the MDS expects — confirm before changing the wire format. */
	err = ceph_pagelist_append(pagelist, &num_fcntl_locks, sizeof(u32));
	if (err)
		goto fail;
	for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
		if (lock->fl_flags & FL_POSIX) {
			++seen_fcntl;
			if (seen_fcntl > num_fcntl_locks) {
				err = -ENOSPC;
				goto fail;
			}
			err = lock_to_ceph_filelock(lock, &cephlock);
			if (err)
				goto fail;
			err = ceph_pagelist_append(pagelist, &cephlock,
					   sizeof(struct ceph_filelock));
			if (err)
				goto fail;
		}
	}

	err = ceph_pagelist_append(pagelist, &num_flock_locks, sizeof(u32));
	if (err)
		goto fail;
	for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
		if (lock->fl_flags & FL_FLOCK) {
			++seen_flock;
			if (seen_flock > num_flock_locks) {
				err = -ENOSPC;
				goto fail;
			}
			err = lock_to_ceph_filelock(lock, &cephlock);
			if (err)
				goto fail;
			err = ceph_pagelist_append(pagelist, &cephlock,
					   sizeof(struct ceph_filelock));
			if (err)
				goto fail;
		}
	}
fail:
	return err;
}

/*
 * Given a pointer to a lock, convert it to a ceph filelock
 */
int lock_to_ceph_filelock(struct file_lock *lock,
			  struct ceph_filelock *cephlock)
{
	int err = 0;

	cephlock->start = cpu_to_le64(lock->fl_start);
	cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
	cephlock->client = cpu_to_le64(0);
	cephlock->pid = cpu_to_le64(lock->fl_pid);
	cephlock->pid_namespace =
		cpu_to_le64((u64)(unsigned long)lock->fl_nspid);

	switch (lock->fl_type) {
	case F_RDLCK:
		cephlock->type = CEPH_LOCK_SHARED;
		break;
	case F_WRLCK:
		cephlock->type = CEPH_LOCK_EXCL;
		break;
	case F_UNLCK:
		cephlock->type = CEPH_LOCK_UNLOCK;
		break;
	default:
		dout("Have unknown lock type %d", lock->fl_type);
		err = -EINVAL;
	}

	return err;
}
gpl-2.0
schlund/nerdey-kernel
drivers/staging/rtl8192e/ieee80211/ieee80211_crypt_ccmp.c
560
13021
/* * Host AP crypt: host-based CCMP encryption implementation for Host AP driver * * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. */ //#include <linux/config.h> #include <linux/version.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/if_arp.h> #include <asm/string.h> #include <linux/wireless.h> #include "ieee80211.h" #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) #include "rtl_crypto.h" #else #include <linux/crypto.h> #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) #include <asm/scatterlist.h> #else #include <linux/scatterlist.h> #endif //#include <asm/scatterlist.h> MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Host AP crypt: CCMP"); MODULE_LICENSE("GPL"); #ifndef OPENSUSE_SLED #define OPENSUSE_SLED 0 #endif #define AES_BLOCK_LEN 16 #define CCMP_HDR_LEN 8 #define CCMP_MIC_LEN 8 #define CCMP_TK_LEN 16 #define CCMP_PN_LEN 6 struct ieee80211_ccmp_data { u8 key[CCMP_TK_LEN]; int key_set; u8 tx_pn[CCMP_PN_LEN]; u8 rx_pn[CCMP_PN_LEN]; u32 dot11RSNAStatsCCMPFormatErrors; u32 dot11RSNAStatsCCMPReplays; u32 dot11RSNAStatsCCMPDecryptErrors; int key_idx; struct crypto_tfm *tfm; /* scratch buffers for virt_to_page() (crypto API) */ u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN], tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN]; u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN]; }; void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm, const u8 pt[16], u8 ct[16]) { #if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED)) struct scatterlist src, dst; src.page = virt_to_page(pt); src.offset = offset_in_page(pt); src.length = AES_BLOCK_LEN; dst.page = 
virt_to_page(ct); dst.offset = offset_in_page(ct); dst.length = AES_BLOCK_LEN; crypto_cipher_encrypt(tfm, &dst, &src, AES_BLOCK_LEN); #else crypto_cipher_encrypt_one((void*)tfm, ct, pt); #endif } static void * ieee80211_ccmp_init(int key_idx) { struct ieee80211_ccmp_data *priv; priv = kmalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; memset(priv, 0, sizeof(*priv)); priv->key_idx = key_idx; #if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED)) priv->tfm = crypto_alloc_tfm("aes", 0); if (priv->tfm == NULL) { printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate " "crypto API aes\n"); goto fail; } #else priv->tfm = (void*)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tfm)) { printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate " "crypto API aes\n"); priv->tfm = NULL; goto fail; } #endif return priv; fail: if (priv) { if (priv->tfm) #if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) crypto_free_tfm(priv->tfm); #else crypto_free_cipher((void*)priv->tfm); #endif kfree(priv); } return NULL; } static void ieee80211_ccmp_deinit(void *priv) { struct ieee80211_ccmp_data *_priv = priv; if (_priv && _priv->tfm) #if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) crypto_free_tfm(_priv->tfm); #else crypto_free_cipher((void*)_priv->tfm); #endif kfree(priv); } static inline void xor_block(u8 *b, u8 *a, size_t len) { int i; for (i = 0; i < len; i++) b[i] ^= a[i]; } static void ccmp_init_blocks(struct crypto_tfm *tfm, struct ieee80211_hdr_4addr *hdr, u8 *pn, size_t dlen, u8 *b0, u8 *auth, u8 *s0) { u8 *pos, qc = 0; size_t aad_len; u16 fc; int a4_included, qc_included; u8 aad[2 * AES_BLOCK_LEN]; fc = le16_to_cpu(hdr->frame_ctl); a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)); /* qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) && (WLAN_FC_GET_STYPE(fc) & 0x08)); */ // fixed by David :2006.9.6 qc_included = ((WLAN_FC_GET_TYPE(fc) == 
IEEE80211_FTYPE_DATA) && (WLAN_FC_GET_STYPE(fc) & 0x80)); aad_len = 22; if (a4_included) aad_len += 6; if (qc_included) { pos = (u8 *) &hdr->addr4; if (a4_included) pos += 6; qc = *pos & 0x0f; aad_len += 2; } /* CCM Initial Block: * Flag (Include authentication header, M=3 (8-octet MIC), * L=1 (2-octet Dlen)) * Nonce: 0x00 | A2 | PN * Dlen */ b0[0] = 0x59; b0[1] = qc; memcpy(b0 + 2, hdr->addr2, ETH_ALEN); memcpy(b0 + 8, pn, CCMP_PN_LEN); b0[14] = (dlen >> 8) & 0xff; b0[15] = dlen & 0xff; /* AAD: * FC with bits 4..6 and 11..13 masked to zero; 14 is always one * A1 | A2 | A3 * SC with bits 4..15 (seq#) masked to zero * A4 (if present) * QC (if present) */ pos = (u8 *) hdr; aad[0] = 0; /* aad_len >> 8 */ aad[1] = aad_len & 0xff; aad[2] = pos[0] & 0x8f; aad[3] = pos[1] & 0xc7; memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN); pos = (u8 *) &hdr->seq_ctl; aad[22] = pos[0] & 0x0f; aad[23] = 0; /* all bits masked */ memset(aad + 24, 0, 8); if (a4_included) memcpy(aad + 24, hdr->addr4, ETH_ALEN); if (qc_included) { aad[a4_included ? 
30 : 24] = qc; /* rest of QC masked */ } /* Start with the first block and AAD */ ieee80211_ccmp_aes_encrypt(tfm, b0, auth); xor_block(auth, aad, AES_BLOCK_LEN); ieee80211_ccmp_aes_encrypt(tfm, auth, auth); xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN); ieee80211_ccmp_aes_encrypt(tfm, auth, auth); b0[0] &= 0x07; b0[14] = b0[15] = 0; ieee80211_ccmp_aes_encrypt(tfm, b0, s0); } static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_ccmp_data *key = priv; int data_len, i; u8 *pos; struct ieee80211_hdr_4addr *hdr; cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); if (skb_headroom(skb) < CCMP_HDR_LEN || skb_tailroom(skb) < CCMP_MIC_LEN || skb->len < hdr_len) return -1; data_len = skb->len - hdr_len; pos = skb_push(skb, CCMP_HDR_LEN); memmove(pos, pos + CCMP_HDR_LEN, hdr_len); pos += hdr_len; // mic = skb_put(skb, CCMP_MIC_LEN); i = CCMP_PN_LEN - 1; while (i >= 0) { key->tx_pn[i]++; if (key->tx_pn[i] != 0) break; i--; } *pos++ = key->tx_pn[5]; *pos++ = key->tx_pn[4]; *pos++ = 0; *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */; *pos++ = key->tx_pn[3]; *pos++ = key->tx_pn[2]; *pos++ = key->tx_pn[1]; *pos++ = key->tx_pn[0]; hdr = (struct ieee80211_hdr_4addr *) skb->data; if (!tcb_desc->bHwSec) { int blocks, last, len; u8 *mic; u8 *b0 = key->tx_b0; u8 *b = key->tx_b; u8 *e = key->tx_e; u8 *s0 = key->tx_s0; //mic is moved to here by john mic = skb_put(skb, CCMP_MIC_LEN); ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0); blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { len = (i == blocks && last) ? 
last : AES_BLOCK_LEN; /* Authentication */ xor_block(b, pos, len); ieee80211_ccmp_aes_encrypt(key->tfm, b, b); /* Encryption, with counter */ b0[14] = (i >> 8) & 0xff; b0[15] = i & 0xff; ieee80211_ccmp_aes_encrypt(key->tfm, b0, e); xor_block(pos, e, len); pos += len; } for (i = 0; i < CCMP_MIC_LEN; i++) mic[i] = b[i] ^ s0[i]; } return 0; } static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_ccmp_data *key = priv; u8 keyidx, *pos; struct ieee80211_hdr_4addr *hdr; cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); u8 pn[6]; if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) { key->dot11RSNAStatsCCMPFormatErrors++; return -1; } hdr = (struct ieee80211_hdr_4addr *) skb->data; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: received packet without ExtIV" " flag from " MAC_FMT "\n", MAC_ARG(hdr->addr2)); } key->dot11RSNAStatsCCMPFormatErrors++; return -2; } keyidx >>= 6; if (key->key_idx != keyidx) { printk(KERN_DEBUG "CCMP: RX tkey->key_idx=%d frame " "keyidx=%d priv=%p\n", key->key_idx, keyidx, priv); return -6; } if (!key->key_set) { if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: received packet from " MAC_FMT " with keyid=%d that does not have a configured" " key\n", MAC_ARG(hdr->addr2), keyidx); } return -3; } pn[0] = pos[7]; pn[1] = pos[6]; pn[2] = pos[5]; pn[3] = pos[4]; pn[4] = pos[1]; pn[5] = pos[0]; pos += 8; if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) { if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: replay detected: STA=" MAC_FMT " previous PN %02x%02x%02x%02x%02x%02x " "received PN %02x%02x%02x%02x%02x%02x\n", MAC_ARG(hdr->addr2), MAC_ARG(key->rx_pn), MAC_ARG(pn)); } key->dot11RSNAStatsCCMPReplays++; return -4; } if (!tcb_desc->bHwSec) { size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN; u8 *mic = skb->data + skb->len - CCMP_MIC_LEN; u8 *b0 = key->rx_b0; u8 *b = key->rx_b; u8 *a = key->rx_a; int i, blocks, 
last, len; ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b); xor_block(mic, b, CCMP_MIC_LEN); blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN; last = data_len % AES_BLOCK_LEN; for (i = 1; i <= blocks; i++) { len = (i == blocks && last) ? last : AES_BLOCK_LEN; /* Decrypt, with counter */ b0[14] = (i >> 8) & 0xff; b0[15] = i & 0xff; ieee80211_ccmp_aes_encrypt(key->tfm, b0, b); xor_block(pos, b, len); /* Authentication */ xor_block(a, pos, len); ieee80211_ccmp_aes_encrypt(key->tfm, a, a); pos += len; } if (memcmp(mic, a, CCMP_MIC_LEN) != 0) { if (net_ratelimit()) { printk(KERN_DEBUG "CCMP: decrypt failed: STA=" MAC_FMT "\n", MAC_ARG(hdr->addr2)); } key->dot11RSNAStatsCCMPDecryptErrors++; return -5; } memcpy(key->rx_pn, pn, CCMP_PN_LEN); } /* Remove hdr and MIC */ memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len); skb_pull(skb, CCMP_HDR_LEN); skb_trim(skb, skb->len - CCMP_MIC_LEN); return keyidx; } static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv) { struct ieee80211_ccmp_data *data = priv; int keyidx; struct crypto_tfm *tfm = data->tfm; keyidx = data->key_idx; memset(data, 0, sizeof(*data)); data->key_idx = keyidx; data->tfm = tfm; if (len == CCMP_TK_LEN) { memcpy(data->key, key, CCMP_TK_LEN); data->key_set = 1; if (seq) { data->rx_pn[0] = seq[5]; data->rx_pn[1] = seq[4]; data->rx_pn[2] = seq[3]; data->rx_pn[3] = seq[2]; data->rx_pn[4] = seq[1]; data->rx_pn[5] = seq[0]; } crypto_cipher_setkey((void*)data->tfm, data->key, CCMP_TK_LEN); } else if (len == 0) data->key_set = 0; else return -1; return 0; } static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv) { struct ieee80211_ccmp_data *data = priv; if (len < CCMP_TK_LEN) return -1; if (!data->key_set) return 0; memcpy(key, data->key, CCMP_TK_LEN); if (seq) { seq[0] = data->tx_pn[5]; seq[1] = data->tx_pn[4]; seq[2] = data->tx_pn[3]; seq[3] = data->tx_pn[2]; seq[4] = data->tx_pn[1]; seq[5] = data->tx_pn[0]; } return CCMP_TK_LEN; } static char * 
ieee80211_ccmp_print_stats(char *p, void *priv) { struct ieee80211_ccmp_data *ccmp = priv; p += sprintf(p, "key[%d] alg=CCMP key_set=%d " "tx_pn=%02x%02x%02x%02x%02x%02x " "rx_pn=%02x%02x%02x%02x%02x%02x " "format_errors=%d replays=%d decrypt_errors=%d\n", ccmp->key_idx, ccmp->key_set, MAC_ARG(ccmp->tx_pn), MAC_ARG(ccmp->rx_pn), ccmp->dot11RSNAStatsCCMPFormatErrors, ccmp->dot11RSNAStatsCCMPReplays, ccmp->dot11RSNAStatsCCMPDecryptErrors); return p; } void ieee80211_ccmp_null(void) { // printk("============>%s()\n", __FUNCTION__); return; } static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = { .name = "CCMP", .init = ieee80211_ccmp_init, .deinit = ieee80211_ccmp_deinit, .encrypt_mpdu = ieee80211_ccmp_encrypt, .decrypt_mpdu = ieee80211_ccmp_decrypt, .encrypt_msdu = NULL, .decrypt_msdu = NULL, .set_key = ieee80211_ccmp_set_key, .get_key = ieee80211_ccmp_get_key, .print_stats = ieee80211_ccmp_print_stats, .extra_prefix_len = CCMP_HDR_LEN, .extra_postfix_len = CCMP_MIC_LEN, .owner = THIS_MODULE, }; int __init ieee80211_crypto_ccmp_init(void) { return ieee80211_register_crypto_ops(&ieee80211_crypt_ccmp); } void __exit ieee80211_crypto_ccmp_exit(void) { ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp); } #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) //EXPORT_SYMBOL(ieee80211_ccmp_null); #else EXPORT_SYMBOL_NOVERS(ieee80211_ccmp_null); #endif //module_init(ieee80211_crypto_ccmp_init); //module_exit(ieee80211_crypto_ccmp_exit);
gpl-2.0
t0mm13b/ics4blade_kernel
drivers/scsi/bfa/scn.c
816
11496
/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/*
 * State Change Notification (SCN) handling for an FCS logical port:
 * registers for fabric state changes with an SCR ELS (driven by a small
 * retry state machine) and dispatches incoming RSCNs to the affected
 * remote ports.
 */

#include <bfa.h>
#include <bfa_svc.h>
#include "fcs_lport.h"
#include "fcs_rport.h"
#include "fcs_ms.h"
#include "fcs_trcmod.h"
#include "fcs_fcxp.h"
#include "fcs.h"
#include "lport_priv.h"

BFA_TRC_FILE(FCS, SCN);

#define FC_QOS_RSCN_EVENT		0x0c
#define FC_FABRIC_NAME_RSCN_EVENT	0x0d

/*
 * forward declarations
 */
static void bfa_fcs_port_scn_send_scr(void *scn_cbarg,
				      struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_port_scn_scr_response(void *fcsarg,
					  struct bfa_fcxp_s *fcxp,
					  void *cbarg,
					  bfa_status_t req_status,
					  u32 rsp_len, u32 resid_len,
					  struct fchs_s *rsp_fchs);
static void bfa_fcs_port_scn_send_ls_acc(struct bfa_fcs_port_s *port,
					 struct fchs_s *rx_fchs);
static void bfa_fcs_port_scn_timeout(void *arg);

/**
 *  fcs_scm_sm FCS SCN state machine
 */

/**
 * VPort SCN State Machine events
 */
enum port_scn_event {
	SCNSM_EVENT_PORT_ONLINE = 1,
	SCNSM_EVENT_PORT_OFFLINE = 2,
	SCNSM_EVENT_RSP_OK = 3,		/* SCR accepted by the fabric */
	SCNSM_EVENT_RSP_ERROR = 4,	/* SCR rejected or request failed */
	SCNSM_EVENT_TIMEOUT = 5,	/* retry timer fired */
	SCNSM_EVENT_SCR_SENT = 6,
};

static void bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn,
					enum port_scn_event event);
static void bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn,
					    enum port_scn_event event);
static void bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn,
				    enum port_scn_event event);
static void bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn,
					  enum port_scn_event event);
static void bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn,
				       enum port_scn_event event);

/**
 * 		Starting state - awaiting link up.
 */
static void
bfa_fcs_port_scn_sm_offline(struct bfa_fcs_port_scn_s *scn,
			    enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_PORT_ONLINE:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_sending_scr);
		bfa_fcs_port_scn_send_scr(scn, NULL);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}

/*
 * Waiting for an fcxp to become available so the SCR can go out;
 * going offline here must cancel the pending fcxp wait.
 */
static void
bfa_fcs_port_scn_sm_sending_scr(struct bfa_fcs_port_scn_s *scn,
				enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_SCR_SENT:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_scr);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
		bfa_fcxp_walloc_cancel(scn->port->fcs->bfa, &scn->fcxp_wqe);
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}

/*
 * SCR is on the wire - waiting for the fabric's response.  On error,
 * arm the retry timer; going offline discards the outstanding fcxp.
 */
static void
bfa_fcs_port_scn_sm_scr(struct bfa_fcs_port_scn_s *scn,
			enum port_scn_event event)
{
	struct bfa_fcs_port_s *port = scn->port;

	switch (event) {
	case SCNSM_EVENT_RSP_OK:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_online);
		break;

	case SCNSM_EVENT_RSP_ERROR:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_scr_retry);
		bfa_timer_start(port->fcs->bfa, &scn->timer,
				bfa_fcs_port_scn_timeout, scn,
				BFA_FCS_RETRY_TIMEOUT);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
		bfa_fcxp_discard(scn->fcxp);
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}

/*
 * Waiting out the retry delay before re-sending the SCR; going offline
 * must stop the running timer.
 */
static void
bfa_fcs_port_scn_sm_scr_retry(struct bfa_fcs_port_scn_s *scn,
			      enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_TIMEOUT:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_sending_scr);
		bfa_fcs_port_scn_send_scr(scn, NULL);
		break;

	case SCNSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
		bfa_timer_stop(&scn->timer);
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}

/* Registered with the fabric - stay here until the port goes offline. */
static void
bfa_fcs_port_scn_sm_online(struct bfa_fcs_port_scn_s *scn,
			   enum port_scn_event event)
{
	switch (event) {
	case SCNSM_EVENT_PORT_OFFLINE:
		bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
		break;

	default:
		bfa_sm_fault(scn->port->fcs, event);
	}
}

/**
 *  fcs_scn_private FCS SCN private functions
 */

/**
 * This routine will be called to send a SCR command.
 */
static void
bfa_fcs_port_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_port_scn_s *scn = scn_cbarg;
	struct bfa_fcs_port_s *port = scn->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(port->fcs, port->pid);
	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/* no fcxp available: queue ourselves and retry via this callback */
	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		bfa_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe,
				    bfa_fcs_port_scn_send_scr, scn);
		return;
	}
	scn->fcxp = fcxp;

	/*
	 * Handle VU registrations for Base port only
	 */
	if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
		len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				   bfa_lps_is_brcd_fabric(port->fabric->lps),
				   port->pid, 0);
	} else {
		len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), BFA_FALSE,
				   port->pid, 0);
	}

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
		      BFA_FALSE, FC_CLASS_3, len, &fchs,
		      bfa_fcs_port_scn_scr_response, (void *)scn,
		      FC_MAX_PDUSZ, FC_RA_TOV);

	bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT);
}

/*
 * Completion callback for the SCR: translate the ELS response (ACC /
 * LS_RJT / failure) into an RSP_OK or RSP_ERROR state machine event.
 */
static void
bfa_fcs_port_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
			      void *cbarg, bfa_status_t req_status,
			      u32 rsp_len, u32 resid_len,
			      struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_port_scn_s *scn = (struct bfa_fcs_port_scn_s *)cbarg;
	struct bfa_fcs_port_s *port = scn->port;
	struct fc_els_cmd_s *els_cmd;
	struct fc_ls_rjt_s *ls_rjt;

	bfa_trc(port->fcs, port->port_cfg.pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(port->fcs, req_status);
		bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
		return;
	}

	els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);

	switch (els_cmd->els_code) {
	case FC_ELS_ACC:
		bfa_sm_send_event(scn, SCNSM_EVENT_RSP_OK);
		break;

	case FC_ELS_LS_RJT:
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
		bfa_trc(port->fcs, ls_rjt->reason_code);
		bfa_trc(port->fcs, ls_rjt->reason_code_expl);
		bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
		break;

	default:
		bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
	}
}

/*
 * Send a LS Accept
 *
 * Acknowledges a received RSCN.  Best-effort: if no fcxp is available the
 * accept is silently dropped.  No response callback is registered (NULL
 * cbfn, 0 timeout) since nothing further is expected.
 */
static void
bfa_fcs_port_scn_send_ls_acc(struct bfa_fcs_port_s *port,
			     struct fchs_s *rx_fchs)
{
	struct fchs_s fchs;
	struct bfa_fcxp_s *fcxp;
	struct bfa_rport_s *bfa_rport = NULL;
	int len;

	bfa_trc(port->fcs, rx_fchs->s_id);

	fcxp = bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp)
		return;

	len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rx_fchs->s_id,
			      bfa_fcs_port_get_fcid(port), rx_fchs->ox_id);

	bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
		      BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
		      FC_MAX_PDUSZ, 0);
}

/**
 *     This routine will be called by bfa_timer on timer timeouts.
 *
 * 	param[in] 	vport 		- pointer to bfa_fcs_port_t.
 * 	param[out]	vport_status 	- pointer to return vport status in
 *
 * 	return
 * 		void
 *
 * 	Special Considerations:
 *
 * 	note
 */
static void
bfa_fcs_port_scn_timeout(void *arg)
{
	struct bfa_fcs_port_scn_s *scn = (struct bfa_fcs_port_scn_s *)arg;

	bfa_sm_send_event(scn, SCNSM_EVENT_TIMEOUT);
}

/**
 *  fcs_scn_public FCS state change notification public interfaces
 */

/*
 * Functions called by port/fab
 */

/* Initialize the SCN module of a port into the offline state. */
void
bfa_fcs_port_scn_init(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);

	scn->port = port;
	bfa_sm_set_state(scn, bfa_fcs_port_scn_sm_offline);
}

/* Notify the SCN state machine that the port went offline. */
void
bfa_fcs_port_scn_offline(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);

	scn->port = port;
	bfa_sm_send_event(scn, SCNSM_EVENT_PORT_OFFLINE);
}

/* Notify the SCN state machine that the port came online. */
void
bfa_fcs_port_scn_online(struct bfa_fcs_port_s *port)
{
	struct bfa_fcs_port_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);

	scn->port = port;
	bfa_sm_send_event(scn, SCNSM_EVENT_PORT_ONLINE);
}

/* Handle a single-port-id RSCN: discover a new rport or notify an old one. */
static void
bfa_fcs_port_scn_portid_rscn(struct bfa_fcs_port_s *port, u32 rpid)
{
	struct bfa_fcs_rport_s *rport;

	bfa_trc(port->fcs, rpid);

	/**
	 * If this is an unknown device, then it just came online.
	 * Otherwise let rport handle the RSCN event.
	 */
	rport = bfa_fcs_port_get_rport_by_pid(port, rpid);
	if (rport == NULL) {
		/*
		 * If min cfg mode is enabled, we donot need to
		 * discover any new rports.
		 */
		if (!__fcs_min_cfg(port->fcs))
			rport = bfa_fcs_rport_create(port, rpid);
	} else {
		bfa_fcs_rport_scn(rport);
	}
}

/**
 * rscn format based PID comparison
 *
 * Compares a port id (as bytes __c1) against the RSCN-affected address
 * (__c0) honoring the RSCN address format: fabric matches everything,
 * domain compares 1 byte, area compares 2 bytes.
 */
#define __fc_pid_match(__c0, __c1, __fmt)		\
	(((__fmt) == FC_RSCN_FORMAT_FABRIC) ||		\
	 (((__fmt) == FC_RSCN_FORMAT_DOMAIN)		\
	  && ((__c0)[0] == (__c1)[0])) ||		\
	 (((__fmt) == FC_RSCN_FORMAT_AREA)		\
	  && ((__c0)[0] == (__c1)[0])			\
	  && ((__c0)[1] == (__c1)[1])))

/* Deliver a domain/area/fabric RSCN to every rport whose PID matches. */
static void
bfa_fcs_port_scn_multiport_rscn(struct bfa_fcs_port_s *port,
				enum fc_rscn_format format, u32 rscn_pid)
{
	struct bfa_fcs_rport_s *rport;
	struct list_head *qe, *qe_next;
	u8 *c0, *c1;

	bfa_trc(port->fcs, format);
	bfa_trc(port->fcs, rscn_pid);

	c0 = (u8 *) &rscn_pid;

	/* _safe: bfa_fcs_rport_scn() may remove the rport from the queue */
	list_for_each_safe(qe, qe_next, &port->rport_q) {
		rport = (struct bfa_fcs_rport_s *)qe;
		c1 = (u8 *) &rport->pid;
		if (__fc_pid_match(c0, c1, format))
			bfa_fcs_rport_scn(rport);
	}
}

/*
 * Entry point for a received RSCN ELS: acknowledge it, then process each
 * affected-address entry according to its format.  Any area/domain/fabric
 * scope entry additionally triggers a fresh name-server query.
 */
void
bfa_fcs_port_scn_process_rscn(struct bfa_fcs_port_s *port,
			      struct fchs_s *fchs, u32 len)
{
	struct fc_rscn_pl_s *rscn = (struct fc_rscn_pl_s *) (fchs + 1);
	int num_entries;
	u32 rscn_pid;
	bfa_boolean_t nsquery = BFA_FALSE;
	int i = 0;

	num_entries =
		(bfa_os_ntohs(rscn->payldlen) -
		 sizeof(u32)) / sizeof(rscn->event[0]);

	bfa_trc(port->fcs, num_entries);

	port->stats.num_rscn++;

	bfa_fcs_port_scn_send_ls_acc(port, fchs);

	for (i = 0; i < num_entries; i++) {
		rscn_pid = rscn->event[i].portid;

		bfa_trc(port->fcs, rscn->event[i].format);
		bfa_trc(port->fcs, rscn_pid);

		switch (rscn->event[i].format) {
		case FC_RSCN_FORMAT_PORTID:
			if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) {
				/*
				 * Ignore this event. f/w would have processed
				 * it
				 */
				bfa_trc(port->fcs, rscn_pid);
			} else {
				port->stats.num_portid_rscn++;
				bfa_fcs_port_scn_portid_rscn(port, rscn_pid);
			}
			break;

		case FC_RSCN_FORMAT_FABRIC:
			if (rscn->event[i].qualifier ==
			    FC_FABRIC_NAME_RSCN_EVENT) {
				bfa_fcs_port_ms_fabric_rscn(port);
				break;
			}
			/*
			 * !!!!!!!!! Fall Through !!!!!!!!!!!!!
			 */

		case FC_RSCN_FORMAT_AREA:
		case FC_RSCN_FORMAT_DOMAIN:
			nsquery = BFA_TRUE;
			bfa_fcs_port_scn_multiport_rscn(port,
							rscn->event[i].format,
							rscn_pid);
			break;

		default:
			bfa_assert(0);
			nsquery = BFA_TRUE;
		}
	}

	/**
	 * If any of area, domain or fabric RSCN is received, do a fresh discovery
	 * to find new devices.
	 */
	if (nsquery)
		bfa_fcs_port_ns_query(port);
}
gpl-2.0
ubuntustudio-kernel/ubuntu-saucy-lowlatency
fs/ubifs/master.c
1584
10718
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Artem Bityutskiy (Битюцкий Артём) * Adrian Hunter */ /* This file implements reading and writing the master node */ #include "ubifs.h" /** * scan_for_master - search the valid master node. * @c: UBIFS file-system description object * * This function scans the master node LEBs and search for the latest master * node. Returns zero in case of success, %-EUCLEAN if there master area is * corrupted and requires recovery, and a negative error code in case of * failure. 
*/ static int scan_for_master(struct ubifs_info *c) { struct ubifs_scan_leb *sleb; struct ubifs_scan_node *snod; int lnum, offs = 0, nodes_cnt; lnum = UBIFS_MST_LNUM; sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1); if (IS_ERR(sleb)) return PTR_ERR(sleb); nodes_cnt = sleb->nodes_cnt; if (nodes_cnt > 0) { snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node, list); if (snod->type != UBIFS_MST_NODE) goto out_dump; memcpy(c->mst_node, snod->node, snod->len); offs = snod->offs; } ubifs_scan_destroy(sleb); lnum += 1; sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1); if (IS_ERR(sleb)) return PTR_ERR(sleb); if (sleb->nodes_cnt != nodes_cnt) goto out; if (!sleb->nodes_cnt) goto out; snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node, list); if (snod->type != UBIFS_MST_NODE) goto out_dump; if (snod->offs != offs) goto out; if (memcmp((void *)c->mst_node + UBIFS_CH_SZ, (void *)snod->node + UBIFS_CH_SZ, UBIFS_MST_NODE_SZ - UBIFS_CH_SZ)) goto out; c->mst_offs = offs; ubifs_scan_destroy(sleb); return 0; out: ubifs_scan_destroy(sleb); return -EUCLEAN; out_dump: ubifs_err("unexpected node type %d master LEB %d:%d", snod->type, lnum, snod->offs); ubifs_scan_destroy(sleb); return -EINVAL; } /** * validate_master - validate master node. * @c: UBIFS file-system description object * * This function validates data which was read from master node. Returns zero * if the data is all right and %-EINVAL if not. 
*/ static int validate_master(const struct ubifs_info *c) { long long main_sz; int err; if (c->max_sqnum >= SQNUM_WATERMARK) { err = 1; goto out; } if (c->cmt_no >= c->max_sqnum) { err = 2; goto out; } if (c->highest_inum >= INUM_WATERMARK) { err = 3; goto out; } if (c->lhead_lnum < UBIFS_LOG_LNUM || c->lhead_lnum >= UBIFS_LOG_LNUM + c->log_lebs || c->lhead_offs < 0 || c->lhead_offs >= c->leb_size || c->lhead_offs & (c->min_io_size - 1)) { err = 4; goto out; } if (c->zroot.lnum >= c->leb_cnt || c->zroot.lnum < c->main_first || c->zroot.offs >= c->leb_size || c->zroot.offs & 7) { err = 5; goto out; } if (c->zroot.len < c->ranges[UBIFS_IDX_NODE].min_len || c->zroot.len > c->ranges[UBIFS_IDX_NODE].max_len) { err = 6; goto out; } if (c->gc_lnum >= c->leb_cnt || c->gc_lnum < c->main_first) { err = 7; goto out; } if (c->ihead_lnum >= c->leb_cnt || c->ihead_lnum < c->main_first || c->ihead_offs % c->min_io_size || c->ihead_offs < 0 || c->ihead_offs > c->leb_size || c->ihead_offs & 7) { err = 8; goto out; } main_sz = (long long)c->main_lebs * c->leb_size; if (c->bi.old_idx_sz & 7 || c->bi.old_idx_sz >= main_sz) { err = 9; goto out; } if (c->lpt_lnum < c->lpt_first || c->lpt_lnum > c->lpt_last || c->lpt_offs < 0 || c->lpt_offs + c->nnode_sz > c->leb_size) { err = 10; goto out; } if (c->nhead_lnum < c->lpt_first || c->nhead_lnum > c->lpt_last || c->nhead_offs < 0 || c->nhead_offs % c->min_io_size || c->nhead_offs > c->leb_size) { err = 11; goto out; } if (c->ltab_lnum < c->lpt_first || c->ltab_lnum > c->lpt_last || c->ltab_offs < 0 || c->ltab_offs + c->ltab_sz > c->leb_size) { err = 12; goto out; } if (c->big_lpt && (c->lsave_lnum < c->lpt_first || c->lsave_lnum > c->lpt_last || c->lsave_offs < 0 || c->lsave_offs + c->lsave_sz > c->leb_size)) { err = 13; goto out; } if (c->lscan_lnum < c->main_first || c->lscan_lnum >= c->leb_cnt) { err = 14; goto out; } if (c->lst.empty_lebs < 0 || c->lst.empty_lebs > c->main_lebs - 2) { err = 15; goto out; } if (c->lst.idx_lebs < 0 || 
c->lst.idx_lebs > c->main_lebs - 1) { err = 16; goto out; } if (c->lst.total_free < 0 || c->lst.total_free > main_sz || c->lst.total_free & 7) { err = 17; goto out; } if (c->lst.total_dirty < 0 || (c->lst.total_dirty & 7)) { err = 18; goto out; } if (c->lst.total_used < 0 || (c->lst.total_used & 7)) { err = 19; goto out; } if (c->lst.total_free + c->lst.total_dirty + c->lst.total_used > main_sz) { err = 20; goto out; } if (c->lst.total_dead + c->lst.total_dark + c->lst.total_used + c->bi.old_idx_sz > main_sz) { err = 21; goto out; } if (c->lst.total_dead < 0 || c->lst.total_dead > c->lst.total_free + c->lst.total_dirty || c->lst.total_dead & 7) { err = 22; goto out; } if (c->lst.total_dark < 0 || c->lst.total_dark > c->lst.total_free + c->lst.total_dirty || c->lst.total_dark & 7) { err = 23; goto out; } return 0; out: ubifs_err("bad master node at offset %d error %d", c->mst_offs, err); ubifs_dump_node(c, c->mst_node); return -EINVAL; } /** * ubifs_read_master - read master node. * @c: UBIFS file-system description object * * This function finds and reads the master node during file-system mount. If * the flash is empty, it creates default master node as well. Returns zero in * case of success and a negative error code in case of failure. */ int ubifs_read_master(struct ubifs_info *c) { int err, old_leb_cnt; c->mst_node = kzalloc(c->mst_node_alsz, GFP_KERNEL); if (!c->mst_node) return -ENOMEM; err = scan_for_master(c); if (err) { if (err == -EUCLEAN) err = ubifs_recover_master_node(c); if (err) /* * Note, we do not free 'c->mst_node' here because the * unmount routine will take care of this. 
*/ return err; } /* Make sure that the recovery flag is clear */ c->mst_node->flags &= cpu_to_le32(~UBIFS_MST_RCVRY); c->max_sqnum = le64_to_cpu(c->mst_node->ch.sqnum); c->highest_inum = le64_to_cpu(c->mst_node->highest_inum); c->cmt_no = le64_to_cpu(c->mst_node->cmt_no); c->zroot.lnum = le32_to_cpu(c->mst_node->root_lnum); c->zroot.offs = le32_to_cpu(c->mst_node->root_offs); c->zroot.len = le32_to_cpu(c->mst_node->root_len); c->lhead_lnum = le32_to_cpu(c->mst_node->log_lnum); c->gc_lnum = le32_to_cpu(c->mst_node->gc_lnum); c->ihead_lnum = le32_to_cpu(c->mst_node->ihead_lnum); c->ihead_offs = le32_to_cpu(c->mst_node->ihead_offs); c->bi.old_idx_sz = le64_to_cpu(c->mst_node->index_size); c->lpt_lnum = le32_to_cpu(c->mst_node->lpt_lnum); c->lpt_offs = le32_to_cpu(c->mst_node->lpt_offs); c->nhead_lnum = le32_to_cpu(c->mst_node->nhead_lnum); c->nhead_offs = le32_to_cpu(c->mst_node->nhead_offs); c->ltab_lnum = le32_to_cpu(c->mst_node->ltab_lnum); c->ltab_offs = le32_to_cpu(c->mst_node->ltab_offs); c->lsave_lnum = le32_to_cpu(c->mst_node->lsave_lnum); c->lsave_offs = le32_to_cpu(c->mst_node->lsave_offs); c->lscan_lnum = le32_to_cpu(c->mst_node->lscan_lnum); c->lst.empty_lebs = le32_to_cpu(c->mst_node->empty_lebs); c->lst.idx_lebs = le32_to_cpu(c->mst_node->idx_lebs); old_leb_cnt = le32_to_cpu(c->mst_node->leb_cnt); c->lst.total_free = le64_to_cpu(c->mst_node->total_free); c->lst.total_dirty = le64_to_cpu(c->mst_node->total_dirty); c->lst.total_used = le64_to_cpu(c->mst_node->total_used); c->lst.total_dead = le64_to_cpu(c->mst_node->total_dead); c->lst.total_dark = le64_to_cpu(c->mst_node->total_dark); c->calc_idx_sz = c->bi.old_idx_sz; if (c->mst_node->flags & cpu_to_le32(UBIFS_MST_NO_ORPHS)) c->no_orphs = 1; if (old_leb_cnt != c->leb_cnt) { /* The file system has been resized */ int growth = c->leb_cnt - old_leb_cnt; if (c->leb_cnt < old_leb_cnt || c->leb_cnt < UBIFS_MIN_LEB_CNT) { ubifs_err("bad leb_cnt on master node"); ubifs_dump_node(c, c->mst_node); return -EINVAL; 
} dbg_mnt("Auto resizing (master) from %d LEBs to %d LEBs", old_leb_cnt, c->leb_cnt); c->lst.empty_lebs += growth; c->lst.total_free += growth * (long long)c->leb_size; c->lst.total_dark += growth * (long long)c->dark_wm; /* * Reflect changes back onto the master node. N.B. the master * node gets written immediately whenever mounting (or * remounting) in read-write mode, so we do not need to write it * here. */ c->mst_node->leb_cnt = cpu_to_le32(c->leb_cnt); c->mst_node->empty_lebs = cpu_to_le32(c->lst.empty_lebs); c->mst_node->total_free = cpu_to_le64(c->lst.total_free); c->mst_node->total_dark = cpu_to_le64(c->lst.total_dark); } err = validate_master(c); if (err) return err; err = dbg_old_index_check_init(c, &c->zroot); return err; } /** * ubifs_write_master - write master node. * @c: UBIFS file-system description object * * This function writes the master node. The caller has to take the * @c->mst_mutex lock before calling this function. Returns zero in case of * success and a negative error code in case of failure. The master node is * written twice to enable recovery. */ int ubifs_write_master(struct ubifs_info *c) { int err, lnum, offs, len; ubifs_assert(!c->ro_media && !c->ro_mount); if (c->ro_error) return -EROFS; lnum = UBIFS_MST_LNUM; offs = c->mst_offs + c->mst_node_alsz; len = UBIFS_MST_NODE_SZ; if (offs + UBIFS_MST_NODE_SZ > c->leb_size) { err = ubifs_leb_unmap(c, lnum); if (err) return err; offs = 0; } c->mst_offs = offs; c->mst_node->highest_inum = cpu_to_le64(c->highest_inum); err = ubifs_write_node(c, c->mst_node, len, lnum, offs); if (err) return err; lnum += 1; if (offs == 0) { err = ubifs_leb_unmap(c, lnum); if (err) return err; } err = ubifs_write_node(c, c->mst_node, len, lnum, offs); return err; }
gpl-2.0
loxdegio/linux-patched
arch/x86/mm/mmio-mod.c
2096
12118
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) IBM Corporation, 2005 * Jeff Muizelaar, 2006, 2007 * Pekka Paalanen, 2008 <pq@iki.fi> * * Derived from the read-mod example from relay-examples by Tom Zanussi. */ #define pr_fmt(fmt) "mmiotrace: " fmt #define DEBUG 1 #include <linux/module.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/kallsyms.h> #include <asm/pgtable.h> #include <linux/mmiotrace.h> #include <asm/e820.h> /* for ISA_START_ADDRESS */ #include <linux/atomic.h> #include <linux/percpu.h> #include <linux/cpu.h> #include "pf_in.h" struct trap_reason { unsigned long addr; unsigned long ip; enum reason_type type; int active_traces; }; struct remap_trace { struct list_head list; struct kmmio_probe probe; resource_size_t phys; unsigned long id; }; /* Accessed per-cpu. */ static DEFINE_PER_CPU(struct trap_reason, pf_reason); static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace); static DEFINE_MUTEX(mmiotrace_mutex); static DEFINE_SPINLOCK(trace_lock); static atomic_t mmiotrace_enabled; static LIST_HEAD(trace_list); /* struct remap_trace */ /* * Locking in this file: * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections. * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex * and trace_lock. 
* - Routines depending on is_enabled() must take trace_lock. * - trace_list users must hold trace_lock. * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed. * - pre/post callbacks assume the effect of is_enabled() being true. */ /* module parameters */ static unsigned long filter_offset; static bool nommiotrace; static bool trace_pc; module_param(filter_offset, ulong, 0); module_param(nommiotrace, bool, 0); module_param(trace_pc, bool, 0); MODULE_PARM_DESC(filter_offset, "Start address of traced mappings."); MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing."); MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions."); static bool is_enabled(void) { return atomic_read(&mmiotrace_enabled); } static void print_pte(unsigned long address) { unsigned int level; pte_t *pte = lookup_address(address, &level); if (!pte) { pr_err("Error in %s: no pte for page 0x%08lx\n", __func__, address); return; } if (level == PG_LEVEL_2M) { pr_emerg("4MB pages are not currently supported: 0x%08lx\n", address); BUG(); } pr_info("pte for 0x%lx: 0x%llx 0x%llx\n", address, (unsigned long long)pte_val(*pte), (unsigned long long)pte_val(*pte) & _PAGE_PRESENT); } /* * For some reason the pre/post pairs have been called in an * unmatched order. Report and die. 
*/ static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr) { const struct trap_reason *my_reason = &get_cpu_var(pf_reason); pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n", addr, my_reason->addr); print_pte(addr); print_symbol(KERN_EMERG "faulting IP is at %s\n", regs->ip); print_symbol(KERN_EMERG "last faulting IP was at %s\n", my_reason->ip); #ifdef __i386__ pr_emerg("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n", regs->ax, regs->bx, regs->cx, regs->dx); pr_emerg("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n", regs->si, regs->di, regs->bp, regs->sp); #else pr_emerg("rax: %016lx rcx: %016lx rdx: %016lx\n", regs->ax, regs->cx, regs->dx); pr_emerg("rsi: %016lx rdi: %016lx rbp: %016lx rsp: %016lx\n", regs->si, regs->di, regs->bp, regs->sp); #endif put_cpu_var(pf_reason); BUG(); } static void pre(struct kmmio_probe *p, struct pt_regs *regs, unsigned long addr) { struct trap_reason *my_reason = &get_cpu_var(pf_reason); struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace); const unsigned long instptr = instruction_pointer(regs); const enum reason_type type = get_ins_type(instptr); struct remap_trace *trace = p->private; /* it doesn't make sense to have more than one active trace per cpu */ if (my_reason->active_traces) die_kmmio_nesting_error(regs, addr); else my_reason->active_traces++; my_reason->type = type; my_reason->addr = addr; my_reason->ip = instptr; my_trace->phys = addr - trace->probe.addr + trace->phys; my_trace->map_id = trace->id; /* * Only record the program counter when requested. * It may taint clean-room reverse engineering. */ if (trace_pc) my_trace->pc = instptr; else my_trace->pc = 0; /* * XXX: the timestamp recorded will be *after* the tracing has been * done, not at the time we hit the instruction. SMP implications * on event ordering? 
*/ switch (type) { case REG_READ: my_trace->opcode = MMIO_READ; my_trace->width = get_ins_mem_width(instptr); break; case REG_WRITE: my_trace->opcode = MMIO_WRITE; my_trace->width = get_ins_mem_width(instptr); my_trace->value = get_ins_reg_val(instptr, regs); break; case IMM_WRITE: my_trace->opcode = MMIO_WRITE; my_trace->width = get_ins_mem_width(instptr); my_trace->value = get_ins_imm_val(instptr); break; default: { unsigned char *ip = (unsigned char *)instptr; my_trace->opcode = MMIO_UNKNOWN_OP; my_trace->width = 0; my_trace->value = (*ip) << 16 | *(ip + 1) << 8 | *(ip + 2); } } put_cpu_var(cpu_trace); put_cpu_var(pf_reason); } static void post(struct kmmio_probe *p, unsigned long condition, struct pt_regs *regs) { struct trap_reason *my_reason = &get_cpu_var(pf_reason); struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace); /* this should always return the active_trace count to 0 */ my_reason->active_traces--; if (my_reason->active_traces) { pr_emerg("unexpected post handler"); BUG(); } switch (my_reason->type) { case REG_READ: my_trace->value = get_ins_reg_val(my_reason->ip, regs); break; default: break; } mmio_trace_rw(my_trace); put_cpu_var(cpu_trace); put_cpu_var(pf_reason); } static void ioremap_trace_core(resource_size_t offset, unsigned long size, void __iomem *addr) { static atomic_t next_id; struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL); /* These are page-unaligned. 
*/ struct mmiotrace_map map = { .phys = offset, .virt = (unsigned long)addr, .len = size, .opcode = MMIO_PROBE }; if (!trace) { pr_err("kmalloc failed in ioremap\n"); return; } *trace = (struct remap_trace) { .probe = { .addr = (unsigned long)addr, .len = size, .pre_handler = pre, .post_handler = post, .private = trace }, .phys = offset, .id = atomic_inc_return(&next_id) }; map.map_id = trace->id; spin_lock_irq(&trace_lock); if (!is_enabled()) { kfree(trace); goto not_enabled; } mmio_trace_mapping(&map); list_add_tail(&trace->list, &trace_list); if (!nommiotrace) register_kmmio_probe(&trace->probe); not_enabled: spin_unlock_irq(&trace_lock); } void mmiotrace_ioremap(resource_size_t offset, unsigned long size, void __iomem *addr) { if (!is_enabled()) /* recheck and proper locking in *_core() */ return; pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n", (unsigned long long)offset, size, addr); if ((filter_offset) && (offset != filter_offset)) return; ioremap_trace_core(offset, size, addr); } static void iounmap_trace_core(volatile void __iomem *addr) { struct mmiotrace_map map = { .phys = 0, .virt = (unsigned long)addr, .len = 0, .opcode = MMIO_UNPROBE }; struct remap_trace *trace; struct remap_trace *tmp; struct remap_trace *found_trace = NULL; pr_debug("Unmapping %p.\n", addr); spin_lock_irq(&trace_lock); if (!is_enabled()) goto not_enabled; list_for_each_entry_safe(trace, tmp, &trace_list, list) { if ((unsigned long)addr == trace->probe.addr) { if (!nommiotrace) unregister_kmmio_probe(&trace->probe); list_del(&trace->list); found_trace = trace; break; } } map.map_id = (found_trace) ? 
found_trace->id : -1; mmio_trace_mapping(&map); not_enabled: spin_unlock_irq(&trace_lock); if (found_trace) { synchronize_rcu(); /* unregister_kmmio_probe() requirement */ kfree(found_trace); } } void mmiotrace_iounmap(volatile void __iomem *addr) { might_sleep(); if (is_enabled()) /* recheck and proper locking in *_core() */ iounmap_trace_core(addr); } int mmiotrace_printk(const char *fmt, ...) { int ret = 0; va_list args; unsigned long flags; va_start(args, fmt); spin_lock_irqsave(&trace_lock, flags); if (is_enabled()) ret = mmio_trace_printk(fmt, args); spin_unlock_irqrestore(&trace_lock, flags); va_end(args); return ret; } EXPORT_SYMBOL(mmiotrace_printk); static void clear_trace_list(void) { struct remap_trace *trace; struct remap_trace *tmp; /* * No locking required, because the caller ensures we are in a * critical section via mutex, and is_enabled() is false, * i.e. nothing can traverse or modify this list. * Caller also ensures is_enabled() cannot change. */ list_for_each_entry(trace, &trace_list, list) { pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n", trace->probe.addr, trace->probe.len); if (!nommiotrace) unregister_kmmio_probe(&trace->probe); } synchronize_rcu(); /* unregister_kmmio_probe() requirement */ list_for_each_entry_safe(trace, tmp, &trace_list, list) { list_del(&trace->list); kfree(trace); } } #ifdef CONFIG_HOTPLUG_CPU static cpumask_var_t downed_cpus; static void enter_uniprocessor(void) { int cpu; int err; if (downed_cpus == NULL && !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) { pr_notice("Failed to allocate mask\n"); goto out; } get_online_cpus(); cpumask_copy(downed_cpus, cpu_online_mask); cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus); if (num_online_cpus() > 1) pr_notice("Disabling non-boot CPUs...\n"); put_online_cpus(); for_each_cpu(cpu, downed_cpus) { err = cpu_down(cpu); if (!err) pr_info("CPU%d is down.\n", cpu); else pr_err("Error taking CPU%d down: %d\n", cpu, err); } out: if (num_online_cpus() > 
1) pr_warning("multiple CPUs still online, may miss events.\n"); } static void leave_uniprocessor(void) { int cpu; int err; if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0) return; pr_notice("Re-enabling CPUs...\n"); for_each_cpu(cpu, downed_cpus) { err = cpu_up(cpu); if (!err) pr_info("enabled CPU%d.\n", cpu); else pr_err("cannot re-enable CPU%d: %d\n", cpu, err); } } #else /* !CONFIG_HOTPLUG_CPU */ static void enter_uniprocessor(void) { if (num_online_cpus() > 1) pr_warning("multiple CPUs are online, may miss events. " "Suggest booting with maxcpus=1 kernel argument.\n"); } static void leave_uniprocessor(void) { } #endif void enable_mmiotrace(void) { mutex_lock(&mmiotrace_mutex); if (is_enabled()) goto out; if (nommiotrace) pr_info("MMIO tracing disabled.\n"); kmmio_init(); enter_uniprocessor(); spin_lock_irq(&trace_lock); atomic_inc(&mmiotrace_enabled); spin_unlock_irq(&trace_lock); pr_info("enabled.\n"); out: mutex_unlock(&mmiotrace_mutex); } void disable_mmiotrace(void) { mutex_lock(&mmiotrace_mutex); if (!is_enabled()) goto out; spin_lock_irq(&trace_lock); atomic_dec(&mmiotrace_enabled); BUG_ON(is_enabled()); spin_unlock_irq(&trace_lock); clear_trace_list(); /* guarantees: no more kmmio callbacks */ leave_uniprocessor(); kmmio_cleanup(); pr_info("disabled.\n"); out: mutex_unlock(&mmiotrace_mutex); }
gpl-2.0
chrisk44/android_kernel_lge_hammerhead
drivers/video/msm/lcdc_st15.c
3632
8988
/* Copyright (c) 2010, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/i2c.h> #include <linux/delay.h> #include "msm_fb.h" #define DEVICE_NAME "sii9022" #define SII9022_DEVICE_ID 0xB0 #define SII9022_ISR 0x3D #define SII9022_ISR_RXS_STATUS 0x08 static int lcdc_sii9022_panel_on(struct platform_device *pdev); static int lcdc_sii9022_panel_off(struct platform_device *pdev); static struct i2c_client *sii9022_i2c_client; struct sii9022_data { struct msm_hdmi_platform_data *pd; struct platform_device *pdev; struct work_struct work; int x_res; int y_res; int sysfs_entry_created; int hdmi_attached; }; static struct sii9022_data *dd; struct sii9022_i2c_addr_data{ u8 addr; u8 data; }; /* video mode data */ static u8 video_mode_data[] = { 0x00, 0xF9, 0x1C, 0x70, 0x17, 0x72, 0x06, 0xEE, 0x02, }; static u8 avi_io_format[] = { 0x09, 0x00, 0x00, }; /* power state */ static struct sii9022_i2c_addr_data regset0[] = { { 0x60, 0x04 }, { 0x63, 0x00 }, { 0x1E, 0x00 }, }; static u8 video_infoframe[] = { 0x0C, 0xF0, 0x00, 0x68, 0x00, 0x04, 0x00, 0x19, 0x00, 0xE9, 0x02, 0x04, 0x01, 0x04, 0x06, }; /* configure audio */ static struct sii9022_i2c_addr_data regset1[] = { { 0x26, 0x90 }, { 0x20, 0x90 }, { 0x1F, 0x80 }, { 0x26, 0x80 }, { 0x24, 0x02 }, { 0x25, 0x0B }, { 0xBC, 0x02 }, { 0xBD, 0x24 }, { 0xBE, 0x02 }, }; /* enable audio */ static u8 misc_infoframe[] = { 0xBF, 0xC2, 0x84, 0x01, 0x0A, 0x6F, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; /* set HDMI, active */ static struct sii9022_i2c_addr_data regset2[] = { { 
0x1A, 0x01 }, { 0x3D, 0x00 }, { 0x3C, 0x02 }, }; static struct msm_fb_panel_data sii9022_panel_data = { .on = lcdc_sii9022_panel_on, .off = lcdc_sii9022_panel_off, }; static struct platform_device sii9022_device = { .name = DEVICE_NAME, .id = 1, .dev = { .platform_data = &sii9022_panel_data, } }; static int send_i2c_data(struct i2c_client *client, struct sii9022_i2c_addr_data *regset, int size) { int i; int rc = 0; for (i = 0; i < size; i++) { rc = i2c_smbus_write_byte_data( client, regset[i].addr, regset[i].data); if (rc) break; } return rc; } static void sii9022_work_f(struct work_struct *work) { int isr; isr = i2c_smbus_read_byte_data(sii9022_i2c_client, SII9022_ISR); if (isr < 0) { dev_err(&sii9022_i2c_client->dev, "i2c read of isr failed rc = 0x%x\n", isr); return; } if (isr == 0) return; /* reset any set bits */ i2c_smbus_write_byte_data(sii9022_i2c_client, SII9022_ISR, isr); dd->hdmi_attached = isr & SII9022_ISR_RXS_STATUS; if (dd->pd->cable_detect) dd->pd->cable_detect(dd->hdmi_attached); if (dd->hdmi_attached) { dd->x_res = 1280; dd->y_res = 720; } else { dd->x_res = sii9022_panel_data.panel_info.xres; dd->y_res = sii9022_panel_data.panel_info.yres; } } static irqreturn_t sii9022_interrupt(int irq, void *dev_id) { struct sii9022_data *dd = dev_id; schedule_work(&dd->work); return IRQ_HANDLED; } static int hdmi_sii_enable(struct i2c_client *client) { int rc; int retries = 10; int count; rc = i2c_smbus_write_byte_data(client, 0xC7, 0x00); if (rc) goto enable_exit; do { msleep(1); rc = i2c_smbus_read_byte_data(client, 0x1B); } while ((rc != SII9022_DEVICE_ID) && retries--); if (rc != SII9022_DEVICE_ID) return -ENODEV; rc = i2c_smbus_write_byte_data(client, 0x1A, 0x11); if (rc) goto enable_exit; count = ARRAY_SIZE(video_mode_data); rc = i2c_master_send(client, video_mode_data, count); if (rc != count) { rc = -EIO; goto enable_exit; } rc = i2c_smbus_write_byte_data(client, 0x08, 0x20); if (rc) goto enable_exit; count = ARRAY_SIZE(avi_io_format); rc = 
i2c_master_send(client, avi_io_format, count); if (rc != count) { rc = -EIO; goto enable_exit; } rc = send_i2c_data(client, regset0, ARRAY_SIZE(regset0)); if (rc) goto enable_exit; count = ARRAY_SIZE(video_infoframe); rc = i2c_master_send(client, video_infoframe, count); if (rc != count) { rc = -EIO; goto enable_exit; } rc = send_i2c_data(client, regset1, ARRAY_SIZE(regset1)); if (rc) goto enable_exit; count = ARRAY_SIZE(misc_infoframe); rc = i2c_master_send(client, misc_infoframe, count); if (rc != count) { rc = -EIO; goto enable_exit; } rc = send_i2c_data(client, regset2, ARRAY_SIZE(regset2)); if (rc) goto enable_exit; return 0; enable_exit: printk(KERN_ERR "%s: exited rc=%d\n", __func__, rc); return rc; } static ssize_t show_res(struct device *device, struct device_attribute *attr, char *buf) { return snprintf(buf, PAGE_SIZE, "%dx%d\n", dd->x_res, dd->y_res); } static struct device_attribute device_attrs[] = { __ATTR(screen_resolution, S_IRUGO|S_IWUSR, show_res, NULL), }; static int lcdc_sii9022_panel_on(struct platform_device *pdev) { int rc; if (!dd->sysfs_entry_created) { dd->pdev = pdev; rc = device_create_file(&pdev->dev, &device_attrs[0]); if (!rc) dd->sysfs_entry_created = 1; } rc = hdmi_sii_enable(sii9022_i2c_client); if (rc) { dd->hdmi_attached = 0; dd->x_res = sii9022_panel_data.panel_info.xres; dd->y_res = sii9022_panel_data.panel_info.yres; } if (dd->pd->irq) enable_irq(dd->pd->irq); /* Don't return the value from hdmi_sii_enable(). * It may fail on some ST1.5s, but we must return 0 from this * function in order for the on-board display to turn on. 
*/ return 0; } static int lcdc_sii9022_panel_off(struct platform_device *pdev) { if (dd->pd->irq) disable_irq(dd->pd->irq); return 0; } static const struct i2c_device_id hmdi_sii_id[] = { { DEVICE_NAME, 0 }, { } }; static int hdmi_sii_probe(struct i2c_client *client, const struct i2c_device_id *id) { int rc; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C)) return -ENODEV; dd = kzalloc(sizeof *dd, GFP_KERNEL); if (!dd) { rc = -ENOMEM; goto probe_exit; } sii9022_i2c_client = client; i2c_set_clientdata(client, dd); dd->pd = client->dev.platform_data; if (!dd->pd) { rc = -ENODEV; goto probe_free; } if (dd->pd->irq) { INIT_WORK(&dd->work, sii9022_work_f); rc = request_irq(dd->pd->irq, &sii9022_interrupt, IRQF_TRIGGER_FALLING, "sii9022_cable", dd); if (rc) goto probe_free; disable_irq(dd->pd->irq); } msm_fb_add_device(&sii9022_device); dd->x_res = sii9022_panel_data.panel_info.xres; dd->y_res = sii9022_panel_data.panel_info.yres; return 0; probe_free: i2c_set_clientdata(client, NULL); kfree(dd); probe_exit: return rc; } static int __devexit hdmi_sii_remove(struct i2c_client *client) { int err = 0 ; struct msm_hdmi_platform_data *pd; if (dd->sysfs_entry_created) device_remove_file(&dd->pdev->dev, &device_attrs[0]); pd = client->dev.platform_data; if (pd && pd->irq) free_irq(pd->irq, dd); i2c_set_clientdata(client, NULL); kfree(dd); return err ; } #ifdef CONFIG_PM static int sii9022_suspend(struct device *dev) { if (dd && dd->pd && dd->pd->irq) disable_irq(dd->pd->irq); return 0; } static int sii9022_resume(struct device *dev) { if (dd && dd->pd && dd->pd->irq) enable_irq(dd->pd->irq); return 0; } static struct dev_pm_ops sii9022_pm_ops = { .suspend = sii9022_suspend, .resume = sii9022_resume, }; #endif static struct i2c_driver hdmi_sii_i2c_driver = { .driver = { .name = DEVICE_NAME, .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &sii9022_pm_ops, #endif }, .probe = hdmi_sii_probe, .remove = __exit_p(hdmi_sii_remove), .id_table = hmdi_sii_id, 
}; static int __init lcdc_st15_init(void) { int ret; struct msm_panel_info *pinfo; if (msm_fb_detect_client("lcdc_st15")) return 0; pinfo = &sii9022_panel_data.panel_info; pinfo->xres = 1366; pinfo->yres = 768; MSM_FB_SINGLE_MODE_PANEL(pinfo); pinfo->type = LCDC_PANEL; pinfo->pdest = DISPLAY_1; pinfo->wait_cycle = 0; pinfo->bpp = 24; pinfo->fb_num = 2; pinfo->clk_rate = 74250000; pinfo->lcdc.h_back_porch = 120; pinfo->lcdc.h_front_porch = 20; pinfo->lcdc.h_pulse_width = 40; pinfo->lcdc.v_back_porch = 25; pinfo->lcdc.v_front_porch = 1; pinfo->lcdc.v_pulse_width = 7; pinfo->lcdc.border_clr = 0; /* blk */ pinfo->lcdc.underflow_clr = 0xff; /* blue */ pinfo->lcdc.hsync_skew = 0; ret = i2c_add_driver(&hdmi_sii_i2c_driver); if (ret) printk(KERN_ERR "%s: failed to add i2c driver\n", __func__); return ret; } static void __exit hdmi_sii_exit(void) { i2c_del_driver(&hdmi_sii_i2c_driver); } module_init(lcdc_st15_init); module_exit(hdmi_sii_exit);
gpl-2.0
TeamSXL/htc-cm-kernel-doubleshot-34_old
arch/alpha/kernel/pci_iommu.c
4400
25883
/* * linux/arch/alpha/kernel/pci_iommu.c */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/gfp.h> #include <linux/bootmem.h> #include <linux/export.h> #include <linux/scatterlist.h> #include <linux/log2.h> #include <linux/dma-mapping.h> #include <linux/iommu-helper.h> #include <asm/io.h> #include <asm/hwrpb.h> #include "proto.h" #include "pci_impl.h" #define DEBUG_ALLOC 0 #if DEBUG_ALLOC > 0 # define DBGA(args...) printk(KERN_DEBUG args) #else # define DBGA(args...) #endif #if DEBUG_ALLOC > 1 # define DBGA2(args...) printk(KERN_DEBUG args) #else # define DBGA2(args...) #endif #define DEBUG_NODIRECT 0 #define ISA_DMA_MASK 0x00ffffff static inline unsigned long mk_iommu_pte(unsigned long paddr) { return (paddr >> (PAGE_SHIFT-1)) | 1; } /* Return the minimum of MAX or the first power of two larger than main memory. */ unsigned long size_for_memory(unsigned long max) { unsigned long mem = max_low_pfn << PAGE_SHIFT; if (mem < max) max = roundup_pow_of_two(mem); return max; } struct pci_iommu_arena * __init iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base, unsigned long window_size, unsigned long align) { unsigned long mem_size; struct pci_iommu_arena *arena; mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long)); /* Note that the TLB lookup logic uses bitwise concatenation, not addition, so the required arena alignment is based on the size of the window. Retain the align parameter so that particular systems can over-align the arena. 
*/ if (align < mem_size) align = mem_size; #ifdef CONFIG_DISCONTIGMEM arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena)); if (!NODE_DATA(nid) || !arena) { printk("%s: couldn't allocate arena from node %d\n" " falling back to system-wide allocation\n", __func__, nid); arena = alloc_bootmem(sizeof(*arena)); } arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0); if (!NODE_DATA(nid) || !arena->ptes) { printk("%s: couldn't allocate arena ptes from node %d\n" " falling back to system-wide allocation\n", __func__, nid); arena->ptes = __alloc_bootmem(mem_size, align, 0); } #else /* CONFIG_DISCONTIGMEM */ arena = alloc_bootmem(sizeof(*arena)); arena->ptes = __alloc_bootmem(mem_size, align, 0); #endif /* CONFIG_DISCONTIGMEM */ spin_lock_init(&arena->lock); arena->hose = hose; arena->dma_base = base; arena->size = window_size; arena->next_entry = 0; /* Align allocations to a multiple of a page size. Not needed unless there are chip bugs. */ arena->align_entry = 1; return arena; } struct pci_iommu_arena * __init iommu_arena_new(struct pci_controller *hose, dma_addr_t base, unsigned long window_size, unsigned long align) { return iommu_arena_new_node(0, hose, base, window_size, align); } /* Must be called with the arena lock held */ static long iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena, long n, long mask) { unsigned long *ptes; long i, p, nent; int pass = 0; unsigned long base; unsigned long boundary_size; base = arena->dma_base >> PAGE_SHIFT; if (dev) { boundary_size = dma_get_seg_boundary(dev) + 1; boundary_size >>= PAGE_SHIFT; } else { boundary_size = 1UL << (32 - PAGE_SHIFT); } /* Search forward for the first mask-aligned sequence of N free ptes */ ptes = arena->ptes; nent = arena->size >> PAGE_SHIFT; p = ALIGN(arena->next_entry, mask + 1); i = 0; again: while (i < n && p+i < nent) { if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) { p = ALIGN(p + 1, mask + 1); goto again; } if (ptes[p+i]) p = ALIGN(p + i 
+ 1, mask + 1), i = 0; else i = i + 1; } if (i < n) { if (pass < 1) { /* * Reached the end. Flush the TLB and restart * the search from the beginning. */ alpha_mv.mv_pci_tbi(arena->hose, 0, -1); pass++; p = 0; i = 0; goto again; } else return -1; } /* Success. It's the responsibility of the caller to mark them in use before releasing the lock */ return p; } static long iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n, unsigned int align) { unsigned long flags; unsigned long *ptes; long i, p, mask; spin_lock_irqsave(&arena->lock, flags); /* Search for N empty ptes */ ptes = arena->ptes; mask = max(align, arena->align_entry) - 1; p = iommu_arena_find_pages(dev, arena, n, mask); if (p < 0) { spin_unlock_irqrestore(&arena->lock, flags); return -1; } /* Success. Mark them all in use, ie not zero and invalid for the iommu tlb that could load them from under us. The chip specific bits will fill this in with something kosher when we return. */ for (i = 0; i < n; ++i) ptes[p+i] = IOMMU_INVALID_PTE; arena->next_entry = p + n; spin_unlock_irqrestore(&arena->lock, flags); return p; } static void iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n) { unsigned long *p; long i; p = arena->ptes + ofs; for (i = 0; i < n; ++i) p[i] = 0; } /* * True if the machine supports DAC addressing, and DEV can * make use of it given MASK. */ static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask) { dma_addr_t dac_offset = alpha_mv.pci_dac_offset; int ok = 1; /* If this is not set, the machine doesn't support DAC at all. */ if (dac_offset == 0) ok = 0; /* The device has to be able to address our DAC bit. */ if ((dac_offset & dev->dma_mask) != dac_offset) ok = 0; /* If both conditions above are met, we are fine. */ DBGA("pci_dac_dma_supported %s from %p\n", ok ? "yes" : "no", __builtin_return_address(0)); return ok; } /* Map a single buffer of the indicated size for PCI DMA in streaming mode. The 32-bit PCI bus mastering address to use is returned. 
Once the device is given the dma address, the device owns this memory until either pci_unmap_single or pci_dma_sync_single is performed. */ static dma_addr_t pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size, int dac_allowed) { struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose; dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; struct pci_iommu_arena *arena; long npages, dma_ofs, i; unsigned long paddr; dma_addr_t ret; unsigned int align = 0; struct device *dev = pdev ? &pdev->dev : NULL; paddr = __pa(cpu_addr); #if !DEBUG_NODIRECT /* First check to see if we can use the direct map window. */ if (paddr + size + __direct_map_base - 1 <= max_dma && paddr + size <= __direct_map_size) { ret = paddr + __direct_map_base; DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %p\n", cpu_addr, size, ret, __builtin_return_address(0)); return ret; } #endif /* Next, use DAC if selected earlier. */ if (dac_allowed) { ret = paddr + alpha_mv.pci_dac_offset; DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %p\n", cpu_addr, size, ret, __builtin_return_address(0)); return ret; } /* If the machine doesn't define a pci_tbi routine, we have to assume it doesn't support sg mapping, and, since we tried to use direct_map above, it now must be considered an error. */ if (! alpha_mv.mv_pci_tbi) { printk_once(KERN_WARNING "pci_map_single: no HW sg\n"); return 0; } arena = hose->sg_pci; if (!arena || arena->dma_base + arena->size - 1 > max_dma) arena = hose->sg_isa; npages = iommu_num_pages(paddr, size, PAGE_SIZE); /* Force allocation to 64KB boundary for ISA bridges. 
*/ if (pdev && pdev == isa_bridge) align = 8; dma_ofs = iommu_arena_alloc(dev, arena, npages, align); if (dma_ofs < 0) { printk(KERN_WARNING "pci_map_single failed: " "could not allocate dma page tables\n"); return 0; } paddr &= PAGE_MASK; for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr); ret = arena->dma_base + dma_ofs * PAGE_SIZE; ret += (unsigned long)cpu_addr & ~PAGE_MASK; DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %p\n", cpu_addr, size, npages, ret, __builtin_return_address(0)); return ret; } /* Helper for generic DMA-mapping functions. */ static struct pci_dev *alpha_gendev_to_pci(struct device *dev) { if (dev && dev->bus == &pci_bus_type) return to_pci_dev(dev); /* Assume that non-PCI devices asking for DMA are either ISA or EISA, BUG() otherwise. */ BUG_ON(!isa_bridge); /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA bridge is bus master then). */ if (!dev || !dev->dma_mask || !*dev->dma_mask) return isa_bridge; /* For EISA bus masters, return isa_bridge (it might have smaller dma_mask due to wiring limitations). */ if (*dev->dma_mask >= isa_bridge->dma_mask) return isa_bridge; /* This assumes ISA bus master with dma_mask 0xffffff. */ return NULL; } static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); int dac_allowed; if (dir == PCI_DMA_NONE) BUG(); dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; return pci_map_single_1(pdev, (char *)page_address(page) + offset, size, dac_allowed); } /* Unmap a single streaming mode DMA translation. The DMA_ADDR and SIZE must match what was provided for in a previous pci_map_single call. All other usages are undefined. After this call, reads by the cpu to the buffer are guaranteed to see whatever the device wrote there. 
*/ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { unsigned long flags; struct pci_dev *pdev = alpha_gendev_to_pci(dev); struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose; struct pci_iommu_arena *arena; long dma_ofs, npages; if (dir == PCI_DMA_NONE) BUG(); if (dma_addr >= __direct_map_base && dma_addr < __direct_map_base + __direct_map_size) { /* Nothing to do. */ DBGA2("pci_unmap_single: direct [%llx,%zx] from %p\n", dma_addr, size, __builtin_return_address(0)); return; } if (dma_addr > 0xffffffff) { DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %p\n", dma_addr, size, __builtin_return_address(0)); return; } arena = hose->sg_pci; if (!arena || dma_addr < arena->dma_base) arena = hose->sg_isa; dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT; if (dma_ofs * PAGE_SIZE >= arena->size) { printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx " " base %llx size %x\n", dma_addr, arena->dma_base, arena->size); return; BUG(); } npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); spin_lock_irqsave(&arena->lock, flags); iommu_arena_free(arena, dma_ofs, npages); /* If we're freeing ptes above the `next_entry' pointer (they may have snuck back into the TLB since the last wrap flush), we need to flush the TLB before reallocating the latter. */ if (dma_ofs >= arena->next_entry) alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1); spin_unlock_irqrestore(&arena->lock, flags); DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %p\n", dma_addr, size, npages, __builtin_return_address(0)); } /* Allocate and map kernel buffer using consistent mode DMA for PCI device. Returns non-NULL cpu-view pointer to the buffer if successful and sets *DMA_ADDRP to the pci side dma address as well, else DMA_ADDRP is undefined. 
*/ static void *alpha_pci_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp, struct dma_attrs *attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); void *cpu_addr; long order = get_order(size); gfp &= ~GFP_DMA; try_again: cpu_addr = (void *)__get_free_pages(gfp, order); if (! cpu_addr) { printk(KERN_INFO "pci_alloc_consistent: " "get_free_pages failed from %p\n", __builtin_return_address(0)); /* ??? Really atomic allocation? Otherwise we could play with vmalloc and sg if we can't find contiguous memory. */ return NULL; } memset(cpu_addr, 0, size); *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0); if (*dma_addrp == 0) { free_pages((unsigned long)cpu_addr, order); if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA)) return NULL; /* The address doesn't fit required mask and we do not have iommu. Try again with GFP_DMA. */ gfp |= GFP_DMA; goto try_again; } DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %p\n", size, cpu_addr, *dma_addrp, __builtin_return_address(0)); return cpu_addr; } /* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must be values that were returned from pci_alloc_consistent. SIZE must be the same as what as passed into pci_alloc_consistent. References to the memory and mappings associated with CPU_ADDR or DMA_ADDR past this call are illegal. */ static void alpha_pci_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, struct dma_attrs *attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); free_pages((unsigned long)cpu_addr, get_order(size)); DBGA2("pci_free_consistent: [%llx,%zx] from %p\n", dma_addr, size, __builtin_return_address(0)); } /* Classify the elements of the scatterlist. Write dma_address of each element with: 0 : Followers all physically adjacent. 1 : Followers all virtually adjacent. -1 : Not leader, physically adjacent to previous. -2 : Not leader, virtually adjacent to previous. 
Write dma_length of each leader with the combined lengths of the mergable followers. */ #define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG))) #define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG)) static void sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end, int virt_ok) { unsigned long next_paddr; struct scatterlist *leader; long leader_flag, leader_length; unsigned int max_seg_size; leader = sg; leader_flag = 0; leader_length = leader->length; next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length; /* we will not marge sg without device. */ max_seg_size = dev ? dma_get_max_seg_size(dev) : 0; for (++sg; sg < end; ++sg) { unsigned long addr, len; addr = SG_ENT_PHYS_ADDRESS(sg); len = sg->length; if (leader_length + len > max_seg_size) goto new_segment; if (next_paddr == addr) { sg->dma_address = -1; leader_length += len; } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) { sg->dma_address = -2; leader_flag = 1; leader_length += len; } else { new_segment: leader->dma_address = leader_flag; leader->dma_length = leader_length; leader = sg; leader_flag = 0; leader_length = len; } next_paddr = addr + len; } leader->dma_address = leader_flag; leader->dma_length = leader_length; } /* Given a scatterlist leader, choose an allocation method and fill in the blanks. */ static int sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end, struct scatterlist *out, struct pci_iommu_arena *arena, dma_addr_t max_dma, int dac_allowed) { unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader); long size = leader->dma_length; struct scatterlist *sg; unsigned long *ptes; long npages, dma_ofs, i; #if !DEBUG_NODIRECT /* If everything is physically contiguous, and the addresses fall into the direct-map window, use it. 
*/ if (leader->dma_address == 0 && paddr + size + __direct_map_base - 1 <= max_dma && paddr + size <= __direct_map_size) { out->dma_address = paddr + __direct_map_base; out->dma_length = size; DBGA(" sg_fill: [%p,%lx] -> direct %llx\n", __va(paddr), size, out->dma_address); return 0; } #endif /* If physically contiguous and DAC is available, use it. */ if (leader->dma_address == 0 && dac_allowed) { out->dma_address = paddr + alpha_mv.pci_dac_offset; out->dma_length = size; DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n", __va(paddr), size, out->dma_address); return 0; } /* Otherwise, we'll use the iommu to make the pages virtually contiguous. */ paddr &= ~PAGE_MASK; npages = iommu_num_pages(paddr, size, PAGE_SIZE); dma_ofs = iommu_arena_alloc(dev, arena, npages, 0); if (dma_ofs < 0) { /* If we attempted a direct map above but failed, die. */ if (leader->dma_address == 0) return -1; /* Otherwise, break up the remaining virtually contiguous hunks into individual direct maps and retry. */ sg_classify(dev, leader, end, 0); return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed); } out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr; out->dma_length = size; DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n", __va(paddr), size, out->dma_address, npages); /* All virtually contiguous. We need to find the length of each physically contiguous subsegment to fill in the ptes. 
*/ ptes = &arena->ptes[dma_ofs]; sg = leader; do { #if DEBUG_ALLOC > 0 struct scatterlist *last_sg = sg; #endif size = sg->length; paddr = SG_ENT_PHYS_ADDRESS(sg); while (sg+1 < end && (int) sg[1].dma_address == -1) { size += sg[1].length; sg++; } npages = iommu_num_pages(paddr, size, PAGE_SIZE); paddr &= PAGE_MASK; for (i = 0; i < npages; ++i, paddr += PAGE_SIZE) *ptes++ = mk_iommu_pte(paddr); #if DEBUG_ALLOC > 0 DBGA(" (%ld) [%p,%x] np %ld\n", last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), last_sg->length, npages); while (++last_sg <= sg) { DBGA(" (%ld) [%p,%x] cont\n", last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg), last_sg->length); } #endif } while (++sg < end && (int) sg->dma_address < 0); return 1; } static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); struct scatterlist *start, *end, *out; struct pci_controller *hose; struct pci_iommu_arena *arena; dma_addr_t max_dma; int dac_allowed; if (dir == PCI_DMA_NONE) BUG(); dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0; /* Fast path single entry scatterlists. */ if (nents == 1) { sg->dma_length = sg->length; sg->dma_address = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg), sg->length, dac_allowed); return sg->dma_address != 0; } start = sg; end = sg + nents; /* First, prepare information about the entries. */ sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0); /* Second, figure out where we're going to map things. */ if (alpha_mv.mv_pci_tbi) { hose = pdev ? pdev->sysdata : pci_isa_hose; max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; arena = hose->sg_pci; if (!arena || arena->dma_base + arena->size - 1 > max_dma) arena = hose->sg_isa; } else { max_dma = -1; arena = NULL; hose = NULL; } /* Third, iterate over the scatterlist leaders and allocate dma space as needed. 
*/ for (out = sg; sg < end; ++sg) { if ((int) sg->dma_address < 0) continue; if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0) goto error; out++; } /* Mark the end of the list for pci_unmap_sg. */ if (out < end) out->dma_length = 0; if (out - start == 0) printk(KERN_WARNING "pci_map_sg failed: no entries?\n"); DBGA("pci_map_sg: %ld entries\n", out - start); return out - start; error: printk(KERN_WARNING "pci_map_sg failed: " "could not allocate dma page tables\n"); /* Some allocation failed while mapping the scatterlist entries. Unmap them now. */ if (out > start) pci_unmap_sg(pdev, start, out - start, dir); return 0; } /* Unmap a set of streaming mode DMA translations. Again, cpu read rules concerning calls here are the same as for pci_unmap_single() above. */ static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); unsigned long flags; struct pci_controller *hose; struct pci_iommu_arena *arena; struct scatterlist *end; dma_addr_t max_dma; dma_addr_t fbeg, fend; if (dir == PCI_DMA_NONE) BUG(); if (! alpha_mv.mv_pci_tbi) return; hose = pdev ? pdev->sysdata : pci_isa_hose; max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK; arena = hose->sg_pci; if (!arena || arena->dma_base + arena->size - 1 > max_dma) arena = hose->sg_isa; fbeg = -1, fend = 0; spin_lock_irqsave(&arena->lock, flags); for (end = sg + nents; sg < end; ++sg) { dma_addr_t addr; size_t size; long npages, ofs; dma_addr_t tend; addr = sg->dma_address; size = sg->dma_length; if (!size) break; if (addr > 0xffffffff) { /* It's a DAC address -- nothing to do. */ DBGA(" (%ld) DAC [%llx,%zx]\n", sg - end + nents, addr, size); continue; } if (addr >= __direct_map_base && addr < __direct_map_base + __direct_map_size) { /* Nothing to do. 
*/ DBGA(" (%ld) direct [%llx,%zx]\n", sg - end + nents, addr, size); continue; } DBGA(" (%ld) sg [%llx,%zx]\n", sg - end + nents, addr, size); npages = iommu_num_pages(addr, size, PAGE_SIZE); ofs = (addr - arena->dma_base) >> PAGE_SHIFT; iommu_arena_free(arena, ofs, npages); tend = addr + size - 1; if (fbeg > addr) fbeg = addr; if (fend < tend) fend = tend; } /* If we're freeing ptes above the `next_entry' pointer (they may have snuck back into the TLB since the last wrap flush), we need to flush the TLB before reallocating the latter. */ if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry) alpha_mv.mv_pci_tbi(hose, fbeg, fend); spin_unlock_irqrestore(&arena->lock, flags); DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg)); } /* Return whether the given PCI device DMA address mask can be supported properly. */ static int alpha_pci_supported(struct device *dev, u64 mask) { struct pci_dev *pdev = alpha_gendev_to_pci(dev); struct pci_controller *hose; struct pci_iommu_arena *arena; /* If there exists a direct map, and the mask fits either the entire direct mapped space or the total system memory as shifted by the map base */ if (__direct_map_size != 0 && (__direct_map_base + __direct_map_size - 1 <= mask || __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask)) return 1; /* Check that we have a scatter-gather arena that fits. */ hose = pdev ? pdev->sysdata : pci_isa_hose; arena = hose->sg_isa; if (arena && arena->dma_base + arena->size - 1 <= mask) return 1; arena = hose->sg_pci; if (arena && arena->dma_base + arena->size - 1 <= mask) return 1; /* As last resort try ZONE_DMA. */ if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask) return 1; return 0; } /* * AGP GART extensions to the IOMMU */ int iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask) { unsigned long flags; unsigned long *ptes; long i, p; if (!arena) return -EINVAL; spin_lock_irqsave(&arena->lock, flags); /* Search for N empty ptes. 
*/ ptes = arena->ptes; p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask); if (p < 0) { spin_unlock_irqrestore(&arena->lock, flags); return -1; } /* Success. Mark them all reserved (ie not zero and invalid) for the iommu tlb that could load them from under us. They will be filled in with valid bits by _bind() */ for (i = 0; i < pg_count; ++i) ptes[p+i] = IOMMU_RESERVED_PTE; arena->next_entry = p + pg_count; spin_unlock_irqrestore(&arena->lock, flags); return p; } int iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count) { unsigned long *ptes; long i; if (!arena) return -EINVAL; ptes = arena->ptes; /* Make sure they're all reserved first... */ for(i = pg_start; i < pg_start + pg_count; i++) if (ptes[i] != IOMMU_RESERVED_PTE) return -EBUSY; iommu_arena_free(arena, pg_start, pg_count); return 0; } int iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count, struct page **pages) { unsigned long flags; unsigned long *ptes; long i, j; if (!arena) return -EINVAL; spin_lock_irqsave(&arena->lock, flags); ptes = arena->ptes; for(j = pg_start; j < pg_start + pg_count; j++) { if (ptes[j] != IOMMU_RESERVED_PTE) { spin_unlock_irqrestore(&arena->lock, flags); return -EBUSY; } } for(i = 0, j = pg_start; i < pg_count; i++, j++) ptes[j] = mk_iommu_pte(page_to_phys(pages[i])); spin_unlock_irqrestore(&arena->lock, flags); return 0; } int iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count) { unsigned long *p; long i; if (!arena) return -EINVAL; p = arena->ptes + pg_start; for(i = 0; i < pg_count; i++) p[i] = IOMMU_RESERVED_PTE; return 0; } static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr) { return dma_addr == 0; } static int alpha_pci_set_mask(struct device *dev, u64 mask) { if (!dev->dma_mask || !pci_dma_supported(alpha_gendev_to_pci(dev), mask)) return -EIO; *dev->dma_mask = mask; return 0; } struct dma_map_ops alpha_pci_ops = { .alloc = alpha_pci_alloc_coherent, .free = 
alpha_pci_free_coherent, .map_page = alpha_pci_map_page, .unmap_page = alpha_pci_unmap_page, .map_sg = alpha_pci_map_sg, .unmap_sg = alpha_pci_unmap_sg, .mapping_error = alpha_pci_mapping_error, .dma_supported = alpha_pci_supported, .set_dma_mask = alpha_pci_set_mask, }; struct dma_map_ops *dma_ops = &alpha_pci_ops; EXPORT_SYMBOL(dma_ops);
gpl-2.0
blastagator/LGG2_Kernel
drivers/input/misc/keyspan_remote.c
4912
15395
/* * keyspan_remote: USB driver for the Keyspan DMR * * Copyright (C) 2005 Zymeta Corporation - Michael Downey (downey@zymeta.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * * This driver has been put together with the support of Innosys, Inc. * and Keyspan, Inc the manufacturers of the Keyspan USB DMR product. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb/input.h> #define DRIVER_VERSION "v0.1" #define DRIVER_AUTHOR "Michael Downey <downey@zymeta.com>" #define DRIVER_DESC "Driver for the USB Keyspan remote control." #define DRIVER_LICENSE "GPL" /* Parameters that can be passed to the driver. */ static int debug; module_param(debug, int, 0444); MODULE_PARM_DESC(debug, "Enable extra debug messages and information"); /* Vendor and product ids */ #define USB_KEYSPAN_VENDOR_ID 0x06CD #define USB_KEYSPAN_PRODUCT_UIA11 0x0202 /* Defines for converting the data from the remote. */ #define ZERO 0x18 #define ZERO_MASK 0x1F /* 5 bits for a 0 */ #define ONE 0x3C #define ONE_MASK 0x3F /* 6 bits for a 1 */ #define SYNC 0x3F80 #define SYNC_MASK 0x3FFF /* 14 bits for a SYNC sequence */ #define STOP 0x00 #define STOP_MASK 0x1F /* 5 bits for the STOP sequence */ #define GAP 0xFF #define RECV_SIZE 8 /* The UIA-11 type have a 8 byte limit. */ /* * Table that maps the 31 possible keycodes to input keys. * Currently there are 15 and 17 button models so RESERVED codes * are blank areas in the mapping. */ static const unsigned short keyspan_key_table[] = { KEY_RESERVED, /* 0 is just a place holder. 
*/ KEY_RESERVED, KEY_STOP, KEY_PLAYCD, KEY_RESERVED, KEY_PREVIOUSSONG, KEY_REWIND, KEY_FORWARD, KEY_NEXTSONG, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_PAUSE, KEY_VOLUMEUP, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_VOLUMEDOWN, KEY_RESERVED, KEY_UP, KEY_RESERVED, KEY_MUTE, KEY_LEFT, KEY_ENTER, KEY_RIGHT, KEY_RESERVED, KEY_RESERVED, KEY_DOWN, KEY_RESERVED, KEY_KPASTERISK, KEY_RESERVED, KEY_MENU }; /* table of devices that work with this driver */ static struct usb_device_id keyspan_table[] = { { USB_DEVICE(USB_KEYSPAN_VENDOR_ID, USB_KEYSPAN_PRODUCT_UIA11) }, { } /* Terminating entry */ }; /* Structure to store all the real stuff that a remote sends to us. */ struct keyspan_message { u16 system; u8 button; u8 toggle; }; /* Structure used for all the bit testing magic needed to be done. */ struct bit_tester { u32 tester; int len; int pos; int bits_left; u8 buffer[32]; }; /* Structure to hold all of our driver specific stuff */ struct usb_keyspan { char name[128]; char phys[64]; unsigned short keymap[ARRAY_SIZE(keyspan_key_table)]; struct usb_device *udev; struct input_dev *input; struct usb_interface *interface; struct usb_endpoint_descriptor *in_endpoint; struct urb* irq_urb; int open; dma_addr_t in_dma; unsigned char *in_buffer; /* variables used to parse messages from remote. */ struct bit_tester data; int stage; int toggle; }; static struct usb_driver keyspan_driver; /* * Debug routine that prints out what we've received from the remote. */ static void keyspan_print(struct usb_keyspan* dev) /*unsigned char* data)*/ { char codes[4 * RECV_SIZE]; int i; for (i = 0; i < RECV_SIZE; i++) snprintf(codes + i * 3, 4, "%02x ", dev->in_buffer[i]); dev_info(&dev->udev->dev, "%s\n", codes); } /* * Routine that manages the bit_tester structure. It makes sure that there are * at least bits_needed bits loaded into the tester. 
*/ static int keyspan_load_tester(struct usb_keyspan* dev, int bits_needed) { if (dev->data.bits_left >= bits_needed) return 0; /* * Somehow we've missed the last message. The message will be repeated * though so it's not too big a deal */ if (dev->data.pos >= dev->data.len) { dev_dbg(&dev->udev->dev, "%s - Error ran out of data. pos: %d, len: %d\n", __func__, dev->data.pos, dev->data.len); return -1; } /* Load as much as we can into the tester. */ while ((dev->data.bits_left + 7 < (sizeof(dev->data.tester) * 8)) && (dev->data.pos < dev->data.len)) { dev->data.tester += (dev->data.buffer[dev->data.pos++] << dev->data.bits_left); dev->data.bits_left += 8; } return 0; } static void keyspan_report_button(struct usb_keyspan *remote, int button, int press) { struct input_dev *input = remote->input; input_event(input, EV_MSC, MSC_SCAN, button); input_report_key(input, remote->keymap[button], press); input_sync(input); } /* * Routine that handles all the logic needed to parse out the message from the remote. */ static void keyspan_check_data(struct usb_keyspan *remote) { int i; int found = 0; struct keyspan_message message; switch(remote->stage) { case 0: /* * In stage 0 we want to find the start of a message. The remote sends a 0xFF as filler. * So the first byte that isn't a FF should be the start of a new message. */ for (i = 0; i < RECV_SIZE && remote->in_buffer[i] == GAP; ++i); if (i < RECV_SIZE) { memcpy(remote->data.buffer, remote->in_buffer, RECV_SIZE); remote->data.len = RECV_SIZE; remote->data.pos = 0; remote->data.tester = 0; remote->data.bits_left = 0; remote->stage = 1; } break; case 1: /* * Stage 1 we should have 16 bytes and should be able to detect a * SYNC. The SYNC is 14 bits, 7 0's and then 7 1's. 
*/ memcpy(remote->data.buffer + remote->data.len, remote->in_buffer, RECV_SIZE); remote->data.len += RECV_SIZE; found = 0; while ((remote->data.bits_left >= 14 || remote->data.pos < remote->data.len) && !found) { for (i = 0; i < 8; ++i) { if (keyspan_load_tester(remote, 14) != 0) { remote->stage = 0; return; } if ((remote->data.tester & SYNC_MASK) == SYNC) { remote->data.tester = remote->data.tester >> 14; remote->data.bits_left -= 14; found = 1; break; } else { remote->data.tester = remote->data.tester >> 1; --remote->data.bits_left; } } } if (!found) { remote->stage = 0; remote->data.len = 0; } else { remote->stage = 2; } break; case 2: /* * Stage 2 we should have 24 bytes which will be enough for a full * message. We need to parse out the system code, button code, * toggle code, and stop. */ memcpy(remote->data.buffer + remote->data.len, remote->in_buffer, RECV_SIZE); remote->data.len += RECV_SIZE; message.system = 0; for (i = 0; i < 9; i++) { keyspan_load_tester(remote, 6); if ((remote->data.tester & ZERO_MASK) == ZERO) { message.system = message.system << 1; remote->data.tester = remote->data.tester >> 5; remote->data.bits_left -= 5; } else if ((remote->data.tester & ONE_MASK) == ONE) { message.system = (message.system << 1) + 1; remote->data.tester = remote->data.tester >> 6; remote->data.bits_left -= 6; } else { err("%s - Unknown sequence found in system data.\n", __func__); remote->stage = 0; return; } } message.button = 0; for (i = 0; i < 5; i++) { keyspan_load_tester(remote, 6); if ((remote->data.tester & ZERO_MASK) == ZERO) { message.button = message.button << 1; remote->data.tester = remote->data.tester >> 5; remote->data.bits_left -= 5; } else if ((remote->data.tester & ONE_MASK) == ONE) { message.button = (message.button << 1) + 1; remote->data.tester = remote->data.tester >> 6; remote->data.bits_left -= 6; } else { err("%s - Unknown sequence found in button data.\n", __func__); remote->stage = 0; return; } } keyspan_load_tester(remote, 6); if 
((remote->data.tester & ZERO_MASK) == ZERO) { message.toggle = 0; remote->data.tester = remote->data.tester >> 5; remote->data.bits_left -= 5; } else if ((remote->data.tester & ONE_MASK) == ONE) { message.toggle = 1; remote->data.tester = remote->data.tester >> 6; remote->data.bits_left -= 6; } else { err("%s - Error in message, invalid toggle.\n", __func__); remote->stage = 0; return; } keyspan_load_tester(remote, 5); if ((remote->data.tester & STOP_MASK) == STOP) { remote->data.tester = remote->data.tester >> 5; remote->data.bits_left -= 5; } else { err("Bad message received, no stop bit found.\n"); } dev_dbg(&remote->udev->dev, "%s found valid message: system: %d, button: %d, toggle: %d\n", __func__, message.system, message.button, message.toggle); if (message.toggle != remote->toggle) { keyspan_report_button(remote, message.button, 1); keyspan_report_button(remote, message.button, 0); remote->toggle = message.toggle; } remote->stage = 0; break; } } /* * Routine for sending all the initialization messages to the remote. */ static int keyspan_setup(struct usb_device* dev) { int retval = 0; retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0); if (retval) { dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n", __func__, retval); return(retval); } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x44, 0x40, 0x0, 0x0, NULL, 0, 0); if (retval) { dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n", __func__, retval); return(retval); } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0x22, 0x40, 0x0, 0x0, NULL, 0, 0); if (retval) { dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n", __func__, retval); return(retval); } dev_dbg(&dev->dev, "%s - Setup complete.\n", __func__); return(retval); } /* * Routine used to handle a new message that has come in. 
*/ static void keyspan_irq_recv(struct urb *urb) { struct usb_keyspan *dev = urb->context; int retval; /* Check our status in case we need to bail out early. */ switch (urb->status) { case 0: break; /* Device went away so don't keep trying to read from it. */ case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: return; default: goto resubmit; break; } if (debug) keyspan_print(dev); keyspan_check_data(dev); resubmit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) err ("%s - usb_submit_urb failed with result: %d", __func__, retval); } static int keyspan_open(struct input_dev *dev) { struct usb_keyspan *remote = input_get_drvdata(dev); remote->irq_urb->dev = remote->udev; if (usb_submit_urb(remote->irq_urb, GFP_KERNEL)) return -EIO; return 0; } static void keyspan_close(struct input_dev *dev) { struct usb_keyspan *remote = input_get_drvdata(dev); usb_kill_urb(remote->irq_urb); } static struct usb_endpoint_descriptor *keyspan_get_in_endpoint(struct usb_host_interface *iface) { struct usb_endpoint_descriptor *endpoint; int i; for (i = 0; i < iface->desc.bNumEndpoints; ++i) { endpoint = &iface->endpoint[i].desc; if (usb_endpoint_is_int_in(endpoint)) { /* we found our interrupt in endpoint */ return endpoint; } } return NULL; } /* * Routine that sets up the driver to handle a specific USB device detected on the bus. 
*/ static int keyspan_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct usb_endpoint_descriptor *endpoint; struct usb_keyspan *remote; struct input_dev *input_dev; int i, error; endpoint = keyspan_get_in_endpoint(interface->cur_altsetting); if (!endpoint) return -ENODEV; remote = kzalloc(sizeof(*remote), GFP_KERNEL); input_dev = input_allocate_device(); if (!remote || !input_dev) { error = -ENOMEM; goto fail1; } remote->udev = udev; remote->input = input_dev; remote->interface = interface; remote->in_endpoint = endpoint; remote->toggle = -1; /* Set to -1 so we will always not match the toggle from the first remote message. */ remote->in_buffer = usb_alloc_coherent(udev, RECV_SIZE, GFP_ATOMIC, &remote->in_dma); if (!remote->in_buffer) { error = -ENOMEM; goto fail1; } remote->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if (!remote->irq_urb) { error = -ENOMEM; goto fail2; } error = keyspan_setup(udev); if (error) { error = -ENODEV; goto fail3; } if (udev->manufacturer) strlcpy(remote->name, udev->manufacturer, sizeof(remote->name)); if (udev->product) { if (udev->manufacturer) strlcat(remote->name, " ", sizeof(remote->name)); strlcat(remote->name, udev->product, sizeof(remote->name)); } if (!strlen(remote->name)) snprintf(remote->name, sizeof(remote->name), "USB Keyspan Remote %04x:%04x", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); usb_make_path(udev, remote->phys, sizeof(remote->phys)); strlcat(remote->phys, "/input0", sizeof(remote->phys)); memcpy(remote->keymap, keyspan_key_table, sizeof(remote->keymap)); input_dev->name = remote->name; input_dev->phys = remote->phys; usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &interface->dev; input_dev->keycode = remote->keymap; input_dev->keycodesize = sizeof(unsigned short); input_dev->keycodemax = ARRAY_SIZE(remote->keymap); input_set_capability(input_dev, EV_MSC, MSC_SCAN); 
__set_bit(EV_KEY, input_dev->evbit); for (i = 0; i < ARRAY_SIZE(keyspan_key_table); i++) __set_bit(keyspan_key_table[i], input_dev->keybit); __clear_bit(KEY_RESERVED, input_dev->keybit); input_set_drvdata(input_dev, remote); input_dev->open = keyspan_open; input_dev->close = keyspan_close; /* * Initialize the URB to access the device. * The urb gets sent to the device in keyspan_open() */ usb_fill_int_urb(remote->irq_urb, remote->udev, usb_rcvintpipe(remote->udev, endpoint->bEndpointAddress), remote->in_buffer, RECV_SIZE, keyspan_irq_recv, remote, endpoint->bInterval); remote->irq_urb->transfer_dma = remote->in_dma; remote->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* we can register the device now, as it is ready */ error = input_register_device(remote->input); if (error) goto fail3; /* save our data pointer in this interface device */ usb_set_intfdata(interface, remote); return 0; fail3: usb_free_urb(remote->irq_urb); fail2: usb_free_coherent(udev, RECV_SIZE, remote->in_buffer, remote->in_dma); fail1: kfree(remote); input_free_device(input_dev); return error; } /* * Routine called when a device is disconnected from the USB. */ static void keyspan_disconnect(struct usb_interface *interface) { struct usb_keyspan *remote; remote = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); if (remote) { /* We have a valid driver structure so clean up everything we allocated. */ input_unregister_device(remote->input); usb_kill_urb(remote->irq_urb); usb_free_urb(remote->irq_urb); usb_free_coherent(remote->udev, RECV_SIZE, remote->in_buffer, remote->in_dma); kfree(remote); } } /* * Standard driver set up sections */ static struct usb_driver keyspan_driver = { .name = "keyspan_remote", .probe = keyspan_probe, .disconnect = keyspan_disconnect, .id_table = keyspan_table }; module_usb_driver(keyspan_driver); MODULE_DEVICE_TABLE(usb, keyspan_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE(DRIVER_LICENSE);
gpl-2.0
classicsong/Range-Lock
drivers/staging/comedi/drivers/ni_at_ao.c
8240
12105
/*
    comedi/drivers/ni_at_ao.c
    Driver for NI AT-AO-6/10 boards

    COMEDI - Linux Control and Measurement Device Interface
    Copyright (C) 2000,2002 David A. Schleef <ds@schleef.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: ni_at_ao
Description: National Instruments AT-AO-6/10
Devices: [National Instruments] AT-AO-6 (at-ao-6), AT-AO-10 (at-ao-10)
Status: should work
Author: ds
Updated: Sun Dec 26 12:26:28 EST 2004

Configuration options:
  [0] - I/O port base address
  [1] - IRQ (unused)
  [2] - DMA (unused)
  [3] - analog output range, set by jumpers on hardware (0 for -10 to 10V
	bipolar, 1 for 0V to 10V unipolar)
*/
/*
 * Register-level programming information can be found in NI
 * document 320379.pdf.
 */

#include "../comedidev.h"

#include <linux/ioport.h>

/* board registers */
/* registers with _2_ are accessed when GRP2WR is set in CFG1 */

#define ATAO_SIZE 0x20

#define ATAO_2_DMATCCLR 0x00	/* W 16 */
#define ATAO_DIN 0x00		/* R 16 */
#define ATAO_DOUT 0x00		/* W 16 */

#define ATAO_CFG2 0x02		/* W 16 */
#define CALLD1 0x8000
#define CALLD0 0x4000
#define FFRTEN 0x2000
#define DAC2S8 0x1000
#define DAC2S6 0x0800
#define DAC2S4 0x0400
#define DAC2S2 0x0200
#define DAC2S0 0x0100
#define LDAC8 0x0080
#define LDAC6 0x0040
#define LDAC4 0x0020
#define LDAC2 0x0010
#define LDAC0 0x0008
#define PROMEN 0x0004
#define SCLK 0x0002
#define SDATA 0x0001

#define ATAO_2_INT1CLR 0x02	/* W 16 */

#define ATAO_CFG3 0x04		/* W 16 */
#define DMAMODE 0x0040
#define CLKOUT 0x0020
#define RCLKEN 0x0010
#define DOUTEN2 0x0008
#define DOUTEN1 0x0004
#define EN2_5V 0x0002
#define SCANEN 0x0001

#define ATAO_2_INT2CLR 0x04	/* W 16 */

#define ATAO_82C53_BASE 0x06	/* RW 8 */
#define ATAO_82C53_CNTR1 0x06	/* RW 8 */
#define ATAO_82C53_CNTR2 0x07	/* RW 8 */
#define ATAO_82C53_CNTR3 0x08	/* RW 8 */
#define ATAO_82C53_CNTRCMD 0x09	/* W 8 */
#define CNTRSEL1 0x80
#define CNTRSEL0 0x40
#define RWSEL1 0x20
#define RWSEL0 0x10
#define MODESEL2 0x08
#define MODESEL1 0x04
#define MODESEL0 0x02
#define BCDSEL 0x01
/* read-back command */
#define COUNT 0x20
#define STATUS 0x10
#define CNTR3 0x08
#define CNTR2 0x04
#define CNTR1 0x02
/* status */
#define OUT 0x80
#define _NULL 0x40
#define RW1 0x20
#define RW0 0x10
#define MODE2 0x08
#define MODE1 0x04
#define MODE0 0x02
#define BCD 0x01

#define ATAO_2_RTSISHFT 0x06	/* W 8 */
#define RSI 0x01

#define ATAO_2_RTSISTRB 0x07	/* W 8 */

#define ATAO_CFG1 0x0a		/* W 16 */
#define EXTINT2EN 0x8000
#define EXTINT1EN 0x4000
#define CNTINT2EN 0x2000
#define CNTINT1EN 0x1000
#define TCINTEN 0x0800
#define CNT1SRC 0x0400
#define CNT2SRC 0x0200
#define FIFOEN 0x0100
#define GRP2WR 0x0080
#define EXTUPDEN 0x0040
#define DMARQ 0x0020
#define DMAEN 0x0010
#define CH_mask 0x000f
#define ATAO_STATUS 0x0a	/* R 16 */
#define FH 0x0040
#define FE 0x0020
#define FF 0x0010
#define INT2 0x0008
#define INT1 0x0004
#define TCINT 0x0002
#define PROMOUT 0x0001

#define ATAO_FIFO_WRITE 0x0c	/* W 16 */
#define ATAO_FIFO_CLEAR 0x0c	/* R 16 */
#define ATAO_DACn(x) (0x0c + 2*(x))	/* W */

/*
 * Board descriptions for two imaginary boards.  Describing the
 * boards in this way is optional, and completely driver-dependent.
 * Some drivers use arrays such as this, other do not.
 */
struct atao_board {
	const char *name;	/* board name used for matching */
	int n_ao_chans;		/* number of analog output channels */
};

static const struct atao_board atao_boards[] = {
	{
	 .name = "ai-ao-6",
	 .n_ao_chans = 6,
	 },
	{
	 .name = "ai-ao-10",
	 .n_ao_chans = 10,
	 },
};

#define thisboard ((struct atao_board *)dev->board_ptr)

struct atao_private {
	/* software shadows of the three write-only config registers */
	unsigned short cfg1;
	unsigned short cfg2;
	unsigned short cfg3;

	/* Used for AO readback */
	unsigned int ao_readback[10];
};

#define devpriv ((struct atao_private *)dev->private)

static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it);
static int atao_detach(struct comedi_device *dev);
static struct comedi_driver driver_atao = {
	.driver_name = "ni_at_ao",
	.module = THIS_MODULE,
	.attach = atao_attach,
	.detach = atao_detach,
	.board_name = &atao_boards[0].name,
	.offset = sizeof(struct atao_board),
	.num_names = ARRAY_SIZE(atao_boards),
};

static int __init driver_atao_init_module(void)
{
	return comedi_driver_register(&driver_atao);
}

static void __exit driver_atao_cleanup_module(void)
{
	comedi_driver_unregister(&driver_atao);
}

module_init(driver_atao_init_module);
module_exit(driver_atao_cleanup_module);

static void atao_reset(struct comedi_device *dev);

static int atao_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
			 struct comedi_insn *insn, unsigned int *data);
static int atao_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
			 struct comedi_insn *insn, unsigned int *data);
static int atao_dio_insn_bits(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn, unsigned int *data);
static int atao_dio_insn_config(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data);
static int atao_calib_insn_read(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data);
static int atao_calib_insn_write(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn,
				 unsigned int *data);

/*
 * Attach handler: claim the I/O region and set up the four subdevices
 * (AO, DIO, caldac, and a placeholder for the EEPROM).
 */
static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	struct comedi_subdevice *s;
	unsigned long iobase;
	int ao_unipolar;

	iobase = it->options[0];
	if (iobase == 0)
		iobase = 0x1c0;
	ao_unipolar = it->options[3];

	printk(KERN_INFO "comedi%d: ni_at_ao: 0x%04lx", dev->minor, iobase);

	if (!request_region(iobase, ATAO_SIZE, "ni_at_ao")) {
		printk(" I/O port conflict\n");
		return -EIO;
	}
	dev->iobase = iobase;

	/* dev->board_ptr = atao_probe(dev); */

	dev->board_name = thisboard->name;

	if (alloc_private(dev, sizeof(struct atao_private)) < 0)
		return -ENOMEM;

	if (alloc_subdevices(dev, 4) < 0)
		return -ENOMEM;

	s = dev->subdevices + 0;
	/* analog output subdevice */
	s->type = COMEDI_SUBD_AO;
	s->subdev_flags = SDF_WRITABLE;
	s->n_chan = thisboard->n_ao_chans;
	s->maxdata = (1 << 12) - 1;	/* 12-bit DACs */
	if (ao_unipolar)
		s->range_table = &range_unipolar10;
	else
		s->range_table = &range_bipolar10;
	s->insn_write = &atao_ao_winsn;
	s->insn_read = &atao_ao_rinsn;

	s = dev->subdevices + 1;
	/* digital i/o subdevice */
	s->type = COMEDI_SUBD_DIO;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
	s->n_chan = 8;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_bits = atao_dio_insn_bits;
	s->insn_config = atao_dio_insn_config;

	s = dev->subdevices + 2;
	/* caldac subdevice */
	s->type = COMEDI_SUBD_CALIB;
	s->subdev_flags = SDF_WRITABLE | SDF_INTERNAL;
	s->n_chan = 21;		/* 3 x DAC8800 (8-channel, 8-bit DACs) */
	s->maxdata = 0xff;
	s->insn_read = atao_calib_insn_read;
	s->insn_write = atao_calib_insn_write;

	s = dev->subdevices + 3;
	/* eeprom subdevice */
	/* s->type=COMEDI_SUBD_EEPROM; */
	s->type = COMEDI_SUBD_UNUSED;

	atao_reset(dev);

	printk(KERN_INFO "\n");

	return 0;
}

static int atao_detach(struct comedi_device *dev)
{
	printk(KERN_INFO "comedi%d: atao: remove\n", dev->minor);

	if (dev->iobase)
		release_region(dev->iobase, ATAO_SIZE);

	return 0;
}

static void atao_reset(struct comedi_device *dev)
{
	/* This is the reset sequence described in the manual */

	devpriv->cfg1 = 0;
	outw(devpriv->cfg1, dev->iobase + ATAO_CFG1);

	/* program the 82C53 counter */
	outb(RWSEL0 | MODESEL2, dev->iobase + ATAO_82C53_CNTRCMD);
	outb(0x03, dev->iobase + ATAO_82C53_CNTR1);
	outb(CNTRSEL0 | RWSEL0 | MODESEL2, dev->iobase + ATAO_82C53_CNTRCMD);

	devpriv->cfg2 = 0;
	outw(devpriv->cfg2, dev->iobase + ATAO_CFG2);

	devpriv->cfg3 = 0;
	outw(devpriv->cfg3, dev->iobase + ATAO_CFG3);

	inw(dev->iobase + ATAO_FIFO_CLEAR);

	/* clear the group-2 interrupt/DMA latches, then drop GRP2WR */
	devpriv->cfg1 |= GRP2WR;
	outw(devpriv->cfg1, dev->iobase + ATAO_CFG1);

	outw(0, dev->iobase + ATAO_2_INT1CLR);
	outw(0, dev->iobase + ATAO_2_INT2CLR);
	outw(0, dev->iobase + ATAO_2_DMATCCLR);

	devpriv->cfg1 &= ~GRP2WR;
	outw(devpriv->cfg1, dev->iobase + ATAO_CFG1);
}

/* Write to one DAC; value is converted to offset binary (subtract 0x800). */
static int atao_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
			 struct comedi_insn *insn, unsigned int *data)
{
	int i;
	int chan = CR_CHAN(insn->chanspec);
	short bits;

	for (i = 0; i < insn->n; i++) {
		bits = data[i] - 0x800;
		if (chan == 0) {
			/* DAC0 shares its address with group-2 registers,
			 * so GRP2WR must be toggled around the access. */
			devpriv->cfg1 |= GRP2WR;
			outw(devpriv->cfg1, dev->iobase + ATAO_CFG1);
		}
		outw(bits, dev->iobase + ATAO_DACn(chan));
		if (chan == 0) {
			devpriv->cfg1 &= ~GRP2WR;
			outw(devpriv->cfg1, dev->iobase + ATAO_CFG1);
		}
		devpriv->ao_readback[chan] = data[i];
	}

	return i;
}

/* The DACs are write-only; readback comes from the software shadow. */
static int atao_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
			 struct comedi_insn *insn, unsigned int *data)
{
	int i;
	int chan = CR_CHAN(insn->chanspec);

	for (i = 0; i < insn->n; i++)
		data[i] = devpriv->ao_readback[chan];

	return i;
}

static int atao_dio_insn_bits(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn, unsigned int *data)
{
	if (insn->n != 2)
		return -EINVAL;

	/* data[0] is the mask of bits to update, data[1] the new values */
	if (data[0]) {
		s->state &= ~data[0];
		s->state |= data[0] & data[1];
		outw(s->state, dev->iobase + ATAO_DOUT);
	}

	data[1] = inw(dev->iobase + ATAO_DIN);

	return 2;
}

static int atao_dio_insn_config(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	int chan = CR_CHAN(insn->chanspec);
	unsigned int mask, bit;

	/* The input or output configuration of each digital line is
	 * configured by a special insn_config instruction.  chanspec
	 * contains the channel to be changed, and data[0] contains the
	 * value COMEDI_INPUT or COMEDI_OUTPUT. */

	/* direction is only switchable per 4-bit nibble, not per line */
	mask = (chan < 4) ? 0x0f : 0xf0;
	bit = (chan < 4) ? DOUTEN1 : DOUTEN2;

	switch (data[0]) {
	case INSN_CONFIG_DIO_OUTPUT:
		s->io_bits |= mask;
		devpriv->cfg3 |= bit;
		break;
	case INSN_CONFIG_DIO_INPUT:
		s->io_bits &= ~mask;
		devpriv->cfg3 &= ~bit;
		break;
	case INSN_CONFIG_DIO_QUERY:
		data[1] =
		    (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
		return insn->n;
	default:
		return -EINVAL;
	}

	outw(devpriv->cfg3, dev->iobase + ATAO_CFG3);

	/* comedi convention: return the number of data elements consumed
	 * (the original hard-coded 1; the QUERY branch already used
	 * insn->n, so make the other branches consistent) */
	return insn->n;
}

/*
 * Figure 2-1 in the manual shows 3 chips labeled DAC8800, which
 * are 8-channel 8-bit DACs.  These are most likely the calibration
 * DACs.  It is not explicitly stated in the manual how to access
 * the caldacs, but we can guess.
 */
static int atao_calib_insn_read(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	int i;
	for (i = 0; i < insn->n; i++)
		data[i] = 0;	/* XXX */
	return insn->n;
}

static int atao_calib_insn_write(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn, unsigned int *data)
{
	unsigned int bitstring, bit;
	unsigned int chan = CR_CHAN(insn->chanspec);

	/* 11-bit serial word: 3-bit channel address + 8-bit value */
	bitstring = ((chan & 0x7) << 8) | (data[insn->n - 1] & 0xff);

	/* bit-bang the word out MSB-first on SDATA, clocking with SCLK */
	for (bit = 1 << (11 - 1); bit; bit >>= 1) {
		outw(devpriv->cfg2 | ((bit & bitstring) ? SDATA : 0),
		     dev->iobase + ATAO_CFG2);
		outw(devpriv->cfg2 | SCLK | ((bit & bitstring) ? SDATA : 0),
		     dev->iobase + ATAO_CFG2);
	}
	/* strobe the appropriate caldac */
	outw(devpriv->cfg2 | (((chan >> 3) + 1) << 14),
	     dev->iobase + ATAO_CFG2);
	outw(devpriv->cfg2, dev->iobase + ATAO_CFG2);

	return insn->n;
}

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
linuzo/stock_kernel_lge_d852
drivers/staging/comedi/drivers/dt2814.c
8240
9120
/*
    comedi/drivers/dt2814.c
    Hardware driver for Data Translation DT2814

    COMEDI - Linux Control and Measurement Device Interface
    Copyright (C) 1998 David A. Schleef <ds@schleef.org>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: dt2814
Description: Data Translation DT2814
Author: ds
Status: complete
Devices: [Data Translation] DT2814 (dt2814)

Configuration options:
  [0] - I/O port base address
  [1] - IRQ

This card has 16 analog inputs multiplexed onto a 12 bit ADC.  There
is a minimally useful onboard clock.  The base frequency for the
clock is selected by jumpers, and the clock divider can be selected
via programmed I/O.  Unfortunately, the clock divider can only be
a power of 10, from 1 to 10^7, of which only 3 or 4 are useful.  In
addition, the clock does not seem to be very accurate.
*/

#include <linux/interrupt.h>
#include "../comedidev.h"

#include <linux/ioport.h>
#include <linux/delay.h>

#define DT2814_SIZE 2

#define DT2814_CSR 0
#define DT2814_DATA 1

/*
 * flags
 */

#define DT2814_FINISH 0x80
#define DT2814_ERR 0x40
#define DT2814_BUSY 0x20
#define DT2814_ENB 0x10
#define DT2814_CHANMASK 0x0f

static int dt2814_attach(struct comedi_device *dev,
			 struct comedi_devconfig *it);
static int dt2814_detach(struct comedi_device *dev);
static struct comedi_driver driver_dt2814 = {
	.driver_name = "dt2814",
	.module = THIS_MODULE,
	.attach = dt2814_attach,
	.detach = dt2814_detach,
};

static int __init driver_dt2814_init_module(void)
{
	return comedi_driver_register(&driver_dt2814);
}

static void __exit driver_dt2814_cleanup_module(void)
{
	comedi_driver_unregister(&driver_dt2814);
}

module_init(driver_dt2814_init_module);
module_exit(driver_dt2814_cleanup_module);

static irqreturn_t dt2814_interrupt(int irq, void *dev);

struct dt2814_private {
	int ntrig;	/* remaining conversions for a TRIG_COUNT command */
	int curadchan;	/* currently selected input channel (unused) */
};

#define devpriv ((struct dt2814_private *)dev->private)

#define DT2814_TIMEOUT 10
#define DT2814_MAX_SPEED 100000	/* Arbitrary 10 khz limit */

/*
 * Single-shot read: trigger a conversion on the requested channel, poll
 * CSR until FINISH, then assemble the 12-bit sample from two data bytes.
 */
static int dt2814_ai_insn_read(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	int n, i, hi, lo;
	int chan;
	int status = 0;

	for (n = 0; n < insn->n; n++) {
		chan = CR_CHAN(insn->chanspec);

		/* writing the channel number starts a conversion */
		outb(chan, dev->iobase + DT2814_CSR);
		for (i = 0; i < DT2814_TIMEOUT; i++) {
			status = inb(dev->iobase + DT2814_CSR);
			printk(KERN_INFO "dt2814: status: %02x\n", status);
			udelay(10);
			if (status & DT2814_FINISH)
				break;
		}
		if (i >= DT2814_TIMEOUT) {
			printk(KERN_INFO "dt2814: status: %02x\n", status);
			return -ETIMEDOUT;
		}

		/* high byte first; 12 bits, left-justified in 16 */
		hi = inb(dev->iobase + DT2814_DATA);
		lo = inb(dev->iobase + DT2814_DATA);

		data[n] = (hi << 4) | (lo >> 4);
	}

	return n;
}

/*
 * Map a requested period (ns) onto the card's power-of-10 divider.
 * Rounds to the nearest available rate; returns the divider exponent
 * and updates *ns to the actual period.
 */
static int dt2814_ns_to_timer(unsigned int *ns, unsigned int flags)
{
	int i;
	unsigned int f;

	/* XXX ignores flags */

	f = 10000;		/* ns */
	for (i = 0; i < 8; i++) {
		if ((2 * (*ns)) < (f * 11))
			break;
		f *= 10;
	}

	*ns = f;

	return i;
}

static int dt2814_ai_cmdtest(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp;

	/* step 1: make sure trigger sources are trivially valid */

	tmp = cmd->start_src;
	cmd->start_src &= TRIG_NOW;
	if (!cmd->start_src || tmp != cmd->start_src)
		err++;

	tmp = cmd->scan_begin_src;
	cmd->scan_begin_src &= TRIG_TIMER;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;

	tmp = cmd->convert_src;
	cmd->convert_src &= TRIG_NOW;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;

	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;

	tmp = cmd->stop_src;
	cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;

	if (err)
		return 1;

	/* step 2: make sure trigger sources are unique
	 * and mutually compatible */

	/* note that mutual compatibility is not an issue here */
	/* BUGFIX: step 1 reduced stop_src to a subset of
	 * TRIG_COUNT | TRIG_NONE, so require exactly one of those.
	 * The original test against TRIG_TIMER / TRIG_EXT could never
	 * succeed and caused every command to be rejected here. */
	if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
		err++;

	if (err)
		return 2;

	/* step 3: make sure arguments are trivially compatible */

	if (cmd->start_arg != 0) {
		cmd->start_arg = 0;
		err++;
	}
	if (cmd->scan_begin_arg > 1000000000) {
		cmd->scan_begin_arg = 1000000000;
		err++;
	}
	if (cmd->scan_begin_arg < DT2814_MAX_SPEED) {
		cmd->scan_begin_arg = DT2814_MAX_SPEED;
		err++;
	}
	if (cmd->scan_end_arg != cmd->chanlist_len) {
		cmd->scan_end_arg = cmd->chanlist_len;
		err++;
	}
	if (cmd->stop_src == TRIG_COUNT) {
		if (cmd->stop_arg < 2) {
			cmd->stop_arg = 2;
			err++;
		}
	} else {
		/* TRIG_NONE */
		if (cmd->stop_arg != 0) {
			cmd->stop_arg = 0;
			err++;
		}
	}

	if (err)
		return 3;

	/* step 4: fix up any arguments */

	tmp = cmd->scan_begin_arg;
	dt2814_ns_to_timer(&cmd->scan_begin_arg, cmd->flags & TRIG_ROUND_MASK);
	if (tmp != cmd->scan_begin_arg)
		err++;

	if (err)
		return 4;

	return 0;
}

/* Start a timed acquisition: enable the onboard clock with the chosen
 * divider; samples are collected in dt2814_interrupt(). */
static int dt2814_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
	struct comedi_cmd *cmd = &s->async->cmd;
	int chan;
	int trigvar;

	trigvar =
	    dt2814_ns_to_timer(&cmd->scan_begin_arg,
			       cmd->flags & TRIG_ROUND_MASK);

	chan = CR_CHAN(cmd->chanlist[0]);

	devpriv->ntrig = cmd->stop_arg;
	outb(chan | DT2814_ENB | (trigvar << 5), dev->iobase + DT2814_CSR);

	return 0;
}

static int dt2814_attach(struct comedi_device *dev,
			 struct comedi_devconfig *it)
{
	int i, irq;
	int ret;
	struct comedi_subdevice *s;
	unsigned long iobase;

	iobase = it->options[0];
	printk(KERN_INFO "comedi%d: dt2814: 0x%04lx ", dev->minor, iobase);
	if (!request_region(iobase, DT2814_SIZE, "dt2814")) {
		printk(KERN_ERR "I/O port conflict\n");
		return -EIO;
	}
	dev->iobase = iobase;
	dev->board_name = "dt2814";

	/* reset the board and verify it responds */
	outb(0, dev->iobase + DT2814_CSR);
	udelay(100);
	if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR) {
		printk(KERN_ERR "reset error (fatal)\n");
		return -EIO;
	}
	/* flush any stale sample */
	i = inb(dev->iobase + DT2814_DATA);
	i = inb(dev->iobase + DT2814_DATA);

	irq = it->options[1];
#if 0
	if (irq < 0) {
		save_flags(flags);
		sti();
		irqs = probe_irq_on();

		outb(0, dev->iobase + DT2814_CSR);

		udelay(100);

		irq = probe_irq_off(irqs);
		restore_flags(flags);
		if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR)
			printk(KERN_DEBUG "error probing irq (bad)\n");

		i = inb(dev->iobase + DT2814_DATA);
		i = inb(dev->iobase + DT2814_DATA);
	}
#endif
	dev->irq = 0;
	if (irq > 0) {
		if (request_irq(irq, dt2814_interrupt, 0, "dt2814", dev)) {
			printk(KERN_WARNING "(irq %d unavailable)\n", irq);
		} else {
			printk(KERN_INFO "( irq = %d )\n", irq);
			dev->irq = irq;
		}
	} else if (irq == 0) {
		printk(KERN_WARNING "(no irq)\n");
	} else {
#if 0
		printk(KERN_DEBUG "(probe returned multiple irqs--bad)\n");
#else
		printk(KERN_WARNING "(irq probe not implemented)\n");
#endif
	}

	ret = alloc_subdevices(dev, 1);
	if (ret < 0)
		return ret;

	ret = alloc_private(dev, sizeof(struct dt2814_private));
	if (ret < 0)
		return ret;

	s = dev->subdevices + 0;
	dev->read_subdev = s;
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
	s->n_chan = 16;		/* XXX */
	s->len_chanlist = 1;
	s->insn_read = dt2814_ai_insn_read;
	s->do_cmd = dt2814_ai_cmd;
	s->do_cmdtest = dt2814_ai_cmdtest;
	s->maxdata = 0xfff;
	s->range_table = &range_unknown;	/* XXX */

	return 0;
}

static int dt2814_detach(struct comedi_device *dev)
{
	printk(KERN_INFO "comedi%d: dt2814: remove\n", dev->minor);

	if (dev->irq)
		free_irq(dev->irq, dev);

	if (dev->iobase)
		release_region(dev->iobase, DT2814_SIZE);

	return 0;
}

/*
 * Interrupt handler: one sample has completed.  Read it, and when the
 * requested count is exhausted, disable the clock and signal end of
 * acquisition.
 */
static irqreturn_t dt2814_interrupt(int irq, void *d)
{
	int lo, hi;
	struct comedi_device *dev = d;
	struct comedi_subdevice *s;
	int data;

	if (!dev->attached) {
		comedi_error(dev, "spurious interrupt");
		return IRQ_HANDLED;
	}

	s = dev->subdevices + 0;

	hi = inb(dev->iobase + DT2814_DATA);
	lo = inb(dev->iobase + DT2814_DATA);

	data = (hi << 4) | (lo >> 4);

	if (!(--devpriv->ntrig)) {
		int i;

		outb(0, dev->iobase + DT2814_CSR);
		/* note: turning off timed mode triggers another
		   sample. */

		for (i = 0; i < DT2814_TIMEOUT; i++) {
			if (inb(dev->iobase + DT2814_CSR) & DT2814_FINISH)
				break;
		}
		inb(dev->iobase + DT2814_DATA);
		inb(dev->iobase + DT2814_DATA);

		s->async->events |= COMEDI_CB_EOA;
	}
	comedi_event(dev, s);
	return IRQ_HANDLED;
}

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
mmukadam/linuxv3.12
drivers/staging/rtl8712/rtl871x_security.c
49
45313
/****************************************************************************** * rtl871x_security.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com> * Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _RTL871X_SECURITY_C_ #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kref.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/circ_buf.h> #include <linux/uaccess.h> #include <asm/byteorder.h> #include <linux/atomic.h> #include <linux/semaphore.h> #include "osdep_service.h" #include "drv_types.h" #include "wifi.h" #include "osdep_intf.h" /* =====WEP related===== */ #define CRC32_POLY 0x04c11db7 struct arc4context { u32 x; u32 y; u8 state[256]; }; static void arcfour_init(struct arc4context *parc4ctx, u8 *key, u32 key_len) { u32 t, u; u32 keyindex; u32 stateindex; u8 *state; u32 counter; state = parc4ctx->state; parc4ctx->x = 0; parc4ctx->y = 0; for (counter = 0; counter < 256; counter++) state[counter] = 
(u8)counter; keyindex = 0; stateindex = 0; for (counter = 0; counter < 256; counter++) { t = state[counter]; stateindex = (stateindex + key[keyindex] + t) & 0xff; u = state[stateindex]; state[stateindex] = (u8)t; state[counter] = (u8)u; if (++keyindex >= key_len) keyindex = 0; } } static u32 arcfour_byte(struct arc4context *parc4ctx) { u32 x; u32 y; u32 sx, sy; u8 *state; state = parc4ctx->state; x = (parc4ctx->x + 1) & 0xff; sx = state[x]; y = (sx + parc4ctx->y) & 0xff; sy = state[y]; parc4ctx->x = x; parc4ctx->y = y; state[y] = (u8)sx; state[x] = (u8)sy; return state[(sx + sy) & 0xff]; } static void arcfour_encrypt(struct arc4context *parc4ctx, u8 *dest, u8 *src, u32 len) { u32 i; for (i = 0; i < len; i++) dest[i] = src[i] ^ (unsigned char)arcfour_byte(parc4ctx); } static sint bcrc32initialized; static u32 crc32_table[256]; static u8 crc32_reverseBit(u8 data) { return ((u8)(data << 7) & 0x80) | ((data << 5) & 0x40) | ((data << 3) & 0x20) | ((data << 1) & 0x10) | ((data >> 1) & 0x08) | ((data >> 3) & 0x04) | ((data >> 5) & 0x02) | ((data >> 7) & 0x01); } static void crc32_init(void) { if (bcrc32initialized == 1) return; else { sint i, j; u32 c; u8 *p = (u8 *)&c, *p1; u8 k; c = 0x12340000; for (i = 0; i < 256; ++i) { k = crc32_reverseBit((u8)i); for (c = ((u32)k) << 24, j = 8; j > 0; --j) c = c & 0x80000000 ? 
(c << 1) ^ CRC32_POLY : (c << 1); p1 = (u8 *)&crc32_table[i]; p1[0] = crc32_reverseBit(p[3]); p1[1] = crc32_reverseBit(p[2]); p1[2] = crc32_reverseBit(p[1]); p1[3] = crc32_reverseBit(p[0]); } bcrc32initialized = 1; } } static u32 getcrc32(u8 *buf, u32 len) { u8 *p; u32 crc; if (bcrc32initialized == 0) crc32_init(); crc = 0xffffffff; /* preload shift register, per CRC-32 spec */ for (p = buf; len > 0; ++p, --len) crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8); return ~crc; /* transmit complement, per CRC-32 spec */ } /* Need to consider the fragment situation */ void r8712_wep_encrypt(struct _adapter *padapter, u8 *pxmitframe) { /* exclude ICV */ unsigned char crc[4]; struct arc4context mycontext; u32 curfragnum, length, keylength; u8 *pframe, *payload, *iv; /*,*wepkey*/ u8 wepkey[16]; struct pkt_attrib *pattrib = &((struct xmit_frame *) pxmitframe)->attrib; struct security_priv *psecuritypriv = &padapter->securitypriv; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL) return; pframe = ((struct xmit_frame *)pxmitframe)->buf_addr+TXDESC_OFFSET; /*start to encrypt each fragment*/ if ((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) { keylength = psecuritypriv->DefKeylen[psecuritypriv-> PrivacyKeyIndex]; for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) { iv = pframe+pattrib->hdrlen; memcpy(&wepkey[0], iv, 3); memcpy(&wepkey[3], &psecuritypriv->DefKey[ psecuritypriv->PrivacyKeyIndex].skey[0], keylength); payload = pframe+pattrib->iv_len+pattrib->hdrlen; if ((curfragnum + 1) == pattrib->nr_frags) { length = pattrib->last_txcmdsz-pattrib-> hdrlen-pattrib->iv_len - pattrib->icv_len; *((u32 *)crc) = cpu_to_le32(getcrc32( payload, length)); arcfour_init(&mycontext, wepkey, 3 + keylength); arcfour_encrypt(&mycontext, payload, payload, length); arcfour_encrypt(&mycontext, payload + length, crc, 4); } else { length = pxmitpriv->frag_len-pattrib->hdrlen - 
pattrib->iv_len-pattrib->icv_len; *((u32 *)crc) = cpu_to_le32(getcrc32( payload, length)); arcfour_init(&mycontext, wepkey, 3 + keylength); arcfour_encrypt(&mycontext, payload, payload, length); arcfour_encrypt(&mycontext, payload+length, crc, 4); pframe += pxmitpriv->frag_len; pframe = (u8 *)RND4((addr_t)(pframe)); } } } } void r8712_wep_decrypt(struct _adapter *padapter, u8 *precvframe) { /* exclude ICV */ u8 crc[4]; struct arc4context mycontext; u32 length, keylength; u8 *pframe, *payload, *iv, wepkey[16]; u8 keyindex; struct rx_pkt_attrib *prxattrib = &(((union recv_frame *) precvframe)->u.hdr.attrib); struct security_priv *psecuritypriv = &padapter->securitypriv; pframe = (unsigned char *)((union recv_frame *)precvframe)-> u.hdr.rx_data; /* start to decrypt recvframe */ if ((prxattrib->encrypt == _WEP40_) || (prxattrib->encrypt == _WEP104_)) { iv = pframe + prxattrib->hdrlen; keyindex = (iv[3] & 0x3); keylength = psecuritypriv->DefKeylen[keyindex]; memcpy(&wepkey[0], iv, 3); memcpy(&wepkey[3], &psecuritypriv->DefKey[ psecuritypriv->PrivacyKeyIndex].skey[0], keylength); length = ((union recv_frame *)precvframe)-> u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len; payload = pframe+prxattrib->iv_len+prxattrib->hdrlen; /* decrypt payload include icv */ arcfour_init(&mycontext, wepkey, 3 + keylength); arcfour_encrypt(&mycontext, payload, payload, length); /* calculate icv and compare the icv */ *((u32 *)crc) = cpu_to_le32(getcrc32(payload, length - 4)); } return; } /* 3 =====TKIP related===== */ static u32 secmicgetuint32(u8 *p) /* Convert from Byte[] to Us4Byte32 in a portable way */ { s32 i; u32 res = 0; for (i = 0; i < 4; i++) res |= ((u32)(*p++)) << (8 * i); return res; } static void secmicputuint32(u8 *p, u32 val) /* Convert from Us4Byte32 to Byte[] in a portable way */ { long i; for (i = 0; i < 4; i++) { *p++ = (u8) (val & 0xff); val >>= 8; } } static void secmicclear(struct mic_data *pmicdata) { /* Reset the state to the empty message. 
*/ pmicdata->L = pmicdata->K0; pmicdata->R = pmicdata->K1; pmicdata->nBytesInM = 0; pmicdata->M = 0; } void r8712_secmicsetkey(struct mic_data *pmicdata, u8 *key) { /* Set the key */ pmicdata->K0 = secmicgetuint32(key); pmicdata->K1 = secmicgetuint32(key + 4); /* and reset the message */ secmicclear(pmicdata); } static void secmicappendbyte(struct mic_data *pmicdata, u8 b) { /* Append the byte to our word-sized buffer */ pmicdata->M |= ((u32)b) << (8 * pmicdata->nBytesInM); pmicdata->nBytesInM++; /* Process the word if it is full. */ if (pmicdata->nBytesInM >= 4) { pmicdata->L ^= pmicdata->M; pmicdata->R ^= ROL32(pmicdata->L, 17); pmicdata->L += pmicdata->R; pmicdata->R ^= ((pmicdata->L & 0xff00ff00) >> 8) | ((pmicdata->L & 0x00ff00ff) << 8); pmicdata->L += pmicdata->R; pmicdata->R ^= ROL32(pmicdata->L, 3); pmicdata->L += pmicdata->R; pmicdata->R ^= ROR32(pmicdata->L, 2); pmicdata->L += pmicdata->R; /* Clear the buffer */ pmicdata->M = 0; pmicdata->nBytesInM = 0; } } void r8712_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nbytes) { /* This is simple */ while (nbytes > 0) { secmicappendbyte(pmicdata, *src++); nbytes--; } } void r8712_secgetmic(struct mic_data *pmicdata, u8 *dst) { /* Append the minimum padding */ secmicappendbyte(pmicdata, 0x5a); secmicappendbyte(pmicdata, 0); secmicappendbyte(pmicdata, 0); secmicappendbyte(pmicdata, 0); secmicappendbyte(pmicdata, 0); /* and then zeroes until the length is a multiple of 4 */ while (pmicdata->nBytesInM != 0) secmicappendbyte(pmicdata, 0); /* The appendByte function has already computed the result. */ secmicputuint32(dst, pmicdata->L); secmicputuint32(dst + 4, pmicdata->R); /* Reset to the empty message. 
*/ secmicclear(pmicdata); } void seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_code, u8 pri) { struct mic_data micdata; u8 priority[4] = {0x0, 0x0, 0x0, 0x0}; r8712_secmicsetkey(&micdata, key); priority[0] = pri; /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */ if (header[1] & 1) { /* ToDS==1 */ r8712_secmicappend(&micdata, &header[16], 6); /* DA */ if (header[1] & 2) /* From Ds==1 */ r8712_secmicappend(&micdata, &header[24], 6); else r8712_secmicappend(&micdata, &header[10], 6); } else { /* ToDS==0 */ r8712_secmicappend(&micdata, &header[4], 6); /* DA */ if (header[1] & 2) /* From Ds==1 */ r8712_secmicappend(&micdata, &header[16], 6); else r8712_secmicappend(&micdata, &header[10], 6); } r8712_secmicappend(&micdata, &priority[0], 4); r8712_secmicappend(&micdata, data, data_len); r8712_secgetmic(&micdata, mic_code); } /* macros for extraction/creation of unsigned char/unsigned short values */ #define RotR1(v16) ((((v16) >> 1) & 0x7FFF) ^ (((v16) & 1) << 15)) #define Lo8(v16) ((u8)((v16) & 0x00FF)) #define Hi8(v16) ((u8)(((v16) >> 8) & 0x00FF)) #define Lo16(v32) ((u16)((v32) & 0xFFFF)) #define Hi16(v32) ((u16)(((v32) >> 16) & 0xFFFF)) #define Mk16(hi, lo) ((lo) ^ (((u16)(hi)) << 8)) /* select the Nth 16-bit word of the temporal key unsigned char array TK[] */ #define TK16(N) Mk16(tk[2 * (N) + 1], tk[2 * (N)]) /* S-box lookup: 16 bits --> 16 bits */ #define _S_(v16) (Sbox1[0][Lo8(v16)] ^ Sbox1[1][Hi8(v16)]) /* fixed algorithm "parameters" */ #define PHASE1_LOOP_CNT 8 /* this needs to be "big enough" */ #define TA_SIZE 6 /* 48-bit transmitter address */ #define TK_SIZE 16 /* 128-bit temporal key */ #define P1K_SIZE 10 /* 80-bit Phase1 key */ #define RC4_KEY_SIZE 16 /* 128-bit RC4KEY (104 bits unknown) */ /* 2-unsigned char by 2-unsigned char subset of the full AES S-box table */ static const unsigned short Sbox1[2][256] = {/* Sbox for hash (can be in ROM) */ { 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, 0x6050, 0x0203, 
0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B, 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B, 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F, 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F, 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5, 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F, 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB, 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397, 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED, 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A, 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194, 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3, 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104, 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D, 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39, 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695, 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83, 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76, 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4, 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B, 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0, 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018, 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751, 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85, 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12, 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9, 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7, 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A, 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8, 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, }, { /* second half is unsigned 
char-reversed version of first! */ 0xA5C6, 0x84F8, 0x99EE, 0x8DF6, 0x0DFF, 0xBDD6, 0xB1DE, 0x5491, 0x5060, 0x0302, 0xA9CE, 0x7D56, 0x19E7, 0x62B5, 0xE64D, 0x9AEC, 0x458F, 0x9D1F, 0x4089, 0x87FA, 0x15EF, 0xEBB2, 0xC98E, 0x0BFB, 0xEC41, 0x67B3, 0xFD5F, 0xEA45, 0xBF23, 0xF753, 0x96E4, 0x5B9B, 0xC275, 0x1CE1, 0xAE3D, 0x6A4C, 0x5A6C, 0x417E, 0x02F5, 0x4F83, 0x5C68, 0xF451, 0x34D1, 0x08F9, 0x93E2, 0x73AB, 0x5362, 0x3F2A, 0x0C08, 0x5295, 0x6546, 0x5E9D, 0x2830, 0xA137, 0x0F0A, 0xB52F, 0x090E, 0x3624, 0x9B1B, 0x3DDF, 0x26CD, 0x694E, 0xCD7F, 0x9FEA, 0x1B12, 0x9E1D, 0x7458, 0x2E34, 0x2D36, 0xB2DC, 0xEEB4, 0xFB5B, 0xF6A4, 0x4D76, 0x61B7, 0xCE7D, 0x7B52, 0x3EDD, 0x715E, 0x9713, 0xF5A6, 0x68B9, 0x0000, 0x2CC1, 0x6040, 0x1FE3, 0xC879, 0xEDB6, 0xBED4, 0x468D, 0xD967, 0x4B72, 0xDE94, 0xD498, 0xE8B0, 0x4A85, 0x6BBB, 0x2AC5, 0xE54F, 0x16ED, 0xC586, 0xD79A, 0x5566, 0x9411, 0xCF8A, 0x10E9, 0x0604, 0x81FE, 0xF0A0, 0x4478, 0xBA25, 0xE34B, 0xF3A2, 0xFE5D, 0xC080, 0x8A05, 0xAD3F, 0xBC21, 0x4870, 0x04F1, 0xDF63, 0xC177, 0x75AF, 0x6342, 0x3020, 0x1AE5, 0x0EFD, 0x6DBF, 0x4C81, 0x1418, 0x3526, 0x2FC3, 0xE1BE, 0xA235, 0xCC88, 0x392E, 0x5793, 0xF255, 0x82FC, 0x477A, 0xACC8, 0xE7BA, 0x2B32, 0x95E6, 0xA0C0, 0x9819, 0xD19E, 0x7FA3, 0x6644, 0x7E54, 0xAB3B, 0x830B, 0xCA8C, 0x29C7, 0xD36B, 0x3C28, 0x79A7, 0xE2BC, 0x1D16, 0x76AD, 0x3BDB, 0x5664, 0x4E74, 0x1E14, 0xDB92, 0x0A0C, 0x6C48, 0xE4B8, 0x5D9F, 0x6EBD, 0xEF43, 0xA6C4, 0xA839, 0xA431, 0x37D3, 0x8BF2, 0x32D5, 0x438B, 0x596E, 0xB7DA, 0x8C01, 0x64B1, 0xD29C, 0xE049, 0xB4D8, 0xFAAC, 0x07F3, 0x25CF, 0xAFCA, 0x8EF4, 0xE947, 0x1810, 0xD56F, 0x88F0, 0x6F4A, 0x725C, 0x2438, 0xF157, 0xC773, 0x5197, 0x23CB, 0x7CA1, 0x9CE8, 0x213E, 0xDD96, 0xDC61, 0x860D, 0x850F, 0x90E0, 0x427C, 0xC471, 0xAACC, 0xD890, 0x0506, 0x01F7, 0x121C, 0xA3C2, 0x5F6A, 0xF9AE, 0xD069, 0x9117, 0x5899, 0x273A, 0xB927, 0x38D9, 0x13EB, 0xB32B, 0x3322, 0xBBD2, 0x70A9, 0x8907, 0xA733, 0xB62D, 0x223C, 0x9215, 0x20C9, 0x4987, 0xFFAA, 0x7850, 0x7AA5, 0x8F03, 0xF859, 0x8009, 0x171A, 0xDA65, 
0x31D7, 0xC684, 0xB8D0, 0xC382, 0xB029, 0x775A, 0x111E, 0xCB7B,
0xFCA8, 0xD66D, 0x3A2C, } };

/*
 **********************************************************************
 * Routine: Phase 1 -- generate P1K, given TA, TK, IV32
 *
 * Inputs:
 *     tk[]  = temporal key                   [128 bits]
 *     ta[]  = transmitter's MAC address      [ 48 bits]
 *     iv32  = upper 32 bits of IV            [ 32 bits]
 * Output:
 *     p1k[] = Phase 1 key                    [ 80 bits]
 *
 * Note:
 *     This function only needs to be called every 2**16 packets,
 *     although in theory it could be called every packet.
 **********************************************************************
 */
static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
{
	sint i;

	/* Initialize the 80 bits of P1K[] from IV32 and TA[0..5] */
	p1k[0] = Lo16(iv32);
	p1k[1] = Hi16(iv32);
	p1k[2] = Mk16(ta[1], ta[0]); /* use TA[] as little-endian */
	p1k[3] = Mk16(ta[3], ta[2]);
	p1k[4] = Mk16(ta[5], ta[4]);
	/*
	 * Now compute an unbalanced Feistel cipher with 80-bit block
	 * size on the 80-bit block P1K[], using the 128-bit key TK[].
	 */
	for (i = 0; i < PHASE1_LOOP_CNT; i++) {
		/* Each add is mod 2**16 */
		p1k[0] += _S_(p1k[4] ^ TK16((i & 1) + 0));
		p1k[1] += _S_(p1k[0] ^ TK16((i & 1) + 2));
		p1k[2] += _S_(p1k[1] ^ TK16((i & 1) + 4));
		p1k[3] += _S_(p1k[2] ^ TK16((i & 1) + 6));
		p1k[4] += _S_(p1k[3] ^ TK16((i & 1) + 0));
		p1k[4] += (unsigned short)i; /* avoid "slide attacks" */
	}
}

/*
 **********************************************************************
 * Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
 *
 * Inputs:
 *     tk[]     = Temporal key                       [128 bits]
 *     p1k[]    = Phase 1 output key                 [ 80 bits]
 *     iv16     = low 16 bits of IV counter          [ 16 bits]
 * Output:
 *     rc4key[] = the key used to encrypt the packet [128 bits]
 *
 * Note:
 *     The value {TA,IV32,IV16} for Phase1/Phase2 must be unique
 *     across all packets using the same key TK value. Then, for a
 *     given value of TK[], this TKIP48 construction guarantees that
 *     the final RC4KEY value is unique across all packets.
*
 * Suggested implementation optimization: if PPK[] is "overlaid"
 * appropriately on RC4KEY[], there is no need for the final
 * for loop below that copies the PPK[] result into RC4KEY[].
 **********************************************************************
 */
static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
{
	sint i;
	u16 PPK[6]; /* temporary key for mixing */

	/* Note: all adds in the PPK[] equations below are mod 2**16 */
	for (i = 0; i < 5; i++)
		PPK[i] = p1k[i]; /* first, copy P1K to PPK */
	PPK[5] = p1k[4] + iv16; /* next, add in IV16 */
	/* Bijective non-linear mixing of the 96 bits of PPK[0..5] */
	PPK[0] += _S_(PPK[5] ^ TK16(0)); /* Mix key in each "round" */
	PPK[1] += _S_(PPK[0] ^ TK16(1));
	PPK[2] += _S_(PPK[1] ^ TK16(2));
	PPK[3] += _S_(PPK[2] ^ TK16(3));
	PPK[4] += _S_(PPK[3] ^ TK16(4));
	PPK[5] += _S_(PPK[4] ^ TK16(5)); /* Total # S-box lookups == 6 */
	/* Final sweep: bijective, "linear". Rotates kill LSB correlations */
	PPK[0] += RotR1(PPK[5] ^ TK16(6));
	PPK[1] += RotR1(PPK[0] ^ TK16(7)); /* Use all of TK[] in Phase2 */
	PPK[2] += RotR1(PPK[1]);
	PPK[3] += RotR1(PPK[2]);
	PPK[4] += RotR1(PPK[3]);
	PPK[5] += RotR1(PPK[4]);
	/*
	 * Note: At this point, for a given key TK[0..15], the 96-bit output
	 *       value PPK[0..5] is guaranteed to be unique, as a function
	 *       of the 96-bit "input" value {TA,IV32,IV16}. That is, P1K
	 *       is now a keyed permutation of {TA,IV32,IV16}.
	 */
	/* Set RC4KEY[0..3], which includes "cleartext" portion of RC4 key */
	rc4key[0] = Hi8(iv16); /* RC4KEY[0..2] is the WEP IV */
	rc4key[1] = (Hi8(iv16) | 0x20) & 0x7F; /* Help avoid weak (FMS) keys */
	rc4key[2] = Lo8(iv16);
	rc4key[3] = Lo8((PPK[5] ^ TK16(0)) >> 1);
	/* Copy 96 bits of PPK[0..5] to RC4KEY[4..15] (little-endian) */
	for (i = 0; i < 6; i++) {
		rc4key[4 + 2 * i] = Lo8(PPK[i]);
		rc4key[5 + 2 * i] = Hi8(PPK[i]);
	}
}

/*
 * TKIP-encrypt every fragment of the frame in place (hlen doesn't
 * include the IV).  Returns _SUCCESS or _FAIL.
 */
u32 r8712_tkip_encrypt(struct _adapter *padapter, u8 *pxmitframe)
{	/* exclude ICV */
	u16 pnl;
	u32 pnh;
	u8 rc4key[16];
	u8 ttkey[16];
	u8 crc[4];
	struct arc4context mycontext;
	u32 curfragnum, length, prwskeylen;
	u8 *pframe, *payload, *iv, *prwskey;
	union pn48 txpn;
	struct sta_info *stainfo;
	struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	u32 res = _SUCCESS;

	if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
		return _FAIL;
	pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + TXDESC_OFFSET;
	/* encrypt each fragment in turn */
	if (pattrib->encrypt == _TKIP_) {
		if (pattrib->psta)
			stainfo = pattrib->psta;
		else
			stainfo = r8712_get_stainfo(&padapter->stapriv,
						    &pattrib->ra[0]);
		if (stainfo != NULL) {
			prwskey = &stainfo->x_UncstKey.skey[0];
			prwskeylen = 16;
			for (curfragnum = 0; curfragnum < pattrib->nr_frags;
			     curfragnum++) {
				iv = pframe + pattrib->hdrlen;
				payload = pframe + pattrib->iv_len +
					  pattrib->hdrlen;
				/* derive per-packet RC4 key from PN + TA */
				GET_TKIP_PN(iv, txpn);
				pnl = (u16)(txpn.val);
				pnh = (u32)(txpn.val >> 16);
				phase1((u16 *)&ttkey[0], prwskey,
				       &pattrib->ta[0], pnh);
				phase2(&rc4key[0], prwskey,
				       (u16 *)&ttkey[0], pnl);
				if ((curfragnum + 1) == pattrib->nr_frags) {
					/* the last fragment */
					length = pattrib->last_txcmdsz -
						 pattrib->hdrlen -
						 pattrib->iv_len -
						 pattrib->icv_len;
					/* CRC32 ICV, stored little-endian */
					*((u32 *)crc) = cpu_to_le32(
						getcrc32(payload, length));
					arcfour_init(&mycontext, rc4key, 16);
					arcfour_encrypt(&mycontext, payload,
							payload, length);
					arcfour_encrypt(&mycontext,
							payload + length,
							crc, 4);
} else {
					/* middle fragment: fixed frag_len */
					length = pxmitpriv->frag_len -
						 pattrib->hdrlen -
						 pattrib->iv_len -
						 pattrib->icv_len;
					*((u32 *)crc) = cpu_to_le32(
						getcrc32(payload, length));
					arcfour_init(&mycontext, rc4key, 16);
					arcfour_encrypt(&mycontext, payload,
							payload, length);
					arcfour_encrypt(&mycontext,
							payload + length,
							crc, 4);
					pframe += pxmitpriv->frag_len;
					pframe = (u8 *)RND4((addr_t)(pframe));
				}
			}
		} else
			res = _FAIL;
	}
	return res;
}

/*
 * TKIP-decrypt the received frame in place and verify the CRC32 ICV
 * (hlen doesn't include the IV).  Returns _SUCCESS or _FAIL.
 */
u32 r8712_tkip_decrypt(struct _adapter *padapter, u8 *precvframe)
{	/* exclude ICV */
	u16 pnl;
	u32 pnh;
	u8 rc4key[16];
	u8 ttkey[16];
	u8 crc[4];
	struct arc4context mycontext;
	u32 length, prwskeylen;
	u8 *pframe, *payload, *iv, *prwskey, idx = 0;
	union pn48 txpn;
	struct sta_info *stainfo;
	struct rx_pkt_attrib *prxattrib = &((union recv_frame *)
					  precvframe)->u.hdr.attrib;
	struct security_priv *psecuritypriv = &padapter->securitypriv;

	pframe = (unsigned char *)((union recv_frame *)
				   precvframe)->u.hdr.rx_data;
	/* decrypt the received frame */
	if (prxattrib->encrypt == _TKIP_) {
		stainfo = r8712_get_stainfo(&padapter->stapriv,
					    &prxattrib->ta[0]);
		if (stainfo != NULL) {
			iv = pframe + prxattrib->hdrlen;
			payload = pframe + prxattrib->iv_len +
				  prxattrib->hdrlen;
			length = ((union recv_frame *)precvframe)->
				 u.hdr.len - prxattrib->hdrlen -
				 prxattrib->iv_len;
			if (IS_MCAST(prxattrib->ra)) {
				idx = iv[3]; /* key id in IV byte 3, bits 7:6 */
				/* NOTE(review): ((idx >> 6) & 0x3) - 1
				 * underflows for key id 0 -- confirm the
				 * AP never delivers group frames with
				 * key index 0. */
				prwskey = &psecuritypriv->XGrpKey[
					((idx >> 6) & 0x3) - 1].skey[0];
				if (psecuritypriv->binstallGrpkey == false)
					return _FAIL;
			} else
				prwskey = &stainfo->x_UncstKey.skey[0];
			prwskeylen = 16;
			GET_TKIP_PN(iv, txpn);
			pnl = (u16)(txpn.val);
			pnh = (u32)(txpn.val >> 16);
			phase1((u16 *)&ttkey[0], prwskey,
			       &prxattrib->ta[0], pnh);
			phase2(&rc4key[0], prwskey,
			       (unsigned short *)&ttkey[0], pnl);
			/* decrypt payload, ICV included */
			arcfour_init(&mycontext, rc4key, 16);
			arcfour_encrypt(&mycontext, payload, payload, length);
			/* recompute CRC32 over the plaintext and compare
			 * with the trailing 4-byte ICV */
			*((u32 *)crc) = cpu_to_le32(getcrc32(payload,
							     length - 4));
			if (crc[3] != payload[length - 1] ||
			    crc[2] != payload[length - 2] || crc[1] !=
payload[length - 3] || crc[0] !=
			    payload[length - 4])
				return _FAIL;
		} else
			return _FAIL;
	}
	return _SUCCESS;
}

/* 3 =====AES related===== */

#define MAX_MSG_SIZE	2048

/*****************************/
/******** SBOX Table *********/
/*****************************/
/* Standard AES (Rijndael) forward S-box, per FIPS-197 */
static const u8 sbox_table[256] = {
	0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
	0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
	0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
	0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
	0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
	0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
	0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
	0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
	0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
	0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
	0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
	0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
	0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
	0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
	0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
	0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
	0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
	0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
	0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
	0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
	0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
	0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
	0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
	0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
	0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
	0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
	0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
	0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
	0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
	0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
	0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
	0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};

/****************************************/
/* aes128k128d()                        */
/* Performs a 128 bit AES encrypt with  */
/* 128 bit data.
*/ /****************************************/ static void xor_128(u8 *a, u8 *b, u8 *out) { sint i; for (i = 0; i < 16; i++) out[i] = a[i] ^ b[i]; } static void xor_32(u8 *a, u8 *b, u8 *out) { sint i; for (i = 0; i < 4; i++) out[i] = a[i] ^ b[i]; } static u8 sbox(u8 a) { return sbox_table[(sint)a]; } static void next_key(u8 *key, sint round) { u8 rcon; u8 sbox_key[4]; u8 rcon_table[12] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x36, 0x36 }; sbox_key[0] = sbox(key[13]); sbox_key[1] = sbox(key[14]); sbox_key[2] = sbox(key[15]); sbox_key[3] = sbox(key[12]); rcon = rcon_table[round]; xor_32(&key[0], sbox_key, &key[0]); key[0] = key[0] ^ rcon; xor_32(&key[4], &key[0], &key[4]); xor_32(&key[8], &key[4], &key[8]); xor_32(&key[12], &key[8], &key[12]); } static void byte_sub(u8 *in, u8 *out) { sint i; for (i = 0; i < 16; i++) out[i] = sbox(in[i]); } static void shift_row(u8 *in, u8 *out) { out[0] = in[0]; out[1] = in[5]; out[2] = in[10]; out[3] = in[15]; out[4] = in[4]; out[5] = in[9]; out[6] = in[14]; out[7] = in[3]; out[8] = in[8]; out[9] = in[13]; out[10] = in[2]; out[11] = in[7]; out[12] = in[12]; out[13] = in[1]; out[14] = in[6]; out[15] = in[11]; } static void mix_column(u8 *in, u8 *out) { sint i; u8 add1b[4]; u8 add1bf7[4]; u8 rotl[4]; u8 swap_halfs[4]; u8 andf7[4]; u8 rotr[4]; u8 temp[4]; u8 tempb[4]; for (i = 0; i < 4; i++) { if ((in[i] & 0x80) == 0x80) add1b[i] = 0x1b; else add1b[i] = 0x00; } swap_halfs[0] = in[2]; /* Swap halves */ swap_halfs[1] = in[3]; swap_halfs[2] = in[0]; swap_halfs[3] = in[1]; rotl[0] = in[3]; /* Rotate left 8 bits */ rotl[1] = in[0]; rotl[2] = in[1]; rotl[3] = in[2]; andf7[0] = in[0] & 0x7f; andf7[1] = in[1] & 0x7f; andf7[2] = in[2] & 0x7f; andf7[3] = in[3] & 0x7f; for (i = 3; i > 0; i--) { /* logical shift left 1 bit */ andf7[i] = andf7[i] << 1; if ((andf7[i-1] & 0x80) == 0x80) andf7[i] = (andf7[i] | 0x01); } andf7[0] = andf7[0] << 1; andf7[0] = andf7[0] & 0xfe; xor_32(add1b, andf7, add1bf7); xor_32(in, add1bf7, 
rotr); temp[0] = rotr[0]; /* Rotate right 8 bits */ rotr[0] = rotr[1]; rotr[1] = rotr[2]; rotr[2] = rotr[3]; rotr[3] = temp[0]; xor_32(add1bf7, rotr, temp); xor_32(swap_halfs, rotl, tempb); xor_32(temp, tempb, out); } static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext) { sint round; sint i; u8 intermediatea[16]; u8 intermediateb[16]; u8 round_key[16]; for (i = 0; i < 16; i++) round_key[i] = key[i]; for (round = 0; round < 11; round++) { if (round == 0) { xor_128(round_key, data, ciphertext); next_key(round_key, round); } else if (round == 10) { byte_sub(ciphertext, intermediatea); shift_row(intermediatea, intermediateb); xor_128(intermediateb, round_key, ciphertext); } else { /* 1 - 9 */ byte_sub(ciphertext, intermediatea); shift_row(intermediatea, intermediateb); mix_column(&intermediateb[0], &intermediatea[0]); mix_column(&intermediateb[4], &intermediatea[4]); mix_column(&intermediateb[8], &intermediatea[8]); mix_column(&intermediateb[12], &intermediatea[12]); xor_128(intermediatea, round_key, ciphertext); next_key(round_key, round); } } } /************************************************/ /* construct_mic_iv() */ /* Builds the MIC IV from header fields and PN */ /************************************************/ static void construct_mic_iv(u8 *mic_iv, sint qc_exists, sint a4_exists, u8 *mpdu, uint payload_length, u8 *pn_vector) { sint i; mic_iv[0] = 0x59; if (qc_exists && a4_exists) mic_iv[1] = mpdu[30] & 0x0f; /* QoS_TC */ if (qc_exists && !a4_exists) mic_iv[1] = mpdu[24] & 0x0f; /* mute bits 7-4 */ if (!qc_exists) mic_iv[1] = 0x00; for (i = 2; i < 8; i++) mic_iv[i] = mpdu[i + 8]; for (i = 8; i < 14; i++) mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */ mic_iv[14] = (unsigned char) (payload_length / 256); mic_iv[15] = (unsigned char) (payload_length % 256); } /************************************************/ /* construct_mic_header1() */ /* Builds the first MIC header block from */ /* header fields. 
*/
/************************************************/
static void construct_mic_header1(u8 *mic_header1, sint header_length,
				  u8 *mpdu)
{
	/* AAD length field: header length minus the 2-byte length itself */
	mic_header1[0] = (u8)((header_length - 2) / 256);
	mic_header1[1] = (u8)((header_length - 2) % 256);
	mic_header1[2] = mpdu[0] & 0xcf; /* Mute CF poll & CF ack bits */
	/* Mute retry, more data and pwr mgt bits */
	mic_header1[3] = mpdu[1] & 0xc7;
	mic_header1[4] = mpdu[4]; /* A1 */
	mic_header1[5] = mpdu[5];
	mic_header1[6] = mpdu[6];
	mic_header1[7] = mpdu[7];
	mic_header1[8] = mpdu[8];
	mic_header1[9] = mpdu[9];
	mic_header1[10] = mpdu[10]; /* A2 */
	mic_header1[11] = mpdu[11];
	mic_header1[12] = mpdu[12];
	mic_header1[13] = mpdu[13];
	mic_header1[14] = mpdu[14];
	mic_header1[15] = mpdu[15];
}

/************************************************/
/* construct_mic_header2()                      */
/* Builds the last MIC header block from        */
/* header fields.                               */
/************************************************/
static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, sint a4_exists,
				  sint qc_exists)
{
	sint i;

	for (i = 0; i < 16; i++)
		mic_header2[i] = 0x00;
	mic_header2[0] = mpdu[16]; /* A3 */
	mic_header2[1] = mpdu[17];
	mic_header2[2] = mpdu[18];
	mic_header2[3] = mpdu[19];
	mic_header2[4] = mpdu[20];
	mic_header2[5] = mpdu[21];
	mic_header2[6] = 0x00; /* sequence number muted to zero */
	mic_header2[7] = 0x00; /* mpdu[23]; */
	if (!qc_exists && a4_exists)
		for (i = 0; i < 6; i++)
			mic_header2[8 + i] = mpdu[24 + i]; /* A4 */
	if (qc_exists && !a4_exists) {
		mic_header2[8] = mpdu[24] & 0x0f; /* mute bits 15 - 4 */
		mic_header2[9] = mpdu[25] & 0x00;
	}
	if (qc_exists && a4_exists) {
		for (i = 0; i < 6; i++)
			mic_header2[8 + i] = mpdu[24 + i]; /* A4 */
		mic_header2[14] = mpdu[30] & 0x0f; /* QC, bits 15-4 muted */
		mic_header2[15] = mpdu[31] & 0x00;
	}
}

/************************************************/
/* construct_ctr_preload()                      */
/* Builds the AES counter-mode preload block    */
/* from header fields and PN.
*/
/************************************************/
static void construct_ctr_preload(u8 *ctr_preload, sint a4_exists,
				  sint qc_exists, u8 *mpdu, u8 *pn_vector,
				  sint c)
{
	sint i;

	for (i = 0; i < 16; i++)
		ctr_preload[i] = 0x00;
	i = 0; /* (redundant; the loops below reassign i) */
	ctr_preload[0] = 0x01; /* flag */
	if (qc_exists && a4_exists)
		ctr_preload[1] = mpdu[30] & 0x0f;
	if (qc_exists && !a4_exists)
		ctr_preload[1] = mpdu[24] & 0x0f;
	for (i = 2; i < 8; i++)
		ctr_preload[i] = mpdu[i + 8]; /* A2 */
	for (i = 8; i < 14; i++)
		ctr_preload[i] = pn_vector[13 - i]; /* PN[5:0] */
	ctr_preload[14] = (unsigned char)(c / 256); /* Ctr */
	ctr_preload[15] = (unsigned char)(c % 256);
}

/************************************/
/* bitwise_xor()                    */
/* A 128 bit, bitwise exclusive or  */
/************************************/
static void bitwise_xor(u8 *ina, u8 *inb, u8 *out)
{
	sint i;

	for (i = 0; i < 16; i++)
		out[i] = ina[i] ^ inb[i];
}

/*
 * CCMP-encrypt one MPDU in place: compute the CBC-MAC MIC over the
 * muted header + payload, append it, then CTR-encrypt payload and MIC.
 */
static sint aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
{
	uint qc_exists, a4_exists, i, j, payload_remainder;
	uint num_blocks, payload_index;
	u8 pn_vector[6];
	u8 mic_iv[16];
	u8 mic_header1[16];
	u8 mic_header2[16];
	u8 ctr_preload[16];
	/* Intermediate Buffers */
	u8 chain_buffer[16];
	u8 aes_out[16];
	u8 padded_buffer[16];
	u8 mic[8];
	uint frtype = GetFrameType(pframe);
	uint frsubtype = GetFrameSubType(pframe);

	frsubtype = frsubtype >> 4;
	memset((void *)mic_iv, 0, 16);
	memset((void *)mic_header1, 0, 16);
	memset((void *)mic_header2, 0, 16);
	memset((void *)ctr_preload, 0, 16);
	memset((void *)chain_buffer, 0, 16);
	memset((void *)aes_out, 0, 16);
	memset((void *)padded_buffer, 0, 16);
	/* A4 (fourth address) present unless a plain 3-address header */
	if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
		a4_exists = 0;
	else
		a4_exists = 1;
	/* QC field present for QoS data subtypes; widen hdrlen to cover it */
	if ((frtype == WIFI_DATA_CFACK) || (frtype == WIFI_DATA_CFPOLL) ||
	    (frtype == WIFI_DATA_CFACKPOLL)) {
		qc_exists = 1;
		if (hdrlen != WLAN_HDR_A3_QOS_LEN)
			hdrlen += 2;
	} else if ((frsubtype == 0x08) || (frsubtype == 0x09) ||
		   (frsubtype == 0x0a) || (frsubtype == 0x0b)) {
		if (hdrlen != WLAN_HDR_A3_QOS_LEN)
			hdrlen += 2;
		qc_exists = 1;
	} else
		qc_exists = 0;
	/* packet number from the CCMP extended IV */
	pn_vector[0] = pframe[hdrlen];
	pn_vector[1] = pframe[hdrlen + 1];
	pn_vector[2] = pframe[hdrlen + 4];
	pn_vector[3] = pframe[hdrlen + 5];
	pn_vector[4] = pframe[hdrlen + 6];
	pn_vector[5] = pframe[hdrlen + 7];
	construct_mic_iv(mic_iv, qc_exists, a4_exists, pframe, plen,
			 pn_vector);
	construct_mic_header1(mic_header1, hdrlen, pframe);
	construct_mic_header2(mic_header2, pframe, a4_exists, qc_exists);
	payload_remainder = plen % 16;
	num_blocks = plen / 16;
	/* Find start of payload */
	payload_index = (hdrlen + 8);
	/* Calculate MIC (AES-CBC-MAC over IV, headers, then payload) */
	aes128k128d(key, mic_iv, aes_out);
	bitwise_xor(aes_out, mic_header1, chain_buffer);
	aes128k128d(key, chain_buffer, aes_out);
	bitwise_xor(aes_out, mic_header2, chain_buffer);
	aes128k128d(key, chain_buffer, aes_out);
	for (i = 0; i < num_blocks; i++) {
		bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
		payload_index += 16;
		aes128k128d(key, chain_buffer, aes_out);
	}
	/* Add on the final payload block if it needs padding */
	if (payload_remainder > 0) {
		for (j = 0; j < 16; j++)
			padded_buffer[j] = 0x00;
		for (j = 0; j < payload_remainder; j++)
			padded_buffer[j] = pframe[payload_index++];
		bitwise_xor(aes_out, padded_buffer, chain_buffer);
		aes128k128d(key, chain_buffer, aes_out);
	}
	for (j = 0; j < 8; j++)
		mic[j] = aes_out[j]; /* MIC = first 8 bytes of CBC-MAC */
	/* Insert MIC into payload */
	for (j = 0; j < 8; j++)
		pframe[payload_index + j] = mic[j];
	/* CTR-mode encrypt the payload, one block per counter value */
	payload_index = hdrlen + 8;
	for (i = 0; i < num_blocks; i++) {
		construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
				      pframe, pn_vector, i + 1);
		aes128k128d(key, ctr_preload, aes_out);
		bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
		for (j = 0; j < 16; j++)
			pframe[payload_index++] = chain_buffer[j];
	}
	if (payload_remainder > 0) {
		/* If short final block, then pad it, */
		/* encrypt and copy unpadded part back */
		construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
				      pframe, pn_vector, num_blocks + 1);
		for (j = 0; j < 16; j++)
			padded_buffer[j] = 0x00;
		for (j = 0; j < payload_remainder; j++)
			padded_buffer[j] =
pframe[payload_index+j]; aes128k128d(key, ctr_preload, aes_out); bitwise_xor(aes_out, padded_buffer, chain_buffer); for (j = 0; j < payload_remainder; j++) pframe[payload_index++] = chain_buffer[j]; } /* Encrypt the MIC */ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, 0); for (j = 0; j < 16; j++) padded_buffer[j] = 0x00; for (j = 0; j < 8; j++) padded_buffer[j] = pframe[j+hdrlen+8+plen]; aes128k128d(key, ctr_preload, aes_out); bitwise_xor(aes_out, padded_buffer, chain_buffer); for (j = 0; j < 8; j++) pframe[payload_index++] = chain_buffer[j]; return _SUCCESS; } u32 r8712_aes_encrypt(struct _adapter *padapter, u8 *pxmitframe) { /* exclude ICV */ /* Intermediate Buffers */ sint curfragnum, length; u32 prwskeylen; u8 *pframe, *prwskey; struct sta_info *stainfo; struct pkt_attrib *pattrib = &((struct xmit_frame *) pxmitframe)->attrib; struct xmit_priv *pxmitpriv = &padapter->xmitpriv; u32 res = _SUCCESS; if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL) return _FAIL; pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + TXDESC_OFFSET; /* 4 start to encrypt each fragment */ if ((pattrib->encrypt == _AES_)) { if (pattrib->psta) stainfo = pattrib->psta; else stainfo = r8712_get_stainfo(&padapter->stapriv, &pattrib->ra[0]); if (stainfo != NULL) { prwskey = &stainfo->x_UncstKey.skey[0]; prwskeylen = 16; for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) { if ((curfragnum + 1) == pattrib->nr_frags) {\ length = pattrib->last_txcmdsz - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len; aes_cipher(prwskey, pattrib-> hdrlen, pframe, length); } else { length = pxmitpriv->frag_len - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len; aes_cipher(prwskey, pattrib-> hdrlen, pframe, length); pframe += pxmitpriv->frag_len; pframe = (u8 *)RND4((addr_t)(pframe)); } } } else res = _FAIL; } return res; } static sint aes_decipher(u8 *key, uint hdrlen, u8 *pframe, uint plen) { static u8 message[MAX_MSG_SIZE]; uint qc_exists, 
a4_exists, i, j, payload_remainder;
	uint num_blocks, payload_index;
	u8 pn_vector[6];
	u8 mic_iv[16];
	u8 mic_header1[16];
	u8 mic_header2[16];
	u8 ctr_preload[16];
	/* Intermediate Buffers */
	u8 chain_buffer[16];
	u8 aes_out[16];
	u8 padded_buffer[16];
	u8 mic[8];
	uint frtype = GetFrameType(pframe);
	uint frsubtype = GetFrameSubType(pframe);

	frsubtype = frsubtype >> 4;
	memset((void *)mic_iv, 0, 16);
	memset((void *)mic_header1, 0, 16);
	memset((void *)mic_header2, 0, 16);
	memset((void *)ctr_preload, 0, 16);
	memset((void *)chain_buffer, 0, 16);
	memset((void *)aes_out, 0, 16);
	memset((void *)padded_buffer, 0, 16);
	/* start to decrypt the payload */
	/* (plen including llc, payload and mic) */
	num_blocks = (plen - 8) / 16;
	payload_remainder = (plen - 8) % 16;
	/* packet number from the CCMP extended IV */
	pn_vector[0] = pframe[hdrlen];
	pn_vector[1] = pframe[hdrlen + 1];
	pn_vector[2] = pframe[hdrlen + 4];
	pn_vector[3] = pframe[hdrlen + 5];
	pn_vector[4] = pframe[hdrlen + 6];
	pn_vector[5] = pframe[hdrlen + 7];
	/* A4 present unless a plain 3-address header */
	if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
		a4_exists = 0;
	else
		a4_exists = 1;
	/* QC field present for QoS data subtypes; widen hdrlen to cover it */
	if ((frtype == WIFI_DATA_CFACK) || (frtype == WIFI_DATA_CFPOLL) ||
	    (frtype == WIFI_DATA_CFACKPOLL)) {
		qc_exists = 1;
		if (hdrlen != WLAN_HDR_A3_QOS_LEN)
			hdrlen += 2;
	} else if ((frsubtype == 0x08) || (frsubtype == 0x09) ||
		   (frsubtype == 0x0a) || (frsubtype == 0x0b)) {
		if (hdrlen != WLAN_HDR_A3_QOS_LEN)
			hdrlen += 2;
		qc_exists = 1;
	} else {
		qc_exists = 0;
	}
	/* now, decrypt pframe with hdrlen offset and plen long */
	payload_index = hdrlen + 8; /* 8 is for extiv */
	for (i = 0; i < num_blocks; i++) {
		construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
				      pframe, pn_vector, i + 1);
		aes128k128d(key, ctr_preload, aes_out);
		bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
		for (j = 0; j < 16; j++)
			pframe[payload_index++] = chain_buffer[j];
	}
	if (payload_remainder > 0) {
		/* If short final block, pad it, */
		/* encrypt it and copy the unpadded part back */
		construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
				      pframe, pn_vector,
				      num_blocks + 1);
		for (j = 0; j < 16; j++)
			padded_buffer[j] = 0x00;
		for (j = 0; j < payload_remainder; j++)
			padded_buffer[j] = pframe[payload_index + j];
		aes128k128d(key, ctr_preload, aes_out);
		bitwise_xor(aes_out, padded_buffer, chain_buffer);
		for (j = 0; j < payload_remainder; j++)
			pframe[payload_index++] = chain_buffer[j];
	}
	/* start to calculate the mic over a scratch copy of the frame */
	memcpy((void *)message, pframe, (hdrlen + plen + 8));
	pn_vector[0] = pframe[hdrlen];
	pn_vector[1] = pframe[hdrlen + 1];
	pn_vector[2] = pframe[hdrlen + 4];
	pn_vector[3] = pframe[hdrlen + 5];
	pn_vector[4] = pframe[hdrlen + 6];
	pn_vector[5] = pframe[hdrlen + 7];
	construct_mic_iv(mic_iv, qc_exists, a4_exists, message, plen - 8,
			 pn_vector);
	construct_mic_header1(mic_header1, hdrlen, message);
	construct_mic_header2(mic_header2, message, a4_exists, qc_exists);
	payload_remainder = (plen - 8) % 16;
	num_blocks = (plen - 8) / 16;
	/* Find start of payload */
	payload_index = (hdrlen + 8);
	/* Calculate MIC (AES-CBC-MAC over IV, headers, then payload) */
	aes128k128d(key, mic_iv, aes_out);
	bitwise_xor(aes_out, mic_header1, chain_buffer);
	aes128k128d(key, chain_buffer, aes_out);
	bitwise_xor(aes_out, mic_header2, chain_buffer);
	aes128k128d(key, chain_buffer, aes_out);
	for (i = 0; i < num_blocks; i++) {
		bitwise_xor(aes_out, &message[payload_index], chain_buffer);
		payload_index += 16;
		aes128k128d(key, chain_buffer, aes_out);
	}
	/* Add on the final payload block if it needs padding */
	if (payload_remainder > 0) {
		for (j = 0; j < 16; j++)
			padded_buffer[j] = 0x00;
		for (j = 0; j < payload_remainder; j++)
			padded_buffer[j] = message[payload_index++];
		bitwise_xor(aes_out, padded_buffer, chain_buffer);
		aes128k128d(key, chain_buffer, aes_out);
	}
	for (j = 0; j < 8; j++)
		mic[j] = aes_out[j];
	/* Insert MIC into payload */
	for (j = 0; j < 8; j++)
		message[payload_index + j] = mic[j];
	/* re-encrypt the scratch copy so it mirrors the over-the-air form */
	payload_index = hdrlen + 8;
	for (i = 0; i < num_blocks; i++) {
		construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
				      message, pn_vector, i + 1);
		aes128k128d(key, ctr_preload, aes_out);
		bitwise_xor(aes_out,
&message[payload_index], chain_buffer);
		for (j = 0; j < 16; j++)
			message[payload_index++] = chain_buffer[j];
	}
	if (payload_remainder > 0) {
		/* If short final block, pad it, */
		/* encrypt and copy unpadded part back */
		construct_ctr_preload(ctr_preload, a4_exists, qc_exists,
				      message, pn_vector, num_blocks + 1);
		for (j = 0; j < 16; j++)
			padded_buffer[j] = 0x00;
		for (j = 0; j < payload_remainder; j++)
			padded_buffer[j] = message[payload_index + j];
		aes128k128d(key, ctr_preload, aes_out);
		bitwise_xor(aes_out, padded_buffer, chain_buffer);
		for (j = 0; j < payload_remainder; j++)
			message[payload_index++] = chain_buffer[j];
	}
	/* Encrypt the MIC (counter value 0) */
	construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message,
			      pn_vector, 0);
	for (j = 0; j < 16; j++)
		padded_buffer[j] = 0x00;
	for (j = 0; j < 8; j++)
		padded_buffer[j] = message[j + hdrlen + plen];
	aes128k128d(key, ctr_preload, aes_out);
	bitwise_xor(aes_out, padded_buffer, chain_buffer);
	for (j = 0; j < 8; j++)
		message[payload_index++] = chain_buffer[j];
	/* compare the mic */
	/*
	 * NOTE(review): the MIC is recomputed into message[] above but is
	 * never actually compared with the received MIC before returning
	 * success -- confirm whether validation is done elsewhere.
	 */
	return _SUCCESS;
}

/*
 * r8712_aes_decrypt - CCMP-decrypt a received frame in place via
 * aes_decipher().  Returns _SUCCESS, or _FAIL when no station entry or
 * group key is available.
 */
u32 r8712_aes_decrypt(struct _adapter *padapter, u8 *precvframe)
{	/* exclude ICV */
	/* Intermediate Buffers */
	sint length;
	u32 prwskeylen;
	u8 *pframe, *prwskey, *iv, idx;
	struct sta_info *stainfo;
	struct rx_pkt_attrib *prxattrib = &((union recv_frame *)
					  precvframe)->u.hdr.attrib;
	struct security_priv *psecuritypriv = &padapter->securitypriv;

	pframe = (unsigned char *)((union recv_frame *)precvframe)->
		 u.hdr.rx_data;
	/* decrypt the received frame */
	if ((prxattrib->encrypt == _AES_)) {
		stainfo = r8712_get_stainfo(&padapter->stapriv,
					    &prxattrib->ta[0]);
		if (stainfo != NULL) {
			if (IS_MCAST(prxattrib->ra)) {
				iv = pframe + prxattrib->hdrlen;
				idx = iv[3]; /* key id bits 7:6 of IV byte 3 */
				/* NOTE(review): ((idx >> 6) & 0x3) - 1
				 * underflows for key id 0 -- confirm. */
				prwskey = &psecuritypriv->XGrpKey[
					((idx >> 6) & 0x3) - 1].skey[0];
				if (psecuritypriv->binstallGrpkey == false)
					return _FAIL;
			} else
				prwskey = &stainfo->x_UncstKey.skey[0];
			prwskeylen = 16;
			length = ((union recv_frame *)precvframe)->
				 u.hdr.len - prxattrib->hdrlen -
				 prxattrib->iv_len;
			aes_decipher(prwskey, prxattrib->hdrlen, pframe,
				     length);
		} else
			return _FAIL;
	}
	return _SUCCESS;
}

/*
 * Timer/callback handler: flag that the negotiated TKIP key may now be
 * used for transmit.
 */
void r8712_use_tkipkey_handler(void *FunctionContext)
{
	struct _adapter *padapter = (struct _adapter *)FunctionContext;

	padapter->securitypriv.busetkipkey = true;
}
gpl-2.0
lostemp/linux-2.6.30.4_analysis
arch/arm/mach-ixp4xx/common.c
49
11770
/* * arch/arm/mach-ixp4xx/common.c * * Generic code shared across all IXP4XX platforms * * Maintainer: Deepak Saxena <dsaxena@plexity.net> * * Copyright 2002 (c) Intel Corporation * Copyright 2003-2004 (c) MontaVista, Software, Inc. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/sched.h> #include <linux/tty.h> #include <linux/platform_device.h> #include <linux/serial_core.h> #include <linux/bootmem.h> #include <linux/interrupt.h> #include <linux/bitops.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/io.h> #include <mach/udc.h> #include <mach/hardware.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/page.h> #include <asm/irq.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <asm/mach/time.h> static int __init ixp4xx_clocksource_init(void); static int __init ixp4xx_clockevent_init(void); static struct clock_event_device clockevent_ixp4xx; /************************************************************************* * IXP4xx chipset I/O mapping *************************************************************************/ static struct map_desc ixp4xx_io_desc[] __initdata = { { /* UART, Interrupt ctrl, GPIO, timers, NPEs, MACs, USB .... 
*/ .virtual = IXP4XX_PERIPHERAL_BASE_VIRT, .pfn = __phys_to_pfn(IXP4XX_PERIPHERAL_BASE_PHYS), .length = IXP4XX_PERIPHERAL_REGION_SIZE, .type = MT_DEVICE }, { /* Expansion Bus Config Registers */ .virtual = IXP4XX_EXP_CFG_BASE_VIRT, .pfn = __phys_to_pfn(IXP4XX_EXP_CFG_BASE_PHYS), .length = IXP4XX_EXP_CFG_REGION_SIZE, .type = MT_DEVICE }, { /* PCI Registers */ .virtual = IXP4XX_PCI_CFG_BASE_VIRT, .pfn = __phys_to_pfn(IXP4XX_PCI_CFG_BASE_PHYS), .length = IXP4XX_PCI_CFG_REGION_SIZE, .type = MT_DEVICE }, #ifdef CONFIG_DEBUG_LL { /* Debug UART mapping */ .virtual = IXP4XX_DEBUG_UART_BASE_VIRT, .pfn = __phys_to_pfn(IXP4XX_DEBUG_UART_BASE_PHYS), .length = IXP4XX_DEBUG_UART_REGION_SIZE, .type = MT_DEVICE } #endif }; void __init ixp4xx_map_io(void) { iotable_init(ixp4xx_io_desc, ARRAY_SIZE(ixp4xx_io_desc)); } /************************************************************************* * IXP4xx chipset IRQ handling * * TODO: GPIO IRQs should be marked invalid until the user of the IRQ * (be it PCI or something else) configures that GPIO line * as an IRQ. **************************************************************************/ enum ixp4xx_irq_type { IXP4XX_IRQ_LEVEL, IXP4XX_IRQ_EDGE }; /* Each bit represents an IRQ: 1: edge-triggered, 0: level triggered */ static unsigned long long ixp4xx_irq_edge = 0; /* * IRQ -> GPIO mapping table */ static signed char irq2gpio[32] = { -1, -1, -1, -1, -1, -1, 0, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1, }; int gpio_to_irq(int gpio) { int irq; for (irq = 0; irq < 32; irq++) { if (irq2gpio[irq] == gpio) return irq; } return -EINVAL; } EXPORT_SYMBOL(gpio_to_irq); int irq_to_gpio(int irq) { int gpio = (irq < 32) ? 
irq2gpio[irq] : -EINVAL; if (gpio == -1) return -EINVAL; return gpio; } EXPORT_SYMBOL(irq_to_gpio); static int ixp4xx_set_irq_type(unsigned int irq, unsigned int type) { int line = irq2gpio[irq]; u32 int_style; enum ixp4xx_irq_type irq_type; volatile u32 *int_reg; /* * Only for GPIO IRQs */ if (line < 0) return -EINVAL; switch (type){ case IRQ_TYPE_EDGE_BOTH: int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL; irq_type = IXP4XX_IRQ_EDGE; break; case IRQ_TYPE_EDGE_RISING: int_style = IXP4XX_GPIO_STYLE_RISING_EDGE; irq_type = IXP4XX_IRQ_EDGE; break; case IRQ_TYPE_EDGE_FALLING: int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE; irq_type = IXP4XX_IRQ_EDGE; break; case IRQ_TYPE_LEVEL_HIGH: int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH; irq_type = IXP4XX_IRQ_LEVEL; break; case IRQ_TYPE_LEVEL_LOW: int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW; irq_type = IXP4XX_IRQ_LEVEL; break; default: return -EINVAL; } if (irq_type == IXP4XX_IRQ_EDGE) ixp4xx_irq_edge |= (1 << irq); else ixp4xx_irq_edge &= ~(1 << irq); if (line >= 8) { /* pins 8-15 */ line -= 8; int_reg = IXP4XX_GPIO_GPIT2R; } else { /* pins 0-7 */ int_reg = IXP4XX_GPIO_GPIT1R; } /* Clear the style for the appropriate pin */ *int_reg &= ~(IXP4XX_GPIO_STYLE_CLEAR << (line * IXP4XX_GPIO_STYLE_SIZE)); *IXP4XX_GPIO_GPISR = (1 << line); /* Set the new style */ *int_reg |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE)); /* Configure the line as an input */ gpio_line_config(irq2gpio[irq], IXP4XX_GPIO_IN); return 0; } static void ixp4xx_irq_mask(unsigned int irq) { if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && irq >= 32) *IXP4XX_ICMR2 &= ~(1 << (irq - 32)); else *IXP4XX_ICMR &= ~(1 << irq); } static void ixp4xx_irq_ack(unsigned int irq) { int line = (irq < 32) ? irq2gpio[irq] : -1; if (line >= 0) *IXP4XX_GPIO_GPISR = (1 << line); } /* * Level triggered interrupts on GPIO lines can only be cleared when the * interrupt condition disappears. 
*/ static void ixp4xx_irq_unmask(unsigned int irq) { if (!(ixp4xx_irq_edge & (1 << irq))) ixp4xx_irq_ack(irq); if ((cpu_is_ixp46x() || cpu_is_ixp43x()) && irq >= 32) *IXP4XX_ICMR2 |= (1 << (irq - 32)); else *IXP4XX_ICMR |= (1 << irq); } static struct irq_chip ixp4xx_irq_chip = { .name = "IXP4xx", .ack = ixp4xx_irq_ack, .mask = ixp4xx_irq_mask, .unmask = ixp4xx_irq_unmask, .set_type = ixp4xx_set_irq_type, }; void __init ixp4xx_init_irq(void) { int i = 0; /* Route all sources to IRQ instead of FIQ */ *IXP4XX_ICLR = 0x0; /* Disable all interrupt */ *IXP4XX_ICMR = 0x0; if (cpu_is_ixp46x() || cpu_is_ixp43x()) { /* Route upper 32 sources to IRQ instead of FIQ */ *IXP4XX_ICLR2 = 0x00; /* Disable upper 32 interrupts */ *IXP4XX_ICMR2 = 0x00; } /* Default to all level triggered */ for(i = 0; i < NR_IRQS; i++) { set_irq_chip(i, &ixp4xx_irq_chip); set_irq_handler(i, handle_level_irq); set_irq_flags(i, IRQF_VALID); } } /************************************************************************* * IXP4xx timer tick * We use OS timer1 on the CPU for the timer tick and the timestamp * counter as a source of real clock ticks to account for missed jiffies. 
*************************************************************************/ static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = &clockevent_ixp4xx; /* Clear Pending Interrupt by writing '1' to it */ *IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND; evt->event_handler(evt); return IRQ_HANDLED; } static struct irqaction ixp4xx_timer_irq = { .name = "timer1", .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .handler = ixp4xx_timer_interrupt, }; void __init ixp4xx_timer_init(void) { /* Reset/disable counter */ *IXP4XX_OSRT1 = 0; /* Clear Pending Interrupt by writing '1' to it */ *IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND; /* Reset time-stamp counter */ *IXP4XX_OSTS = 0; /* Connect the interrupt handler and enable the interrupt */ setup_irq(IRQ_IXP4XX_TIMER1, &ixp4xx_timer_irq); ixp4xx_clocksource_init(); ixp4xx_clockevent_init(); } struct sys_timer ixp4xx_timer = { .init = ixp4xx_timer_init, }; static struct pxa2xx_udc_mach_info ixp4xx_udc_info; void __init ixp4xx_set_udc_info(struct pxa2xx_udc_mach_info *info) { memcpy(&ixp4xx_udc_info, info, sizeof *info); } static struct resource ixp4xx_udc_resources[] = { [0] = { .start = 0xc800b000, .end = 0xc800bfff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IXP4XX_USB, .end = IRQ_IXP4XX_USB, .flags = IORESOURCE_IRQ, }, }; /* * USB device controller. The IXP4xx uses the same controller as PXA25X, * so we just use the same device. */ static struct platform_device ixp4xx_udc_device = { .name = "pxa25x-udc", .id = -1, .num_resources = 2, .resource = ixp4xx_udc_resources, .dev = { .platform_data = &ixp4xx_udc_info, }, }; static struct platform_device *ixp4xx_devices[] __initdata = { &ixp4xx_udc_device, }; static struct resource ixp46x_i2c_resources[] = { [0] = { .start = 0xc8011000, .end = 0xc801101c, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_IXP4XX_I2C, .end = IRQ_IXP4XX_I2C, .flags = IORESOURCE_IRQ } }; /* * I2C controller. 
The IXP46x uses the same block as the IOP3xx, so * we just use the same device name. */ static struct platform_device ixp46x_i2c_controller = { .name = "IOP3xx-I2C", .id = 0, .num_resources = 2, .resource = ixp46x_i2c_resources }; static struct platform_device *ixp46x_devices[] __initdata = { &ixp46x_i2c_controller }; unsigned long ixp4xx_exp_bus_size; EXPORT_SYMBOL(ixp4xx_exp_bus_size); void __init ixp4xx_sys_init(void) { ixp4xx_exp_bus_size = SZ_16M; platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices)); if (cpu_is_ixp46x()) { int region; platform_add_devices(ixp46x_devices, ARRAY_SIZE(ixp46x_devices)); for (region = 0; region < 7; region++) { if((*(IXP4XX_EXP_REG(0x4 * region)) & 0x200)) { ixp4xx_exp_bus_size = SZ_32M; break; } } } printk("IXP4xx: Using %luMiB expansion bus window size\n", ixp4xx_exp_bus_size >> 20); } /* * clocksource */ cycle_t ixp4xx_get_cycles(struct clocksource *cs) { return *IXP4XX_OSTS; } static struct clocksource clocksource_ixp4xx = { .name = "OSTS", .rating = 200, .read = ixp4xx_get_cycles, .mask = CLOCKSOURCE_MASK(32), .shift = 20, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; unsigned long ixp4xx_timer_freq = FREQ; static int __init ixp4xx_clocksource_init(void) { clocksource_ixp4xx.mult = clocksource_hz2mult(ixp4xx_timer_freq, clocksource_ixp4xx.shift); clocksource_register(&clocksource_ixp4xx); return 0; } /* * clockevents */ static int ixp4xx_set_next_event(unsigned long evt, struct clock_event_device *unused) { unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK; *IXP4XX_OSRT1 = (evt & ~IXP4XX_OST_RELOAD_MASK) | opts; return 0; } static void ixp4xx_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { unsigned long opts = *IXP4XX_OSRT1 & IXP4XX_OST_RELOAD_MASK; unsigned long osrt = *IXP4XX_OSRT1 & ~IXP4XX_OST_RELOAD_MASK; switch (mode) { case CLOCK_EVT_MODE_PERIODIC: osrt = LATCH & ~IXP4XX_OST_RELOAD_MASK; opts = IXP4XX_OST_ENABLE; break; case CLOCK_EVT_MODE_ONESHOT: /* period set by 'set next_event' 
*/ osrt = 0; opts = IXP4XX_OST_ENABLE | IXP4XX_OST_ONE_SHOT; break; case CLOCK_EVT_MODE_SHUTDOWN: opts &= ~IXP4XX_OST_ENABLE; break; case CLOCK_EVT_MODE_RESUME: opts |= IXP4XX_OST_ENABLE; break; case CLOCK_EVT_MODE_UNUSED: default: osrt = opts = 0; break; } *IXP4XX_OSRT1 = osrt | opts; } static struct clock_event_device clockevent_ixp4xx = { .name = "ixp4xx timer1", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .rating = 200, .shift = 24, .set_mode = ixp4xx_set_mode, .set_next_event = ixp4xx_set_next_event, }; static int __init ixp4xx_clockevent_init(void) { clockevent_ixp4xx.mult = div_sc(FREQ, NSEC_PER_SEC, clockevent_ixp4xx.shift); clockevent_ixp4xx.max_delta_ns = clockevent_delta2ns(0xfffffffe, &clockevent_ixp4xx); clockevent_ixp4xx.min_delta_ns = clockevent_delta2ns(0xf, &clockevent_ixp4xx); clockevent_ixp4xx.cpumask = cpumask_of(0); clockevents_register_device(&clockevent_ixp4xx); return 0; }
gpl-2.0
mambomark/linux-systemsim
drivers/gpu/drm/nouveau/nouveau_vga.c
49
3054
#include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include "nouveau_drm.h" #include "nouveau_acpi.h" #include "nouveau_fbcon.h" #include "nouveau_vga.h" static unsigned int nouveau_vga_set_decode(void *priv, bool state) { struct nouveau_device *device = nouveau_dev(priv); if (device->card_type == NV_40 && device->chipset >= 0x4c) nv_wr32(device, 0x088060, state); else if (device->chipset >= 0x40) nv_wr32(device, 0x088054, state); else nv_wr32(device, 0x001854, state); if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; else return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } static void nouveau_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_device *dev = pci_get_drvdata(pdev); if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF) return; if (state == VGA_SWITCHEROO_ON) { printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; nouveau_pmops_resume(&pdev->dev); drm_kms_helper_poll_enable(dev); dev->switch_power_state = DRM_SWITCH_POWER_ON; } else { printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; drm_kms_helper_poll_disable(dev); nouveau_switcheroo_optimus_dsm(); nouveau_pmops_suspend(&pdev->dev); dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } static void nouveau_switcheroo_reprobe(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); nouveau_fbcon_output_poll_changed(dev); } static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); bool can_switch; spin_lock(&dev->count_lock); can_switch = (dev->open_count == 0); spin_unlock(&dev->count_lock); return can_switch; } static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = { .set_gpu_state = nouveau_switcheroo_set_state, .reprobe 
= nouveau_switcheroo_reprobe, .can_switch = nouveau_switcheroo_can_switch, }; void nouveau_vga_init(struct nouveau_drm *drm) { struct drm_device *dev = drm->dev; bool runtime = false; /* only relevant for PCI devices */ if (!dev->pdev) return; vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); if (nouveau_runtime_pm == 1) runtime = true; if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm())) runtime = true; vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime); if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain); } void nouveau_vga_fini(struct nouveau_drm *drm) { struct drm_device *dev = drm->dev; vga_switcheroo_unregister_client(dev->pdev); vga_client_register(dev->pdev, NULL, NULL, NULL); } void nouveau_vga_lastclose(struct drm_device *dev) { vga_switcheroo_process_delayed_switch(); }
gpl-2.0
jacobbarsoe/linux
drivers/usb/gadget/composite.c
49
52362
/* * composite.c - infrastructure for Composite USB Gadgets * * Copyright (C) 2006-2008 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ /* #define VERBOSE_DEBUG */ #include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/device.h> #include <linux/utsname.h> #include <linux/usb/composite.h> #include <asm/unaligned.h> /* * The code in this file is utility code, used to build a gadget driver * from one or more "function" drivers, one or more "configuration" * objects, and a "usb_composite_driver" by gluing them together along * with the relevant device-wide data. */ static struct usb_gadget_strings **get_containers_gs( struct usb_gadget_string_container *uc) { return (struct usb_gadget_strings **)uc->stash; } /** * next_ep_desc() - advance to the next EP descriptor * @t: currect pointer within descriptor array * * Return: next EP descriptor or NULL * * Iterate over @t until either EP descriptor found or * NULL (that indicates end of list) encountered */ static struct usb_descriptor_header** next_ep_desc(struct usb_descriptor_header **t) { for (; *t; t++) { if ((*t)->bDescriptorType == USB_DT_ENDPOINT) return t; } return NULL; } /* * for_each_ep_desc()- iterate over endpoint descriptors in the * descriptors list * @start: pointer within descriptor array. * @ep_desc: endpoint descriptor to use as the loop cursor */ #define for_each_ep_desc(start, ep_desc) \ for (ep_desc = next_ep_desc(start); \ ep_desc; ep_desc = next_ep_desc(ep_desc+1)) /** * config_ep_by_speed() - configures the given endpoint * according to gadget speed. 
* @g: pointer to the gadget * @f: usb function * @_ep: the endpoint to configure * * Return: error code, 0 on success * * This function chooses the right descriptors for a given * endpoint according to gadget speed and saves it in the * endpoint desc field. If the endpoint already has a descriptor * assigned to it - overwrites it with currently corresponding * descriptor. The endpoint maxpacket field is updated according * to the chosen descriptor. * Note: the supplied function should hold all the descriptors * for supported speeds */ int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep) { struct usb_composite_dev *cdev = get_gadget_data(g); struct usb_endpoint_descriptor *chosen_desc = NULL; struct usb_descriptor_header **speed_desc = NULL; struct usb_ss_ep_comp_descriptor *comp_desc = NULL; int want_comp_desc = 0; struct usb_descriptor_header **d_spd; /* cursor for speed desc */ if (!g || !f || !_ep) return -EIO; /* select desired speed */ switch (g->speed) { case USB_SPEED_SUPER: if (gadget_is_superspeed(g)) { speed_desc = f->ss_descriptors; want_comp_desc = 1; break; } /* else: Fall trough */ case USB_SPEED_HIGH: if (gadget_is_dualspeed(g)) { speed_desc = f->hs_descriptors; break; } /* else: fall through */ default: speed_desc = f->fs_descriptors; } /* find descriptors */ for_each_ep_desc(speed_desc, d_spd) { chosen_desc = (struct usb_endpoint_descriptor *)*d_spd; if (chosen_desc->bEndpointAddress == _ep->address) goto ep_found; } return -EIO; ep_found: /* commit results */ _ep->maxpacket = usb_endpoint_maxp(chosen_desc); _ep->desc = chosen_desc; _ep->comp_desc = NULL; _ep->maxburst = 0; _ep->mult = 0; if (!want_comp_desc) return 0; /* * Companion descriptor should follow EP descriptor * USB 3.0 spec, #9.6.7 */ comp_desc = (struct usb_ss_ep_comp_descriptor *)*(++d_spd); if (!comp_desc || (comp_desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP)) return -EIO; _ep->comp_desc = comp_desc; if (g->speed == USB_SPEED_SUPER) { switch 
(usb_endpoint_type(_ep->desc)) { case USB_ENDPOINT_XFER_ISOC: /* mult: bits 1:0 of bmAttributes */ _ep->mult = comp_desc->bmAttributes & 0x3; case USB_ENDPOINT_XFER_BULK: case USB_ENDPOINT_XFER_INT: _ep->maxburst = comp_desc->bMaxBurst + 1; break; default: if (comp_desc->bMaxBurst != 0) ERROR(cdev, "ep0 bMaxBurst must be 0\n"); _ep->maxburst = 1; break; } } return 0; } EXPORT_SYMBOL_GPL(config_ep_by_speed); /** * usb_add_function() - add a function to a configuration * @config: the configuration * @function: the function being added * Context: single threaded during gadget setup * * After initialization, each configuration must have one or more * functions added to it. Adding a function involves calling its @bind() * method to allocate resources such as interface and string identifiers * and endpoints. * * This function returns the value of the function's bind(), which is * zero for success else a negative errno value. */ int usb_add_function(struct usb_configuration *config, struct usb_function *function) { int value = -EINVAL; DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n", function->name, function, config->label, config); if (!function->set_alt || !function->disable) goto done; function->config = config; list_add_tail(&function->list, &config->functions); /* REVISIT *require* function->bind? */ if (function->bind) { value = function->bind(config, function); if (value < 0) { list_del(&function->list); function->config = NULL; } } else value = 0; /* We allow configurations that don't work at both speeds. * If we run into a lowspeed Linux system, treat it the same * as full speed ... it's the function drivers that will need * to avoid bulk and ISO transfers. 
*/ if (!config->fullspeed && function->fs_descriptors) config->fullspeed = true; if (!config->highspeed && function->hs_descriptors) config->highspeed = true; if (!config->superspeed && function->ss_descriptors) config->superspeed = true; done: if (value) DBG(config->cdev, "adding '%s'/%p --> %d\n", function->name, function, value); return value; } EXPORT_SYMBOL_GPL(usb_add_function); void usb_remove_function(struct usb_configuration *c, struct usb_function *f) { if (f->disable) f->disable(f); bitmap_zero(f->endpoints, 32); list_del(&f->list); if (f->unbind) f->unbind(c, f); } EXPORT_SYMBOL_GPL(usb_remove_function); /** * usb_function_deactivate - prevent function and gadget enumeration * @function: the function that isn't yet ready to respond * * Blocks response of the gadget driver to host enumeration by * preventing the data line pullup from being activated. This is * normally called during @bind() processing to change from the * initial "ready to respond" state, or when a required resource * becomes available. * * For example, drivers that serve as a passthrough to a userspace * daemon can block enumeration unless that daemon (such as an OBEX, * MTP, or print server) is ready to handle host requests. * * Not all systems support software control of their USB peripheral * data pullups. * * Returns zero on success, else negative errno. */ int usb_function_deactivate(struct usb_function *function) { struct usb_composite_dev *cdev = function->config->cdev; unsigned long flags; int status = 0; spin_lock_irqsave(&cdev->lock, flags); if (cdev->deactivations == 0) status = usb_gadget_disconnect(cdev->gadget); if (status == 0) cdev->deactivations++; spin_unlock_irqrestore(&cdev->lock, flags); return status; } EXPORT_SYMBOL_GPL(usb_function_deactivate); /** * usb_function_activate - allow function and gadget enumeration * @function: function on which usb_function_activate() was called * * Reverses effect of usb_function_deactivate(). 
If no more functions * are delaying their activation, the gadget driver will respond to * host enumeration procedures. * * Returns zero on success, else negative errno. */ int usb_function_activate(struct usb_function *function) { struct usb_composite_dev *cdev = function->config->cdev; unsigned long flags; int status = 0; spin_lock_irqsave(&cdev->lock, flags); if (WARN_ON(cdev->deactivations == 0)) status = -EINVAL; else { cdev->deactivations--; if (cdev->deactivations == 0) status = usb_gadget_connect(cdev->gadget); } spin_unlock_irqrestore(&cdev->lock, flags); return status; } EXPORT_SYMBOL_GPL(usb_function_activate); /** * usb_interface_id() - allocate an unused interface ID * @config: configuration associated with the interface * @function: function handling the interface * Context: single threaded during gadget setup * * usb_interface_id() is called from usb_function.bind() callbacks to * allocate new interface IDs. The function driver will then store that * ID in interface, association, CDC union, and other descriptors. It * will also handle any control requests targeted at that interface, * particularly changing its altsetting via set_alt(). There may * also be class-specific or vendor-specific requests to handle. * * All interface identifier should be allocated using this routine, to * ensure that for example different functions don't wrongly assign * different meanings to the same identifier. Note that since interface * identifiers are configuration-specific, functions used in more than * one configuration (or more than once in a given configuration) need * multiple versions of the relevant descriptors. * * Returns the interface ID which was allocated; or -ENODEV if no * more interface IDs can be allocated. 
*/ int usb_interface_id(struct usb_configuration *config, struct usb_function *function) { unsigned id = config->next_interface_id; if (id < MAX_CONFIG_INTERFACES) { config->interface[id] = function; config->next_interface_id = id + 1; return id; } return -ENODEV; } EXPORT_SYMBOL_GPL(usb_interface_id); static u8 encode_bMaxPower(enum usb_device_speed speed, struct usb_configuration *c) { unsigned val; if (c->MaxPower) val = c->MaxPower; else val = CONFIG_USB_GADGET_VBUS_DRAW; if (!val) return 0; switch (speed) { case USB_SPEED_SUPER: return DIV_ROUND_UP(val, 8); default: return DIV_ROUND_UP(val, 2); } } static int config_buf(struct usb_configuration *config, enum usb_device_speed speed, void *buf, u8 type) { struct usb_config_descriptor *c = buf; void *next = buf + USB_DT_CONFIG_SIZE; int len; struct usb_function *f; int status; len = USB_COMP_EP0_BUFSIZ - USB_DT_CONFIG_SIZE; /* write the config descriptor */ c = buf; c->bLength = USB_DT_CONFIG_SIZE; c->bDescriptorType = type; /* wTotalLength is written later */ c->bNumInterfaces = config->next_interface_id; c->bConfigurationValue = config->bConfigurationValue; c->iConfiguration = config->iConfiguration; c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes; c->bMaxPower = encode_bMaxPower(speed, config); /* There may be e.g. 
OTG descriptors */ if (config->descriptors) { status = usb_descriptor_fillbuf(next, len, config->descriptors); if (status < 0) return status; len -= status; next += status; } /* add each function's descriptors */ list_for_each_entry(f, &config->functions, list) { struct usb_descriptor_header **descriptors; switch (speed) { case USB_SPEED_SUPER: descriptors = f->ss_descriptors; break; case USB_SPEED_HIGH: descriptors = f->hs_descriptors; break; default: descriptors = f->fs_descriptors; } if (!descriptors) continue; status = usb_descriptor_fillbuf(next, len, (const struct usb_descriptor_header **) descriptors); if (status < 0) return status; len -= status; next += status; } len = next - buf; c->wTotalLength = cpu_to_le16(len); return len; } static int config_desc(struct usb_composite_dev *cdev, unsigned w_value) { struct usb_gadget *gadget = cdev->gadget; struct usb_configuration *c; u8 type = w_value >> 8; enum usb_device_speed speed = USB_SPEED_UNKNOWN; if (gadget->speed == USB_SPEED_SUPER) speed = gadget->speed; else if (gadget_is_dualspeed(gadget)) { int hs = 0; if (gadget->speed == USB_SPEED_HIGH) hs = 1; if (type == USB_DT_OTHER_SPEED_CONFIG) hs = !hs; if (hs) speed = USB_SPEED_HIGH; } /* This is a lookup by config *INDEX* */ w_value &= 0xff; list_for_each_entry(c, &cdev->configs, list) { /* ignore configs that won't work at this speed */ switch (speed) { case USB_SPEED_SUPER: if (!c->superspeed) continue; break; case USB_SPEED_HIGH: if (!c->highspeed) continue; break; default: if (!c->fullspeed) continue; } if (w_value == 0) return config_buf(c, speed, cdev->req->buf, type); w_value--; } return -EINVAL; } static int count_configs(struct usb_composite_dev *cdev, unsigned type) { struct usb_gadget *gadget = cdev->gadget; struct usb_configuration *c; unsigned count = 0; int hs = 0; int ss = 0; if (gadget_is_dualspeed(gadget)) { if (gadget->speed == USB_SPEED_HIGH) hs = 1; if (gadget->speed == USB_SPEED_SUPER) ss = 1; if (type == USB_DT_DEVICE_QUALIFIER) hs = !hs; 
} list_for_each_entry(c, &cdev->configs, list) { /* ignore configs that won't work at this speed */ if (ss) { if (!c->superspeed) continue; } else if (hs) { if (!c->highspeed) continue; } else { if (!c->fullspeed) continue; } count++; } return count; } /** * bos_desc() - prepares the BOS descriptor. * @cdev: pointer to usb_composite device to generate the bos * descriptor for * * This function generates the BOS (Binary Device Object) * descriptor and its device capabilities descriptors. The BOS * descriptor should be supported by a SuperSpeed device. */ static int bos_desc(struct usb_composite_dev *cdev) { struct usb_ext_cap_descriptor *usb_ext; struct usb_ss_cap_descriptor *ss_cap; struct usb_dcd_config_params dcd_config_params; struct usb_bos_descriptor *bos = cdev->req->buf; bos->bLength = USB_DT_BOS_SIZE; bos->bDescriptorType = USB_DT_BOS; bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE); bos->bNumDeviceCaps = 0; /* * A SuperSpeed device shall include the USB2.0 extension descriptor * and shall support LPM when operating in USB2.0 HS mode. */ usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength); bos->bNumDeviceCaps++; le16_add_cpu(&bos->wTotalLength, USB_DT_USB_EXT_CAP_SIZE); usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE; usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY; usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT; usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT); /* * The Superspeed USB Capability descriptor shall be implemented by all * SuperSpeed devices. 
*/ ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength); bos->bNumDeviceCaps++; le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SS_CAP_SIZE); ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE; ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY; ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE; ss_cap->bmAttributes = 0; /* LTM is not supported yet */ ss_cap->wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION | USB_FULL_SPEED_OPERATION | USB_HIGH_SPEED_OPERATION | USB_5GBPS_OPERATION); ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION; /* Get Controller configuration */ if (cdev->gadget->ops->get_config_params) cdev->gadget->ops->get_config_params(&dcd_config_params); else { dcd_config_params.bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT; dcd_config_params.bU2DevExitLat = cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT); } ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat; ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat; return le16_to_cpu(bos->wTotalLength); } static void device_qual(struct usb_composite_dev *cdev) { struct usb_qualifier_descriptor *qual = cdev->req->buf; qual->bLength = sizeof(*qual); qual->bDescriptorType = USB_DT_DEVICE_QUALIFIER; /* POLICY: same bcdUSB and device type info at both speeds */ qual->bcdUSB = cdev->desc.bcdUSB; qual->bDeviceClass = cdev->desc.bDeviceClass; qual->bDeviceSubClass = cdev->desc.bDeviceSubClass; qual->bDeviceProtocol = cdev->desc.bDeviceProtocol; /* ASSUME same EP0 fifo size at both speeds */ qual->bMaxPacketSize0 = cdev->gadget->ep0->maxpacket; qual->bNumConfigurations = count_configs(cdev, USB_DT_DEVICE_QUALIFIER); qual->bRESERVED = 0; } /*-------------------------------------------------------------------------*/ static void reset_config(struct usb_composite_dev *cdev) { struct usb_function *f; DBG(cdev, "reset config\n"); list_for_each_entry(f, &cdev->config->functions, list) { if (f->disable) f->disable(f); bitmap_zero(f->endpoints, 32); } cdev->config = NULL; cdev->delayed_status = 0; } static int set_config(struct 
usb_composite_dev *cdev, const struct usb_ctrlrequest *ctrl, unsigned number) { struct usb_gadget *gadget = cdev->gadget; struct usb_configuration *c = NULL; int result = -EINVAL; unsigned power = gadget_is_otg(gadget) ? 8 : 100; int tmp; if (number) { list_for_each_entry(c, &cdev->configs, list) { if (c->bConfigurationValue == number) { /* * We disable the FDs of the previous * configuration only if the new configuration * is a valid one */ if (cdev->config) reset_config(cdev); result = 0; break; } } if (result < 0) goto done; } else { /* Zero configuration value - need to reset the config */ if (cdev->config) reset_config(cdev); result = 0; } INFO(cdev, "%s config #%d: %s\n", usb_speed_string(gadget->speed), number, c ? c->label : "unconfigured"); if (!c) goto done; cdev->config = c; /* Initialize all interfaces by setting them to altsetting zero. */ for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) { struct usb_function *f = c->interface[tmp]; struct usb_descriptor_header **descriptors; if (!f) break; /* * Record which endpoints are used by the function. This is used * to dispatch control requests targeted at that endpoint to the * function's setup callback instead of the current * configuration's setup callback. 
*/ switch (gadget->speed) { case USB_SPEED_SUPER: descriptors = f->ss_descriptors; break; case USB_SPEED_HIGH: descriptors = f->hs_descriptors; break; default: descriptors = f->fs_descriptors; } for (; *descriptors; ++descriptors) { struct usb_endpoint_descriptor *ep; int addr; if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT) continue; ep = (struct usb_endpoint_descriptor *)*descriptors; addr = ((ep->bEndpointAddress & 0x80) >> 3) | (ep->bEndpointAddress & 0x0f); set_bit(addr, f->endpoints); } result = f->set_alt(f, tmp, 0); if (result < 0) { DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n", tmp, f->name, f, result); reset_config(cdev); goto done; } if (result == USB_GADGET_DELAYED_STATUS) { DBG(cdev, "%s: interface %d (%s) requested delayed status\n", __func__, tmp, f->name); cdev->delayed_status++; DBG(cdev, "delayed_status count %d\n", cdev->delayed_status); } } /* when we return, be sure our power usage is valid */ power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW; done: usb_gadget_vbus_draw(gadget, power); if (result >= 0 && cdev->delayed_status) result = USB_GADGET_DELAYED_STATUS; return result; } int usb_add_config_only(struct usb_composite_dev *cdev, struct usb_configuration *config) { struct usb_configuration *c; if (!config->bConfigurationValue) return -EINVAL; /* Prevent duplicate configuration identifiers */ list_for_each_entry(c, &cdev->configs, list) { if (c->bConfigurationValue == config->bConfigurationValue) return -EBUSY; } config->cdev = cdev; list_add_tail(&config->list, &cdev->configs); INIT_LIST_HEAD(&config->functions); config->next_interface_id = 0; memset(config->interface, 0, sizeof(config->interface)); return 0; } EXPORT_SYMBOL_GPL(usb_add_config_only); /** * usb_add_config() - add a configuration to a device. 
* @cdev: wraps the USB gadget * @config: the configuration, with bConfigurationValue assigned * @bind: the configuration's bind function * Context: single threaded during gadget setup * * One of the main tasks of a composite @bind() routine is to * add each of the configurations it supports, using this routine. * * This function returns the value of the configuration's @bind(), which * is zero for success else a negative errno value. Binding configurations * assigns global resources including string IDs, and per-configuration * resources such as interface IDs and endpoints. */ int usb_add_config(struct usb_composite_dev *cdev, struct usb_configuration *config, int (*bind)(struct usb_configuration *)) { int status = -EINVAL; if (!bind) goto done; DBG(cdev, "adding config #%u '%s'/%p\n", config->bConfigurationValue, config->label, config); status = usb_add_config_only(cdev, config); if (status) goto done; status = bind(config); if (status < 0) { while (!list_empty(&config->functions)) { struct usb_function *f; f = list_first_entry(&config->functions, struct usb_function, list); list_del(&f->list); if (f->unbind) { DBG(cdev, "unbind function '%s'/%p\n", f->name, f); f->unbind(config, f); /* may free memory for "f" */ } } list_del(&config->list); config->cdev = NULL; } else { unsigned i; DBG(cdev, "cfg %d/%p speeds:%s%s%s\n", config->bConfigurationValue, config, config->superspeed ? " super" : "", config->highspeed ? " high" : "", config->fullspeed ? (gadget_is_dualspeed(cdev->gadget) ? " full" : " full/low") : ""); for (i = 0; i < MAX_CONFIG_INTERFACES; i++) { struct usb_function *f = config->interface[i]; if (!f) continue; DBG(cdev, " interface %d = %s/%p\n", i, f->name, f); } } /* set_alt(), or next bind(), sets up * ep->driver_data as needed. 
*/ usb_ep_autoconfig_reset(cdev->gadget); done: if (status) DBG(cdev, "added config '%s'/%u --> %d\n", config->label, config->bConfigurationValue, status); return status; } EXPORT_SYMBOL_GPL(usb_add_config); static void remove_config(struct usb_composite_dev *cdev, struct usb_configuration *config) { while (!list_empty(&config->functions)) { struct usb_function *f; f = list_first_entry(&config->functions, struct usb_function, list); list_del(&f->list); if (f->unbind) { DBG(cdev, "unbind function '%s'/%p\n", f->name, f); f->unbind(config, f); /* may free memory for "f" */ } } list_del(&config->list); if (config->unbind) { DBG(cdev, "unbind config '%s'/%p\n", config->label, config); config->unbind(config); /* may free memory for "c" */ } } /** * usb_remove_config() - remove a configuration from a device. * @cdev: wraps the USB gadget * @config: the configuration * * Drivers must call usb_gadget_disconnect before calling this function * to disconnect the device from the host and make sure the host will not * try to enumerate the device while we are changing the config list. */ void usb_remove_config(struct usb_composite_dev *cdev, struct usb_configuration *config) { unsigned long flags; spin_lock_irqsave(&cdev->lock, flags); if (cdev->config == config) reset_config(cdev); spin_unlock_irqrestore(&cdev->lock, flags); remove_config(cdev, config); } /*-------------------------------------------------------------------------*/ /* We support strings in multiple languages ... string descriptor zero * says which languages are supported. The typical case will be that * only one language (probably English) is used, with I18N handled on * the host side. 
*/ static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf) { const struct usb_gadget_strings *s; __le16 language; __le16 *tmp; while (*sp) { s = *sp; language = cpu_to_le16(s->language); for (tmp = buf; *tmp && tmp < &buf[126]; tmp++) { if (*tmp == language) goto repeat; } *tmp++ = language; repeat: sp++; } } static int lookup_string( struct usb_gadget_strings **sp, void *buf, u16 language, int id ) { struct usb_gadget_strings *s; int value; while (*sp) { s = *sp++; if (s->language != language) continue; value = usb_gadget_get_string(s, id, buf); if (value > 0) return value; } return -EINVAL; } static int get_string(struct usb_composite_dev *cdev, void *buf, u16 language, int id) { struct usb_composite_driver *composite = cdev->driver; struct usb_gadget_string_container *uc; struct usb_configuration *c; struct usb_function *f; int len; /* Yes, not only is USB's I18N support probably more than most * folk will ever care about ... also, it's all supported here. * (Except for UTF8 support for Unicode's "Astral Planes".) 
*/ /* 0 == report all available language codes */ if (id == 0) { struct usb_string_descriptor *s = buf; struct usb_gadget_strings **sp; memset(s, 0, 256); s->bDescriptorType = USB_DT_STRING; sp = composite->strings; if (sp) collect_langs(sp, s->wData); list_for_each_entry(c, &cdev->configs, list) { sp = c->strings; if (sp) collect_langs(sp, s->wData); list_for_each_entry(f, &c->functions, list) { sp = f->strings; if (sp) collect_langs(sp, s->wData); } } list_for_each_entry(uc, &cdev->gstrings, list) { struct usb_gadget_strings **sp; sp = get_containers_gs(uc); collect_langs(sp, s->wData); } for (len = 0; len <= 126 && s->wData[len]; len++) continue; if (!len) return -EINVAL; s->bLength = 2 * (len + 1); return s->bLength; } list_for_each_entry(uc, &cdev->gstrings, list) { struct usb_gadget_strings **sp; sp = get_containers_gs(uc); len = lookup_string(sp, buf, language, id); if (len > 0) return len; } /* String IDs are device-scoped, so we look up each string * table we're told about. These lookups are infrequent; * simpler-is-better here. */ if (composite->strings) { len = lookup_string(composite->strings, buf, language, id); if (len > 0) return len; } list_for_each_entry(c, &cdev->configs, list) { if (c->strings) { len = lookup_string(c->strings, buf, language, id); if (len > 0) return len; } list_for_each_entry(f, &c->functions, list) { if (!f->strings) continue; len = lookup_string(f->strings, buf, language, id); if (len > 0) return len; } } return -EINVAL; } /** * usb_string_id() - allocate an unused string ID * @cdev: the device whose string descriptor IDs are being allocated * Context: single threaded during gadget setup * * @usb_string_id() is called from bind() callbacks to allocate * string IDs. Drivers for functions, configurations, or gadgets will * then store that ID in the appropriate descriptors and string table. 
* * All string identifier should be allocated using this, * @usb_string_ids_tab() or @usb_string_ids_n() routine, to ensure * that for example different functions don't wrongly assign different * meanings to the same identifier. */ int usb_string_id(struct usb_composite_dev *cdev) { if (cdev->next_string_id < 254) { /* string id 0 is reserved by USB spec for list of * supported languages */ /* 255 reserved as well? -- mina86 */ cdev->next_string_id++; return cdev->next_string_id; } return -ENODEV; } EXPORT_SYMBOL_GPL(usb_string_id); /** * usb_string_ids() - allocate unused string IDs in batch * @cdev: the device whose string descriptor IDs are being allocated * @str: an array of usb_string objects to assign numbers to * Context: single threaded during gadget setup * * @usb_string_ids() is called from bind() callbacks to allocate * string IDs. Drivers for functions, configurations, or gadgets will * then copy IDs from the string table to the appropriate descriptors * and string table for other languages. * * All string identifier should be allocated using this, * @usb_string_id() or @usb_string_ids_n() routine, to ensure that for * example different functions don't wrongly assign different meanings * to the same identifier. 
*/ int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str) { int next = cdev->next_string_id; for (; str->s; ++str) { if (unlikely(next >= 254)) return -ENODEV; str->id = ++next; } cdev->next_string_id = next; return 0; } EXPORT_SYMBOL_GPL(usb_string_ids_tab); static struct usb_gadget_string_container *copy_gadget_strings( struct usb_gadget_strings **sp, unsigned n_gstrings, unsigned n_strings) { struct usb_gadget_string_container *uc; struct usb_gadget_strings **gs_array; struct usb_gadget_strings *gs; struct usb_string *s; unsigned mem; unsigned n_gs; unsigned n_s; void *stash; mem = sizeof(*uc); mem += sizeof(void *) * (n_gstrings + 1); mem += sizeof(struct usb_gadget_strings) * n_gstrings; mem += sizeof(struct usb_string) * (n_strings + 1) * (n_gstrings); uc = kmalloc(mem, GFP_KERNEL); if (!uc) return ERR_PTR(-ENOMEM); gs_array = get_containers_gs(uc); stash = uc->stash; stash += sizeof(void *) * (n_gstrings + 1); for (n_gs = 0; n_gs < n_gstrings; n_gs++) { struct usb_string *org_s; gs_array[n_gs] = stash; gs = gs_array[n_gs]; stash += sizeof(struct usb_gadget_strings); gs->language = sp[n_gs]->language; gs->strings = stash; org_s = sp[n_gs]->strings; for (n_s = 0; n_s < n_strings; n_s++) { s = stash; stash += sizeof(struct usb_string); if (org_s->s) s->s = org_s->s; else s->s = ""; org_s++; } s = stash; s->s = NULL; stash += sizeof(struct usb_string); } gs_array[n_gs] = NULL; return uc; } /** * usb_gstrings_attach() - attach gadget strings to a cdev and assign ids * @cdev: the device whose string descriptor IDs are being allocated * and attached. * @sp: an array of usb_gadget_strings to attach. * @n_strings: number of entries in each usb_strings array (sp[]->strings) * * This function will create a deep copy of usb_gadget_strings and usb_string * and attach it to the cdev. The actual string (usb_string.s) will not be * copied but only a referenced will be made. 
The struct usb_gadget_strings * array may contain multiple languges and should be NULL terminated. * The ->language pointer of each struct usb_gadget_strings has to contain the * same amount of entries. * For instance: sp[0] is en-US, sp[1] is es-ES. It is expected that the first * usb_string entry of es-ES containts the translation of the first usb_string * entry of en-US. Therefore both entries become the same id assign. */ struct usb_string *usb_gstrings_attach(struct usb_composite_dev *cdev, struct usb_gadget_strings **sp, unsigned n_strings) { struct usb_gadget_string_container *uc; struct usb_gadget_strings **n_gs; unsigned n_gstrings = 0; unsigned i; int ret; for (i = 0; sp[i]; i++) n_gstrings++; if (!n_gstrings) return ERR_PTR(-EINVAL); uc = copy_gadget_strings(sp, n_gstrings, n_strings); if (IS_ERR(uc)) return ERR_CAST(uc); n_gs = get_containers_gs(uc); ret = usb_string_ids_tab(cdev, n_gs[0]->strings); if (ret) goto err; for (i = 1; i < n_gstrings; i++) { struct usb_string *m_s; struct usb_string *s; unsigned n; m_s = n_gs[0]->strings; s = n_gs[i]->strings; for (n = 0; n < n_strings; n++) { s->id = m_s->id; s++; m_s++; } } list_add_tail(&uc->list, &cdev->gstrings); return n_gs[0]->strings; err: kfree(uc); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(usb_gstrings_attach); /** * usb_string_ids_n() - allocate unused string IDs in batch * @c: the device whose string descriptor IDs are being allocated * @n: number of string IDs to allocate * Context: single threaded during gadget setup * * Returns the first requested ID. This ID and next @n-1 IDs are now * valid IDs. At least provided that @n is non-zero because if it * is, returns last requested ID which is now very useful information. * * @usb_string_ids_n() is called from bind() callbacks to allocate * string IDs. Drivers for functions, configurations, or gadgets will * then store that ID in the appropriate descriptors and string table. 
* * All string identifier should be allocated using this, * @usb_string_id() or @usb_string_ids_n() routine, to ensure that for * example different functions don't wrongly assign different meanings * to the same identifier. */ int usb_string_ids_n(struct usb_composite_dev *c, unsigned n) { unsigned next = c->next_string_id; if (unlikely(n > 254 || (unsigned)next + n > 254)) return -ENODEV; c->next_string_id += n; return next + 1; } EXPORT_SYMBOL_GPL(usb_string_ids_n); /*-------------------------------------------------------------------------*/ static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req) { if (req->status || req->actual != req->length) DBG((struct usb_composite_dev *) ep->driver_data, "setup complete --> %d, %d/%d\n", req->status, req->actual, req->length); } /* * The setup() callback implements all the ep0 functionality that's * not handled lower down, in hardware or the hardware driver(like * device and endpoint feature flags, and their status). It's all * housekeeping for the gadget function we're implementing. Most of * the work is in config and function specific setup. */ int composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) { struct usb_composite_dev *cdev = get_gadget_data(gadget); struct usb_request *req = cdev->req; int value = -EOPNOTSUPP; int status = 0; u16 w_index = le16_to_cpu(ctrl->wIndex); u8 intf = w_index & 0xFF; u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); struct usb_function *f = NULL; u8 endp; /* partial re-init of the response message; the function or the * gadget might need to intercept e.g. a control-OUT completion * when we delegate to it. 
*/ req->zero = 0; req->complete = composite_setup_complete; req->length = 0; gadget->ep0->driver_data = cdev; switch (ctrl->bRequest) { /* we handle all standard USB descriptors */ case USB_REQ_GET_DESCRIPTOR: if (ctrl->bRequestType != USB_DIR_IN) goto unknown; switch (w_value >> 8) { case USB_DT_DEVICE: cdev->desc.bNumConfigurations = count_configs(cdev, USB_DT_DEVICE); cdev->desc.bMaxPacketSize0 = cdev->gadget->ep0->maxpacket; if (gadget_is_superspeed(gadget)) { if (gadget->speed >= USB_SPEED_SUPER) { cdev->desc.bcdUSB = cpu_to_le16(0x0300); cdev->desc.bMaxPacketSize0 = 9; } else { cdev->desc.bcdUSB = cpu_to_le16(0x0210); } } value = min(w_length, (u16) sizeof cdev->desc); memcpy(req->buf, &cdev->desc, value); break; case USB_DT_DEVICE_QUALIFIER: if (!gadget_is_dualspeed(gadget) || gadget->speed >= USB_SPEED_SUPER) break; device_qual(cdev); value = min_t(int, w_length, sizeof(struct usb_qualifier_descriptor)); break; case USB_DT_OTHER_SPEED_CONFIG: if (!gadget_is_dualspeed(gadget) || gadget->speed >= USB_SPEED_SUPER) break; /* FALLTHROUGH */ case USB_DT_CONFIG: value = config_desc(cdev, w_value); if (value >= 0) value = min(w_length, (u16) value); break; case USB_DT_STRING: value = get_string(cdev, req->buf, w_index, w_value & 0xff); if (value >= 0) value = min(w_length, (u16) value); break; case USB_DT_BOS: if (gadget_is_superspeed(gadget)) { value = bos_desc(cdev); value = min(w_length, (u16) value); } break; } break; /* any number of configs can work */ case USB_REQ_SET_CONFIGURATION: if (ctrl->bRequestType != 0) goto unknown; if (gadget_is_otg(gadget)) { if (gadget->a_hnp_support) DBG(cdev, "HNP available\n"); else if (gadget->a_alt_hnp_support) DBG(cdev, "HNP on another port\n"); else VDBG(cdev, "HNP inactive\n"); } spin_lock(&cdev->lock); value = set_config(cdev, ctrl, w_value); spin_unlock(&cdev->lock); break; case USB_REQ_GET_CONFIGURATION: if (ctrl->bRequestType != USB_DIR_IN) goto unknown; if (cdev->config) *(u8 *)req->buf = 
cdev->config->bConfigurationValue; else *(u8 *)req->buf = 0; value = min(w_length, (u16) 1); break; /* function drivers must handle get/set altsetting; if there's * no get() method, we know only altsetting zero works. */ case USB_REQ_SET_INTERFACE: if (ctrl->bRequestType != USB_RECIP_INTERFACE) goto unknown; if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) break; f = cdev->config->interface[intf]; if (!f) break; if (w_value && !f->set_alt) break; value = f->set_alt(f, w_index, w_value); if (value == USB_GADGET_DELAYED_STATUS) { DBG(cdev, "%s: interface %d (%s) requested delayed status\n", __func__, intf, f->name); cdev->delayed_status++; DBG(cdev, "delayed_status count %d\n", cdev->delayed_status); } break; case USB_REQ_GET_INTERFACE: if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)) goto unknown; if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) break; f = cdev->config->interface[intf]; if (!f) break; /* lots of interfaces only need altsetting zero... */ value = f->get_alt ? f->get_alt(f, w_index) : 0; if (value < 0) break; *((u8 *)req->buf) = value; value = min(w_length, (u16) 1); break; /* * USB 3.0 additions: * Function driver should handle get_status request. If such cb * wasn't supplied we respond with default value = 0 * Note: function driver should supply such cb only for the first * interface of the function */ case USB_REQ_GET_STATUS: if (!gadget_is_superspeed(gadget)) goto unknown; if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE)) goto unknown; value = 2; /* This is the length of the get_status reply */ put_unaligned_le16(0, req->buf); if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) break; f = cdev->config->interface[intf]; if (!f) break; status = f->get_status ? f->get_status(f) : 0; if (status < 0) break; put_unaligned_le16(status & 0x0000ffff, req->buf); break; /* * Function drivers should handle SetFeature/ClearFeature * (FUNCTION_SUSPEND) request. 
function_suspend cb should be supplied * only for the first interface of the function */ case USB_REQ_CLEAR_FEATURE: case USB_REQ_SET_FEATURE: if (!gadget_is_superspeed(gadget)) goto unknown; if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_INTERFACE)) goto unknown; switch (w_value) { case USB_INTRF_FUNC_SUSPEND: if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) break; f = cdev->config->interface[intf]; if (!f) break; value = 0; if (f->func_suspend) value = f->func_suspend(f, w_index >> 8); if (value < 0) { ERROR(cdev, "func_suspend() returned error %d\n", value); value = 0; } break; } break; default: unknown: VDBG(cdev, "non-core control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); /* functions always handle their interfaces and endpoints... * punt other recipients (other, WUSB, ...) to the current * configuration code. * * REVISIT it could make sense to let the composite device * take such requests too, if that's ever needed: to work * in config 0, etc. */ switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: if (!cdev->config || intf >= MAX_CONFIG_INTERFACES) break; f = cdev->config->interface[intf]; break; case USB_RECIP_ENDPOINT: endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f); list_for_each_entry(f, &cdev->config->functions, list) { if (test_bit(endp, f->endpoints)) break; } if (&f->list == &cdev->config->functions) f = NULL; break; } if (f && f->setup) value = f->setup(f, ctrl); else { struct usb_configuration *c; c = cdev->config; if (!c) goto done; /* try current config's setup */ if (c->setup) { value = c->setup(c, ctrl); goto done; } /* try the only function in the current config */ if (!list_is_singular(&c->functions)) goto done; f = list_first_entry(&c->functions, struct usb_function, list); if (f->setup) value = f->setup(f, ctrl); } goto done; } /* respond with data transfer before status phase? 
*/ if (value >= 0 && value != USB_GADGET_DELAYED_STATUS) { req->length = value; req->zero = value < w_length; value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC); if (value < 0) { DBG(cdev, "ep_queue --> %d\n", value); req->status = 0; composite_setup_complete(gadget->ep0, req); } } else if (value == USB_GADGET_DELAYED_STATUS && w_length != 0) { WARN(cdev, "%s: Delayed status not supported for w_length != 0", __func__); } done: /* device either stalls (value < 0) or reports success */ return value; } void composite_disconnect(struct usb_gadget *gadget) { struct usb_composite_dev *cdev = get_gadget_data(gadget); unsigned long flags; /* REVISIT: should we have config and device level * disconnect callbacks? */ spin_lock_irqsave(&cdev->lock, flags); if (cdev->config) reset_config(cdev); if (cdev->driver->disconnect) cdev->driver->disconnect(cdev); spin_unlock_irqrestore(&cdev->lock, flags); } /*-------------------------------------------------------------------------*/ static ssize_t suspended_show(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_gadget *gadget = dev_to_usb_gadget(dev); struct usb_composite_dev *cdev = get_gadget_data(gadget); return sprintf(buf, "%d\n", cdev->suspended); } static DEVICE_ATTR_RO(suspended); static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver) { struct usb_composite_dev *cdev = get_gadget_data(gadget); /* composite_disconnect() must already have been called * by the underlying peripheral controller driver! * so there's no i/o concurrency that could affect the * state protected by cdev->lock. 
*/ WARN_ON(cdev->config); while (!list_empty(&cdev->configs)) { struct usb_configuration *c; c = list_first_entry(&cdev->configs, struct usb_configuration, list); remove_config(cdev, c); } if (cdev->driver->unbind && unbind_driver) cdev->driver->unbind(cdev); composite_dev_cleanup(cdev); kfree(cdev->def_manufacturer); kfree(cdev); set_gadget_data(gadget, NULL); } static void composite_unbind(struct usb_gadget *gadget) { __composite_unbind(gadget, true); } static void update_unchanged_dev_desc(struct usb_device_descriptor *new, const struct usb_device_descriptor *old) { __le16 idVendor; __le16 idProduct; __le16 bcdDevice; u8 iSerialNumber; u8 iManufacturer; u8 iProduct; /* * these variables may have been set in * usb_composite_overwrite_options() */ idVendor = new->idVendor; idProduct = new->idProduct; bcdDevice = new->bcdDevice; iSerialNumber = new->iSerialNumber; iManufacturer = new->iManufacturer; iProduct = new->iProduct; *new = *old; if (idVendor) new->idVendor = idVendor; if (idProduct) new->idProduct = idProduct; if (bcdDevice) new->bcdDevice = bcdDevice; else new->bcdDevice = cpu_to_le16(get_default_bcdDevice()); if (iSerialNumber) new->iSerialNumber = iSerialNumber; if (iManufacturer) new->iManufacturer = iManufacturer; if (iProduct) new->iProduct = iProduct; } int composite_dev_prepare(struct usb_composite_driver *composite, struct usb_composite_dev *cdev) { struct usb_gadget *gadget = cdev->gadget; int ret = -ENOMEM; /* preallocate control response and buffer */ cdev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL); if (!cdev->req) return -ENOMEM; cdev->req->buf = kmalloc(USB_COMP_EP0_BUFSIZ, GFP_KERNEL); if (!cdev->req->buf) goto fail; ret = device_create_file(&gadget->dev, &dev_attr_suspended); if (ret) goto fail_dev; cdev->req->complete = composite_setup_complete; gadget->ep0->driver_data = cdev; cdev->driver = composite; /* * As per USB compliance update, a device that is actively drawing * more than 100mA from USB must report itself as 
bus-powered in * the GetStatus(DEVICE) call. */ if (CONFIG_USB_GADGET_VBUS_DRAW <= USB_SELF_POWER_VBUS_MAX_DRAW) usb_gadget_set_selfpowered(gadget); /* interface and string IDs start at zero via kzalloc. * we force endpoints to start unassigned; few controller * drivers will zero ep->driver_data. */ usb_ep_autoconfig_reset(gadget); return 0; fail_dev: kfree(cdev->req->buf); fail: usb_ep_free_request(gadget->ep0, cdev->req); cdev->req = NULL; return ret; } void composite_dev_cleanup(struct usb_composite_dev *cdev) { struct usb_gadget_string_container *uc, *tmp; list_for_each_entry_safe(uc, tmp, &cdev->gstrings, list) { list_del(&uc->list); kfree(uc); } if (cdev->req) { kfree(cdev->req->buf); usb_ep_free_request(cdev->gadget->ep0, cdev->req); } cdev->next_string_id = 0; device_remove_file(&cdev->gadget->dev, &dev_attr_suspended); } static int composite_bind(struct usb_gadget *gadget, struct usb_gadget_driver *gdriver) { struct usb_composite_dev *cdev; struct usb_composite_driver *composite = to_cdriver(gdriver); int status = -ENOMEM; cdev = kzalloc(sizeof *cdev, GFP_KERNEL); if (!cdev) return status; spin_lock_init(&cdev->lock); cdev->gadget = gadget; set_gadget_data(gadget, cdev); INIT_LIST_HEAD(&cdev->configs); INIT_LIST_HEAD(&cdev->gstrings); status = composite_dev_prepare(composite, cdev); if (status) goto fail; /* composite gadget needs to assign strings for whole device (like * serial number), register function drivers, potentially update * power state and consumption, etc */ status = composite->bind(cdev); if (status < 0) goto fail; update_unchanged_dev_desc(&cdev->desc, composite->dev); /* has userspace failed to provide a serial number? 
*/ if (composite->needs_serial && !cdev->desc.iSerialNumber) WARNING(cdev, "userspace failed to provide iSerialNumber\n"); INFO(cdev, "%s ready\n", composite->name); return 0; fail: __composite_unbind(gadget, false); return status; } /*-------------------------------------------------------------------------*/ static void composite_suspend(struct usb_gadget *gadget) { struct usb_composite_dev *cdev = get_gadget_data(gadget); struct usb_function *f; /* REVISIT: should we have config level * suspend/resume callbacks? */ DBG(cdev, "suspend\n"); if (cdev->config) { list_for_each_entry(f, &cdev->config->functions, list) { if (f->suspend) f->suspend(f); } } if (cdev->driver->suspend) cdev->driver->suspend(cdev); cdev->suspended = 1; usb_gadget_vbus_draw(gadget, 2); } static void composite_resume(struct usb_gadget *gadget) { struct usb_composite_dev *cdev = get_gadget_data(gadget); struct usb_function *f; u16 maxpower; /* REVISIT: should we have config level * suspend/resume callbacks? */ DBG(cdev, "resume\n"); if (cdev->driver->resume) cdev->driver->resume(cdev); if (cdev->config) { list_for_each_entry(f, &cdev->config->functions, list) { if (f->resume) f->resume(f); } maxpower = cdev->config->MaxPower; usb_gadget_vbus_draw(gadget, maxpower ? maxpower : CONFIG_USB_GADGET_VBUS_DRAW); } cdev->suspended = 0; } /*-------------------------------------------------------------------------*/ static const struct usb_gadget_driver composite_driver_template = { .bind = composite_bind, .unbind = composite_unbind, .setup = composite_setup, .disconnect = composite_disconnect, .suspend = composite_suspend, .resume = composite_resume, .driver = { .owner = THIS_MODULE, }, }; /** * usb_composite_probe() - register a composite driver * @driver: the driver to register * * Context: single threaded during gadget setup * * This function is used to register drivers using the composite driver * framework. The return value is zero, or a negative errno value. 
* Those values normally come from the driver's @bind method, which does * all the work of setting up the driver to match the hardware. * * On successful return, the gadget is ready to respond to requests from * the host, unless one of its components invokes usb_gadget_disconnect() * while it was binding. That would usually be done in order to wait for * some userspace participation. */ int usb_composite_probe(struct usb_composite_driver *driver) { struct usb_gadget_driver *gadget_driver; if (!driver || !driver->dev || !driver->bind) return -EINVAL; if (!driver->name) driver->name = "composite"; driver->gadget_driver = composite_driver_template; gadget_driver = &driver->gadget_driver; gadget_driver->function = (char *) driver->name; gadget_driver->driver.name = driver->name; gadget_driver->max_speed = driver->max_speed; return usb_gadget_probe_driver(gadget_driver); } EXPORT_SYMBOL_GPL(usb_composite_probe); /** * usb_composite_unregister() - unregister a composite driver * @driver: the driver to unregister * * This function is used to unregister drivers using the composite * driver framework. */ void usb_composite_unregister(struct usb_composite_driver *driver) { usb_gadget_unregister_driver(&driver->gadget_driver); } EXPORT_SYMBOL_GPL(usb_composite_unregister); /** * usb_composite_setup_continue() - Continue with the control transfer * @cdev: the composite device who's control transfer was kept waiting * * This function must be called by the USB function driver to continue * with the control transfer's data/status stage in case it had requested to * delay the data/status stages. A USB function's setup handler (e.g. set_alt()) * can request the composite framework to delay the setup request's data/status * stages by returning USB_GADGET_DELAYED_STATUS. 
*/ void usb_composite_setup_continue(struct usb_composite_dev *cdev) { int value; struct usb_request *req = cdev->req; unsigned long flags; DBG(cdev, "%s\n", __func__); spin_lock_irqsave(&cdev->lock, flags); if (cdev->delayed_status == 0) { WARN(cdev, "%s: Unexpected call\n", __func__); } else if (--cdev->delayed_status == 0) { DBG(cdev, "%s: Completing delayed status\n", __func__); req->length = 0; value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); if (value < 0) { DBG(cdev, "ep_queue --> %d\n", value); req->status = 0; composite_setup_complete(cdev->gadget->ep0, req); } } spin_unlock_irqrestore(&cdev->lock, flags); } EXPORT_SYMBOL_GPL(usb_composite_setup_continue); static char *composite_default_mfr(struct usb_gadget *gadget) { char *mfr; int len; len = snprintf(NULL, 0, "%s %s with %s", init_utsname()->sysname, init_utsname()->release, gadget->name); len++; mfr = kmalloc(len, GFP_KERNEL); if (!mfr) return NULL; snprintf(mfr, len, "%s %s with %s", init_utsname()->sysname, init_utsname()->release, gadget->name); return mfr; } void usb_composite_overwrite_options(struct usb_composite_dev *cdev, struct usb_composite_overwrite *covr) { struct usb_device_descriptor *desc = &cdev->desc; struct usb_gadget_strings *gstr = cdev->driver->strings[0]; struct usb_string *dev_str = gstr->strings; if (covr->idVendor) desc->idVendor = cpu_to_le16(covr->idVendor); if (covr->idProduct) desc->idProduct = cpu_to_le16(covr->idProduct); if (covr->bcdDevice) desc->bcdDevice = cpu_to_le16(covr->bcdDevice); if (covr->serial_number) { desc->iSerialNumber = dev_str[USB_GADGET_SERIAL_IDX].id; dev_str[USB_GADGET_SERIAL_IDX].s = covr->serial_number; } if (covr->manufacturer) { desc->iManufacturer = dev_str[USB_GADGET_MANUFACTURER_IDX].id; dev_str[USB_GADGET_MANUFACTURER_IDX].s = covr->manufacturer; } else if (!strlen(dev_str[USB_GADGET_MANUFACTURER_IDX].s)) { desc->iManufacturer = dev_str[USB_GADGET_MANUFACTURER_IDX].id; cdev->def_manufacturer = composite_default_mfr(cdev->gadget); 
dev_str[USB_GADGET_MANUFACTURER_IDX].s = cdev->def_manufacturer; } if (covr->product) { desc->iProduct = dev_str[USB_GADGET_PRODUCT_IDX].id; dev_str[USB_GADGET_PRODUCT_IDX].s = covr->product; } } EXPORT_SYMBOL_GPL(usb_composite_overwrite_options); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Brownell");
gpl-2.0
Josemurillo/Core-06-09-12
src/server/scripts/Outland/HellfireCitadel/HellfireRamparts/boss_watchkeeper_gargolmar.cpp
49
5342
/* * Copyright (C) 2008-2012 TrinityCore <http://www.trinitycore.org/> * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ /* ScriptData SDName: Boss_Watchkeeper_Gargolmar SD%Complete: 80 SDComment: Missing adds to heal him. Surge should be used on target furthest away, not random. SDCategory: Hellfire Citadel, Hellfire Ramparts EndScriptData */ #include "ScriptMgr.h" #include "ScriptedCreature.h" enum eSays { SAY_TAUNT = -1543000, SAY_HEAL = -1543001, SAY_SURGE = -1543002, SAY_AGGRO_1 = -1543003, SAY_AGGRO_2 = -1543004, SAY_AGGRO_3 = -1543005, SAY_KILL_1 = -1543006, SAY_KILL_2 = -1543007, SAY_DIE = -1543008, }; enum eSpells { SPELL_MORTAL_WOUND = 30641, H_SPELL_MORTAL_WOUND = 36814, SPELL_SURGE = 34645, SPELL_RETALIATION = 22857, }; class boss_watchkeeper_gargolmar : public CreatureScript { public: boss_watchkeeper_gargolmar() : CreatureScript("boss_watchkeeper_gargolmar") { } struct boss_watchkeeper_gargolmarAI : public ScriptedAI { boss_watchkeeper_gargolmarAI(Creature* creature) : ScriptedAI(creature) { } uint32 Surge_Timer; uint32 MortalWound_Timer; uint32 Retaliation_Timer; bool HasTaunted; bool YelledForHeal; void Reset() { Surge_Timer = 5000; MortalWound_Timer = 4000; Retaliation_Timer = 0; HasTaunted = false; YelledForHeal = false; } void EnterCombat(Unit* /*who*/) { DoScriptText(RAND(SAY_AGGRO_1, 
SAY_AGGRO_2, SAY_AGGRO_3), me); } void MoveInLineOfSight(Unit* who) { if (!me->getVictim() && me->canCreatureAttack(who)) { if (!me->CanFly() && me->GetDistanceZ(who) > CREATURE_Z_ATTACK_RANGE) return; float attackRadius = me->GetAttackDistance(who); if (me->IsWithinDistInMap(who, attackRadius) && me->IsWithinLOSInMap(who)) { //who->RemoveSpellsCausingAura(SPELL_AURA_MOD_STEALTH); AttackStart(who); } else if (!HasTaunted && me->IsWithinDistInMap(who, 60.0f)) { DoScriptText(SAY_TAUNT, me); HasTaunted = true; } } } void KilledUnit(Unit* /*victim*/) { DoScriptText(RAND(SAY_KILL_1, SAY_KILL_2), me); } void JustDied(Unit* /*killer*/) { DoScriptText(SAY_DIE, me); } void UpdateAI(const uint32 diff) { if (!UpdateVictim()) return; if (MortalWound_Timer <= diff) { DoCast(me->getVictim(), SPELL_MORTAL_WOUND); MortalWound_Timer = 5000+rand()%8000; } else MortalWound_Timer -= diff; if (Surge_Timer <= diff) { DoScriptText(SAY_SURGE, me); if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0)) DoCast(target, SPELL_SURGE); Surge_Timer = 5000+rand()%8000; } else Surge_Timer -= diff; if (HealthBelowPct(20)) { if (Retaliation_Timer <= diff) { DoCast(me, SPELL_RETALIATION); Retaliation_Timer = 30000; } else Retaliation_Timer -= diff; } if (!YelledForHeal) { if (HealthBelowPct(40)) { DoScriptText(SAY_HEAL, me); YelledForHeal = true; } } DoMeleeAttackIfReady(); } }; CreatureAI* GetAI(Creature* creature) const { return new boss_watchkeeper_gargolmarAI(creature); } }; void AddSC_boss_watchkeeper_gargolmar() { new boss_watchkeeper_gargolmar(); }
gpl-2.0
evan-a-a/linux-braswell
drivers/net/ethernet/rocker/rocker_ofdpa.c
49
83521
/* * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like * implementation * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com> * Copyright (c) 2014-2016 Jiri Pirko <jiri@mellanox.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/hashtable.h> #include <linux/crc32.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/if_vlan.h> #include <linux/if_bridge.h> #include <net/neighbour.h> #include <net/switchdev.h> #include <net/ip_fib.h> #include <net/arp.h> #include "rocker.h" #include "rocker_tlv.h" struct ofdpa_flow_tbl_key { u32 priority; enum rocker_of_dpa_table_id tbl_id; union { struct { u32 in_pport; u32 in_pport_mask; enum rocker_of_dpa_table_id goto_tbl; } ig_port; struct { u32 in_pport; __be16 vlan_id; __be16 vlan_id_mask; enum rocker_of_dpa_table_id goto_tbl; bool untagged; __be16 new_vlan_id; } vlan; struct { u32 in_pport; u32 in_pport_mask; __be16 eth_type; u8 eth_dst[ETH_ALEN]; u8 eth_dst_mask[ETH_ALEN]; __be16 vlan_id; __be16 vlan_id_mask; enum rocker_of_dpa_table_id goto_tbl; bool copy_to_cpu; } term_mac; struct { __be16 eth_type; __be32 dst4; __be32 dst4_mask; enum rocker_of_dpa_table_id goto_tbl; u32 group_id; } ucast_routing; struct { u8 eth_dst[ETH_ALEN]; u8 eth_dst_mask[ETH_ALEN]; int has_eth_dst; int has_eth_dst_mask; __be16 vlan_id; u32 tunnel_id; enum rocker_of_dpa_table_id goto_tbl; u32 group_id; bool copy_to_cpu; } bridge; struct { u32 in_pport; u32 in_pport_mask; u8 eth_src[ETH_ALEN]; u8 eth_src_mask[ETH_ALEN]; u8 eth_dst[ETH_ALEN]; u8 eth_dst_mask[ETH_ALEN]; __be16 eth_type; __be16 vlan_id; __be16 vlan_id_mask; u8 ip_proto; u8 ip_proto_mask; u8 ip_tos; u8 ip_tos_mask; u32 group_id; } acl; }; }; struct 
ofdpa_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;
	struct ofdpa_flow_tbl_key key;
	size_t key_len;
	u32 key_crc32; /* key */
	struct fib_info *fi;
};

struct ofdpa_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};

struct ofdpa_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;
	unsigned long touched;
	struct ofdpa_fdb_tbl_key {
		struct ofdpa_port *ofdpa_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};

struct ofdpa_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};

struct ofdpa_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};

enum {
	OFDPA_CTRL_LINK_LOCAL_MCAST,
	OFDPA_CTRL_LOCAL_ARP,
	OFDPA_CTRL_IPV4_MCAST,
	OFDPA_CTRL_IPV6_MCAST,
	OFDPA_CTRL_DFLT_BRIDGING,
	OFDPA_CTRL_DFLT_OVS,
	OFDPA_CTRL_MAX,
};

#define OFDPA_INTERNAL_VLAN_ID_BASE	0x0f00
#define OFDPA_N_INTERNAL_VLANS		255
#define OFDPA_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
#define OFDPA_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS)
#define OFDPA_UNTAGGED_VID 0

/* Per-switch driver state: flow/group/FDB/internal-VLAN/neigh tables,
 * each a hashtable guarded by its own spinlock.
 */
struct ofdpa {
	struct rocker *rocker;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl
accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
	unsigned long ageing_time;
	bool fib_aborted;
};

/* Per-port private state, paired with a rocker_port. */
struct ofdpa_port {
	struct ofdpa *ofdpa;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u32 pport;
	struct net_device *bridge_dev;
	__be16 internal_vlan_id;
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[OFDPA_CTRL_MAX];
	unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN];
};

/* Well-known MAC addresses and match masks used for ctrl/ACL entries. */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };

/* Rocker priority levels for flow table entries.  Higher
 * priority match takes precedence over lower priority match.
*/
enum {
	OFDPA_PRIORITY_UNKNOWN = 0,
	OFDPA_PRIORITY_IG_PORT = 1,
	OFDPA_PRIORITY_VLAN = 1,
	OFDPA_PRIORITY_TERM_MAC_UCAST = 0,
	OFDPA_PRIORITY_TERM_MAC_MCAST = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_VLAN = 3,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	OFDPA_PRIORITY_BRIDGING_TENANT = 3,
	OFDPA_PRIORITY_ACL_CTRL = 3,
	OFDPA_PRIORITY_ACL_NORMAL = 2,
	OFDPA_PRIORITY_ACL_DFLT = 1,
};

/* True if vlan_id lies in the driver-reserved internal VLAN range. */
static bool ofdpa_vlan_id_is_internal(__be16 vlan_id)
{
	u16 start = OFDPA_INTERNAL_VLAN_ID_BASE;
	u16 end = 0xffe;
	u16 _vlan_id = ntohs(vlan_id);

	return (_vlan_id >= start && _vlan_id <= end);
}

/* Map a VID to the wire VLAN id.  VID 0 (untagged) maps to the port's
 * internal VLAN, and *pop_vlan is set so the tag is stripped on egress.
 */
static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port,
				     u16 vid, bool *pop_vlan)
{
	__be16 vlan_id;

	if (pop_vlan)
		*pop_vlan = false;
	vlan_id = htons(vid);
	if (!vlan_id) {
		vlan_id = ofdpa_port->internal_vlan_id;
		if (pop_vlan)
			*pop_vlan = true;
	}

	return vlan_id;
}

/* Inverse of ofdpa_port_vid_to_vlan(): internal VLANs read back as VID 0. */
static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port,
				  __be16 vlan_id)
{
	if (ofdpa_vlan_id_is_internal(vlan_id))
		return 0;

	return ntohs(vlan_id);
}

static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port,
				const char *kind)
{
	return ofdpa_port->bridge_dev &&
	       !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind);
}

static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "bridge");
}

static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port)
{
	return ofdpa_port_is_slave(ofdpa_port, "openvswitch");
}

#define OFDPA_OP_FLAG_REMOVE		BIT(0)
#define OFDPA_OP_FLAG_NOWAIT		BIT(1)
#define OFDPA_OP_FLAG_LEARNED		BIT(2)
#define OFDPA_OP_FLAG_REFRESH		BIT(3)

static bool ofdpa_flags_nowait(int flags)
{
	return flags & OFDPA_OP_FLAG_NOWAIT;
}

static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags,
			       size_t size)
{
	struct switchdev_trans_item *elem = NULL;
	gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ?
GFP_ATOMIC : GFP_KERNEL;

	/* If in transaction prepare phase, allocate the memory
	 * and enqueue it on a transaction.  If in transaction
	 * commit phase, dequeue the memory from the transaction
	 * rather than re-allocating the memory.  The idea is the
	 * driver code paths for prepare and commit are identical
	 * so the memory allocated in the prepare phase is the
	 * memory used in the commit phase.
	 */

	if (!trans) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
	} else if (switchdev_trans_ph_prepare(trans)) {
		elem = kzalloc(size + sizeof(*elem), gfp_flags);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		elem = switchdev_trans_item_dequeue(trans);
	}

	/* The switchdev_trans_item header precedes the caller's memory */
	return elem ? elem + 1 : NULL;
}

static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags,
			   size_t size)
{
	return __ofdpa_mem_alloc(trans, flags, size);
}

static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags,
			   size_t n, size_t size)
{
	return __ofdpa_mem_alloc(trans, flags, n * size);
}

static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored if in transaction prepare phase.  The
	 * memory remains on the per-port list until freed in the
	 * commit phase.
*/
	if (switchdev_trans_ph_prepare(trans))
		return;
	/* step back over the switchdev_trans_item header */
	elem = (struct switchdev_trans_item *) mem - 1;
	kfree(elem);
}

/*************************************************************
 * Flow, group, FDB, internal VLAN and neigh command prepares
 *************************************************************/

/* Emit the TLVs for an ingress-port table entry. */
static int
ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
			       const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.ig_port.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.ig_port.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ig_port.goto_tbl))
		return -EMSGSIZE;

	return 0;
}

/* Emit the TLVs for a VLAN table entry. */
static int ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
				       const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.vlan.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.vlan.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.vlan.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.vlan.goto_tbl))
		return -EMSGSIZE;
	/* new VLAN id only sent when re-tagging untagged ingress */
	if (entry->key.vlan.untagged &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
				entry->key.vlan.new_vlan_id))
		return -EMSGSIZE;

	return 0;
}

/* Emit the TLVs for a termination-MAC table entry. */
static int
ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info,
ROCKER_TLV_OF_DPA_DST_MAC, ETH_ALEN,
			   entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	/* copy-to-CPU action TLV is only present when enabled */
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}

/* Emit the TLVs for an IPv4 unicast-routing table entry. */
static int
ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
				     const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.ucast_routing.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
				entry->key.ucast_routing.dst4))
		return -EMSGSIZE;
	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
				entry->key.ucast_routing.dst4_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.ucast_routing.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.ucast_routing.group_id))
		return -EMSGSIZE;

	return 0;
}

/* Emit the TLVs for a bridging table entry; optional fields are
 * gated on their has_*/presence flags.
 */
static int
ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			      const struct ofdpa_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}

/* Emit the TLVs for an ACL policy table entry. */
static int ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
				      const struct ofdpa_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	/* L3 match fields only apply to IP ethertypes */
	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		if
(rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		/* ip_tos carries DSCP in the low 6 bits and ECN in the
		 * top 2; they are emitted as separate TLVs.
		 */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}

/* Build a flow add/mod command descriptor, dispatching on the target
 * OF-DPA table to emit the per-table key/action TLVs.
 */
static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	const struct ofdpa_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case
ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry); break; default: err = -ENOTSUPP; break; } if (err) return err; rocker_tlv_nest_end(desc_info, cmd_info); return 0; } static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { const struct ofdpa_flow_tbl_entry *entry = priv; struct rocker_tlv *cmd_info; if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) return -EMSGSIZE; cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, entry->cookie)) return -EMSGSIZE; rocker_tlv_nest_end(desc_info, cmd_info); return 0; } static int ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, struct ofdpa_group_tbl_entry *entry) { if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, ROCKER_GROUP_PORT_GET(entry->group_id))) return -EMSGSIZE; if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, entry->l2_interface.pop_vlan)) return -EMSGSIZE; return 0; } static int ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, const struct ofdpa_group_tbl_entry *entry) { if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, entry->l2_rewrite.group_id)) return -EMSGSIZE; if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, ETH_ALEN, entry->l2_rewrite.eth_src)) return -EMSGSIZE; if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, ETH_ALEN, entry->l2_rewrite.eth_dst)) return -EMSGSIZE; if (entry->l2_rewrite.vlan_id && rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, entry->l2_rewrite.vlan_id)) return -EMSGSIZE; return 0; } static int ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, const struct ofdpa_group_tbl_entry *entry) { int i; struct rocker_tlv *group_ids; if 
(rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
			       entry->group_count))
		return -EMSGSIZE;

	group_ids = rocker_tlv_nest_start(desc_info,
					  ROCKER_TLV_OF_DPA_GROUP_IDS);
	if (!group_ids)
		return -EMSGSIZE;

	for (i = 0; i < entry->group_count; i++)
		/* Note TLV array is 1-based */
		if (rocker_tlv_put_u32(desc_info, i + 1,
				       entry->group_ids[i]))
			return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, group_ids);

	return 0;
}

/* Emit the TLVs for an L3 unicast group entry. */
static int
ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				   const struct ofdpa_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}

/* Build a group add/mod command descriptor, dispatching on the group
 * type encoded in group_id.
 */
static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	struct ofdpa_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info,
entry); break; case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry); break; case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry); break; default: err = -ENOTSUPP; break; } if (err) return err; rocker_tlv_nest_end(desc_info, cmd_info); return 0; } static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { const struct ofdpa_group_tbl_entry *entry = priv; struct rocker_tlv *cmd_info; if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) return -EMSGSIZE; cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); if (!cmd_info) return -EMSGSIZE; if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, entry->group_id)) return -EMSGSIZE; rocker_tlv_nest_end(desc_info, cmd_info); return 0; } /*************************************************** * Flow, group, FDB, internal VLAN and neigh tables ***************************************************/ static struct ofdpa_flow_tbl_entry * ofdpa_flow_tbl_find(const struct ofdpa *ofdpa, const struct ofdpa_flow_tbl_entry *match) { struct ofdpa_flow_tbl_entry *found; size_t key_len = match->key_len ? match->key_len : sizeof(found->key); hash_for_each_possible(ofdpa->flow_tbl, found, entry, match->key_crc32) { if (memcmp(&found->key, &match->key, key_len) == 0) return found; } return NULL; } static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags, struct ofdpa_flow_tbl_entry *match) { struct ofdpa *ofdpa = ofdpa_port->ofdpa; struct ofdpa_flow_tbl_entry *found; size_t key_len = match->key_len ? 
match->key_len : sizeof(found->key);
	unsigned long lock_flags;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		/* replacing an existing entry: keep its cookie and send
		 * a MOD instead of an ADD
		 */
		match->cookie = found->cookie;
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_kfree(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
	} else {
		found = match;
		found->cookie = ofdpa->flow_tbl_next_cookie++;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32);

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_flow_tbl_add,
				       found, NULL, NULL);
	return 0;
}

/* Remove a flow entry from the shadow table and, outside the
 * transaction prepare phase, push the delete command to the device.
 */
static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      struct ofdpa_flow_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_flow_tbl_entry *found;
	size_t key_len = match->key_len ?
match->key_len : sizeof(found->key);
	unsigned long lock_flags;
	int err = 0;

	match->key_crc32 = crc32(~0, &match->key, key_len);

	spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags);

	found = ofdpa_flow_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags);

	/* the caller's match entry is consumed either way */
	ofdpa_kfree(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_flow_tbl_del,
					      found, NULL, NULL);
		ofdpa_kfree(trans, found);
	}

	return err;
}

/* Dispatch to add or del depending on OFDPA_OP_FLAG_REMOVE. */
static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port,
			     struct switchdev_trans *trans, int flags,
			     struct ofdpa_flow_tbl_entry *entry)
{
	if (flags & OFDPA_OP_FLAG_REMOVE)
		return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry);
	else
		return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry);
}

/* Install/remove an ingress-port table entry. */
static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port,
				  struct switchdev_trans *trans, int flags,
				  u32 in_pport, u32 in_pport_mask,
				  enum rocker_of_dpa_table_id goto_tbl)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_IG_PORT;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
	entry->key.ig_port.in_pport = in_pport;
	entry->key.ig_port.in_pport_mask = in_pport_mask;
	entry->key.ig_port.goto_tbl = goto_tbl;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

/* Install/remove a VLAN table entry. */
static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       u32 in_pport, __be16 vlan_id,
			       __be16 vlan_id_mask,
			       enum rocker_of_dpa_table_id goto_tbl,
			       bool untagged, __be16 new_vlan_id)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.priority = OFDPA_PRIORITY_VLAN;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
entry->key.vlan.in_pport = in_pport;
	entry->key.vlan.vlan_id = vlan_id;
	entry->key.vlan.vlan_id_mask = vlan_id_mask;
	entry->key.vlan.goto_tbl = goto_tbl;
	entry->key.vlan.untagged = untagged;
	entry->key.vlan.new_vlan_id = new_vlan_id;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

/* Install/remove a termination-MAC table entry.  Multicast dest MACs
 * route to the multicast-routing table, unicast to unicast-routing.
 */
static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port,
				   struct switchdev_trans *trans,
				   u32 in_pport, u32 in_pport_mask,
				   __be16 eth_type, const u8 *eth_dst,
				   const u8 *eth_dst_mask, __be16 vlan_id,
				   __be16 vlan_id_mask, bool copy_to_cpu,
				   int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	if (is_multicast_ether_addr(eth_dst)) {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
	} else {
		entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST;
		entry->key.term_mac.goto_tbl =
			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	}

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	entry->key.term_mac.in_pport = in_pport;
	entry->key.term_mac.in_pport_mask = in_pport_mask;
	entry->key.term_mac.eth_type = eth_type;
	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
	entry->key.term_mac.vlan_id = vlan_id;
	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
	entry->key.term_mac.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

/* Install/remove a bridging table entry; priority is derived from
 * whether the match is VLAN- or tenant-bridged, default (no/ masked
 * dest MAC) and wildcarded (non-exact mask).
 */
static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
				 struct switchdev_trans *trans, int flags,
				 const u8 *eth_dst, const u8 *eth_dst_mask,
				 __be16 vlan_id, u32 tunnel_id,
				 enum rocker_of_dpa_table_id goto_tbl,
				 u32 group_id, bool copy_to_cpu)
{
	struct ofdpa_flow_tbl_entry *entry;
	u32 priority;
	bool vlan_bridging = !!vlan_id;
	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
	bool wild = false;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id =
ROCKER_OF_DPA_TABLE_ID_BRIDGING;

	if (eth_dst) {
		entry->key.bridge.has_eth_dst = 1;
		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
	}
	if (eth_dst_mask) {
		entry->key.bridge.has_eth_dst_mask = 1;
		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
		/* any mask other than all-ones is a wildcard match */
		if (!ether_addr_equal(eth_dst_mask, ff_mac))
			wild = true;
	}

	priority = OFDPA_PRIORITY_UNKNOWN;
	if (vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
	else if (vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
	else if (vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_VLAN;
	else if (!vlan_bridging && dflt && wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
	else if (!vlan_bridging && dflt && !wild)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
	else if (!vlan_bridging && !dflt)
		priority = OFDPA_PRIORITY_BRIDGING_TENANT;

	entry->key.priority = priority;
	entry->key.bridge.vlan_id = vlan_id;
	entry->key.bridge.tunnel_id = tunnel_id;
	entry->key.bridge.goto_tbl = goto_tbl;
	entry->key.bridge.group_id = group_id;
	entry->key.bridge.copy_to_cpu = copy_to_cpu;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

/* Install/remove an IPv4 unicast route entry.  key_len excludes the
 * group_id so lookups match on dest/mask only, letting the nexthop
 * group be updated in place.
 */
static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port,
					 struct switchdev_trans *trans,
					 __be16 eth_type, __be32 dst,
					 __be32 dst_mask, u32 priority,
					 enum rocker_of_dpa_table_id goto_tbl,
					 u32 group_id, struct fib_info *fi,
					 int flags)
{
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
	entry->key.priority = priority;
	entry->key.ucast_routing.eth_type = eth_type;
	entry->key.ucast_routing.dst4 = dst;
	entry->key.ucast_routing.dst4_mask = dst_mask;
	entry->key.ucast_routing.goto_tbl = goto_tbl;
	entry->key.ucast_routing.group_id = group_id;
	entry->key_len = offsetof(struct ofdpa_flow_tbl_key,
				  ucast_routing.group_id);
	entry->fi = fi;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags,
entry);
}

/* Install/remove an ACL policy entry; priority is bumped for default
 * multicast matches and for link-local (control) dest MACs.
 */
static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port,
			      struct switchdev_trans *trans, int flags,
			      u32 in_pport, u32 in_pport_mask,
			      const u8 *eth_src, const u8 *eth_src_mask,
			      const u8 *eth_dst, const u8 *eth_dst_mask,
			      __be16 eth_type, __be16 vlan_id,
			      __be16 vlan_id_mask, u8 ip_proto,
			      u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
			      u32 group_id)
{
	u32 priority;
	struct ofdpa_flow_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	priority = OFDPA_PRIORITY_ACL_NORMAL;
	if (eth_dst && eth_dst_mask) {
		if (ether_addr_equal(eth_dst_mask, mcast_mac))
			priority = OFDPA_PRIORITY_ACL_DFLT;
		else if (is_link_local_ether_addr(eth_dst))
			priority = OFDPA_PRIORITY_ACL_CTRL;
	}

	entry->key.priority = priority;
	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	entry->key.acl.in_pport = in_pport;
	entry->key.acl.in_pport_mask = in_pport_mask;

	if (eth_src)
		ether_addr_copy(entry->key.acl.eth_src, eth_src);
	if (eth_src_mask)
		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
	if (eth_dst)
		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
	if (eth_dst_mask)
		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);

	entry->key.acl.eth_type = eth_type;
	entry->key.acl.vlan_id = vlan_id;
	entry->key.acl.vlan_id_mask = vlan_id_mask;
	entry->key.acl.ip_proto = ip_proto;
	entry->key.acl.ip_proto_mask = ip_proto_mask;
	entry->key.acl.ip_tos = ip_tos;
	entry->key.acl.ip_tos_mask = ip_tos_mask;
	entry->key.acl.group_id = group_id;

	return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry);
}

/* Look up a group entry by group_id; caller holds group_tbl_lock. */
static struct ofdpa_group_tbl_entry *
ofdpa_group_tbl_find(const struct ofdpa *ofdpa,
		     const struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa_group_tbl_entry *found;

	hash_for_each_possible(ofdpa->group_tbl, found,
			       entry, match->group_id) {
		if (found->group_id == match->group_id)
			return found;
	}

	return NULL;
}

/* Free a group entry, including the group_ids array that only the
 * fan-out (flood/mcast) group types allocate.
 */
static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans,
				       struct ofdpa_group_tbl_entry *entry)
{
	switch
(ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		ofdpa_kfree(trans, entry->group_ids);
		break;
	default:
		break;
	}
	ofdpa_kfree(trans, entry);
}

/* Insert (or replace) a group entry in the shadow table and, outside
 * the transaction prepare phase, push the add/mod command to the device.
 */
static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		ofdpa_group_tbl_entry_free(trans, found);
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
	} else {
		found = match;
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
	}

	if (!switchdev_trans_ph_prepare(trans))
		hash_add(ofdpa->group_tbl, &found->entry, found->group_id);

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	if (!switchdev_trans_ph_prepare(trans))
		return rocker_cmd_exec(ofdpa_port->rocker_port,
				       ofdpa_flags_nowait(flags),
				       ofdpa_cmd_group_tbl_add,
				       found, NULL, NULL);
	return 0;
}

/* Remove a group entry from the shadow table and, outside the
 * transaction prepare phase, push the delete command to the device.
 */
static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port,
			       struct switchdev_trans *trans, int flags,
			       struct ofdpa_group_tbl_entry *match)
{
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;
	struct ofdpa_group_tbl_entry *found;
	unsigned long lock_flags;
	int err = 0;

	spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags);

	found = ofdpa_group_tbl_find(ofdpa, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
	}

	spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags);

	/* the caller's match entry is consumed either way */
	ofdpa_group_tbl_entry_free(trans, match);

	if (found) {
		if (!switchdev_trans_ph_prepare(trans))
			err = rocker_cmd_exec(ofdpa_port->rocker_port,
					      ofdpa_flags_nowait(flags),
					      ofdpa_cmd_group_tbl_del,
					      found, NULL, NULL);
		ofdpa_group_tbl_entry_free(trans, found);
	}

	return err;
}

static int
ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port,
		   struct switchdev_trans *trans, int flags,
		   struct ofdpa_group_tbl_entry *entry)
{
	/* dispatch to add or del depending on OFDPA_OP_FLAG_REMOVE */
	if (flags & OFDPA_OP_FLAG_REMOVE)
		return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry);
	else
		return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry);
}

/* Install/remove an L2 interface group for (vlan_id, out_pport). */
static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port,
				    struct switchdev_trans *trans, int flags,
				    __be16 vlan_id, u32 out_pport,
				    int pop_vlan)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
	entry->l2_interface.pop_vlan = pop_vlan;

	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
}

/* Install/remove a fan-out group referencing a copied list of
 * subordinate group ids.
 */
static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port,
				  struct switchdev_trans *trans,
				  int flags, u8 group_count,
				  const u32 *group_ids, u32 group_id)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = group_id;
	entry->group_count = group_count;

	entry->group_ids = ofdpa_kcalloc(trans, flags, group_count,
					 sizeof(u32));
	if (!entry->group_ids) {
		ofdpa_kfree(trans, entry);
		return -ENOMEM;
	}
	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));

	return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry);
}

/* L2 flood is just a fan-out group. */
static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port,
				struct switchdev_trans *trans, int flags,
				__be16 vlan_id, u8 group_count,
				const u32 *group_ids, u32 group_id)
{
	return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags,
				      group_count, group_ids, group_id);
}

/* Install/remove an L3 unicast group that rewrites MACs/VLAN and
 * chains to the L2 interface group for (vlan_id, pport).
 */
static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port,
				  struct switchdev_trans *trans, int flags,
				  u32 index, const u8 *src_mac,
				  const u8 *dst_mac, __be16 vlan_id,
				  bool ttl_check, u32 pport)
{
	struct ofdpa_group_tbl_entry *entry;

	entry = ofdpa_kzalloc(trans, flags, sizeof(*entry));
	if (!entry)
		return -ENOMEM;

	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
	if (src_mac)
ether_addr_copy(entry->l3_unicast.eth_src, src_mac); if (dst_mac) ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); entry->l3_unicast.vlan_id = vlan_id; entry->l3_unicast.ttl_check = ttl_check; entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry); } static struct ofdpa_neigh_tbl_entry * ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr) { struct ofdpa_neigh_tbl_entry *found; hash_for_each_possible(ofdpa->neigh_tbl, found, entry, be32_to_cpu(ip_addr)) if (found->ip_addr == ip_addr) return found; return NULL; } static void ofdpa_neigh_add(struct ofdpa *ofdpa, struct switchdev_trans *trans, struct ofdpa_neigh_tbl_entry *entry) { if (!switchdev_trans_ph_commit(trans)) entry->index = ofdpa->neigh_tbl_next_index++; if (switchdev_trans_ph_prepare(trans)) return; entry->ref_count++; hash_add(ofdpa->neigh_tbl, &entry->entry, be32_to_cpu(entry->ip_addr)); } static void ofdpa_neigh_del(struct switchdev_trans *trans, struct ofdpa_neigh_tbl_entry *entry) { if (switchdev_trans_ph_prepare(trans)) return; if (--entry->ref_count == 0) { hash_del(&entry->entry); ofdpa_kfree(trans, entry); } } static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry, struct switchdev_trans *trans, const u8 *eth_dst, bool ttl_check) { if (eth_dst) { ether_addr_copy(entry->eth_dst, eth_dst); entry->ttl_check = ttl_check; } else if (!switchdev_trans_ph_prepare(trans)) { entry->ref_count++; } } static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags, __be32 ip_addr, const u8 *eth_dst) { struct ofdpa *ofdpa = ofdpa_port->ofdpa; struct ofdpa_neigh_tbl_entry *entry; struct ofdpa_neigh_tbl_entry *found; unsigned long lock_flags; __be16 eth_type = htons(ETH_P_IP); enum rocker_of_dpa_table_id goto_tbl = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; u32 group_id; u32 priority = 0; bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); bool updating; bool removing; int err = 0; 
entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); if (!entry) return -ENOMEM; spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags); found = ofdpa_neigh_tbl_find(ofdpa, ip_addr); updating = found && adding; removing = found && !adding; adding = !found && adding; if (adding) { entry->ip_addr = ip_addr; entry->dev = ofdpa_port->dev; ether_addr_copy(entry->eth_dst, eth_dst); entry->ttl_check = true; ofdpa_neigh_add(ofdpa, trans, entry); } else if (removing) { memcpy(entry, found, sizeof(*entry)); ofdpa_neigh_del(trans, found); } else if (updating) { ofdpa_neigh_update(found, trans, eth_dst, true); memcpy(entry, found, sizeof(*entry)); } else { err = -ENOENT; } spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags); if (err) goto err_out; /* For each active neighbor, we have an L3 unicast group and * a /32 route to the neighbor, which uses the L3 unicast * group. The L3 unicast group can also be referred to by * other routes' nexthops. */ err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags, entry->index, ofdpa_port->dev->dev_addr, entry->eth_dst, ofdpa_port->internal_vlan_id, entry->ttl_check, ofdpa_port->pport); if (err) { netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n", err, entry->index); goto err_out; } if (adding || removing) { group_id = ROCKER_GROUP_L3_UNICAST(entry->index); err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, ip_addr, inet_make_mask(32), priority, goto_tbl, group_id, NULL, flags); if (err) netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n", err, &entry->ip_addr, group_id); } err_out: if (!adding) ofdpa_kfree(trans, entry); return err; } static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, __be32 ip_addr) { struct net_device *dev = ofdpa_port->dev; struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); int err = 0; if (!n) { n = neigh_create(&arp_tbl, &ip_addr, dev); if (IS_ERR(n)) return PTR_ERR(n); } /* If the neigh 
is already resolved, then go ahead and * install the entry, otherwise start the ARP process to * resolve the neigh. */ if (n->nud_state & NUD_VALID) err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0, ip_addr, n->ha); else neigh_event_send(n, NULL); neigh_release(n); return err; } static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags, __be32 ip_addr, u32 *index) { struct ofdpa *ofdpa = ofdpa_port->ofdpa; struct ofdpa_neigh_tbl_entry *entry; struct ofdpa_neigh_tbl_entry *found; unsigned long lock_flags; bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); bool updating; bool removing; bool resolved = true; int err = 0; entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); if (!entry) return -ENOMEM; spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags); found = ofdpa_neigh_tbl_find(ofdpa, ip_addr); updating = found && adding; removing = found && !adding; adding = !found && adding; if (adding) { entry->ip_addr = ip_addr; entry->dev = ofdpa_port->dev; ofdpa_neigh_add(ofdpa, trans, entry); *index = entry->index; resolved = false; } else if (removing) { ofdpa_neigh_del(trans, found); *index = found->index; } else if (updating) { ofdpa_neigh_update(found, trans, NULL, false); resolved = !is_zero_ether_addr(found->eth_dst); *index = found->index; } else { err = -ENOENT; } spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags); if (!adding) ofdpa_kfree(trans, entry); if (err) return err; /* Resolved means neigh ip_addr is resolved to neigh mac. */ if (!resolved) err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr); return err; } static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa, int port_index) { struct rocker_port *rocker_port; rocker_port = ofdpa->rocker->ports[port_index]; return rocker_port ? 
rocker_port->wpriv : NULL; } static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags, __be16 vlan_id) { struct ofdpa_port *p; const struct ofdpa *ofdpa = ofdpa_port->ofdpa; unsigned int port_count = ofdpa->rocker->port_count; u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); u32 *group_ids; u8 group_count = 0; int err = 0; int i; group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32)); if (!group_ids) return -ENOMEM; /* Adjust the flood group for this VLAN. The flood group * references an L2 interface group for each port in this * VLAN. */ for (i = 0; i < port_count; i++) { p = ofdpa_port_get(ofdpa, i); if (!p) continue; if (!ofdpa_port_is_bridged(p)) continue; if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { group_ids[group_count++] = ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); } } /* If there are no bridged ports in this VLAN, we're done */ if (group_count == 0) goto no_ports_in_vlan; err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id, group_count, group_ids, group_id); if (err) netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err); no_ports_in_vlan: ofdpa_kfree(trans, group_ids); return err; } static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags, __be16 vlan_id, bool pop_vlan) { const struct ofdpa *ofdpa = ofdpa_port->ofdpa; unsigned int port_count = ofdpa->rocker->port_count; struct ofdpa_port *p; bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); u32 out_pport; int ref = 0; int err; int i; /* An L2 interface group for this port in this VLAN, but * only when port STP state is LEARNING|FORWARDING. 
*/ if (ofdpa_port->stp_state == BR_STATE_LEARNING || ofdpa_port->stp_state == BR_STATE_FORWARDING) { out_pport = ofdpa_port->pport; err = ofdpa_group_l2_interface(ofdpa_port, trans, flags, vlan_id, out_pport, pop_vlan); if (err) { netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n", err, out_pport); return err; } } /* An L2 interface group for this VLAN to CPU port. * Add when first port joins this VLAN and destroy when * last port leaves this VLAN. */ for (i = 0; i < port_count; i++) { p = ofdpa_port_get(ofdpa, i); if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap)) ref++; } if ((!adding || ref != 1) && (adding || ref != 0)) return 0; out_pport = 0; err = ofdpa_group_l2_interface(ofdpa_port, trans, flags, vlan_id, out_pport, pop_vlan); if (err) { netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err); return err; } return 0; } static struct ofdpa_ctrl { const u8 *eth_dst; const u8 *eth_dst_mask; __be16 eth_type; bool acl; bool bridge; bool term; bool copy_to_cpu; } ofdpa_ctrls[] = { [OFDPA_CTRL_LINK_LOCAL_MCAST] = { /* pass link local multicast pkts up to CPU for filtering */ .eth_dst = ll_mac, .eth_dst_mask = ll_mask, .acl = true, }, [OFDPA_CTRL_LOCAL_ARP] = { /* pass local ARP pkts up to CPU */ .eth_dst = zero_mac, .eth_dst_mask = zero_mac, .eth_type = htons(ETH_P_ARP), .acl = true, }, [OFDPA_CTRL_IPV4_MCAST] = { /* pass IPv4 mcast pkts up to CPU, RFC 1112 */ .eth_dst = ipv4_mcast, .eth_dst_mask = ipv4_mask, .eth_type = htons(ETH_P_IP), .term = true, .copy_to_cpu = true, }, [OFDPA_CTRL_IPV6_MCAST] = { /* pass IPv6 mcast pkts up to CPU, RFC 2464 */ .eth_dst = ipv6_mcast, .eth_dst_mask = ipv6_mask, .eth_type = htons(ETH_P_IPV6), .term = true, .copy_to_cpu = true, }, [OFDPA_CTRL_DFLT_BRIDGING] = { /* flood any pkts on vlan */ .bridge = true, .copy_to_cpu = true, }, [OFDPA_CTRL_DFLT_OVS] = { /* pass all pkts up to CPU */ .eth_dst = zero_mac, .eth_dst_mask = zero_mac, .acl = true, }, }; static int 
ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags, const struct ofdpa_ctrl *ctrl, __be16 vlan_id) { u32 in_pport = ofdpa_port->pport; u32 in_pport_mask = 0xffffffff; u32 out_pport = 0; const u8 *eth_src = NULL; const u8 *eth_src_mask = NULL; __be16 vlan_id_mask = htons(0xffff); u8 ip_proto = 0; u8 ip_proto_mask = 0; u8 ip_tos = 0; u8 ip_tos_mask = 0; u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); int err; err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags, in_pport, in_pport_mask, eth_src, eth_src_mask, ctrl->eth_dst, ctrl->eth_dst_mask, ctrl->eth_type, vlan_id, vlan_id_mask, ip_proto, ip_proto_mask, ip_tos, ip_tos_mask, group_id); if (err) netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err); return err; } static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags, const struct ofdpa_ctrl *ctrl, __be16 vlan_id) { enum rocker_of_dpa_table_id goto_tbl = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); u32 tunnel_id = 0; int err; if (!ofdpa_port_is_bridged(ofdpa_port)) return 0; err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, ctrl->eth_dst, ctrl->eth_dst_mask, vlan_id, tunnel_id, goto_tbl, group_id, ctrl->copy_to_cpu); if (err) netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err); return err; } static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags, const struct ofdpa_ctrl *ctrl, __be16 vlan_id) { u32 in_pport_mask = 0xffffffff; __be16 vlan_id_mask = htons(0xffff); int err; if (ntohs(vlan_id) == 0) vlan_id = ofdpa_port->internal_vlan_id; err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans, ofdpa_port->pport, in_pport_mask, ctrl->eth_type, ctrl->eth_dst, ctrl->eth_dst_mask, vlan_id, vlan_id_mask, ctrl->copy_to_cpu, flags); if (err) netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err); return err; } static int ofdpa_port_ctrl_vlan(struct ofdpa_port 
*ofdpa_port, struct switchdev_trans *trans,
				int flags, const struct ofdpa_ctrl *ctrl,
				__be16 vlan_id)
{
	/* Dispatch to the ACL, bridge, or termination-MAC flavour of
	 * ctrl-frame programming, based on which flag the ctrl
	 * descriptor sets.
	 */
	if (ctrl->acl)
		return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags,
						ctrl, vlan_id);
	if (ctrl->bridge)
		return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags,
						   ctrl, vlan_id);

	if (ctrl->term)
		return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags,
						 ctrl, vlan_id);

	return -EOPNOTSUPP;
}

/* Apply every ctrl currently enabled on the port to @vlan_id (used when
 * a new VLAN is added to the port).  Stops at the first error.
 */
static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port,
				    struct switchdev_trans *trans, int flags,
				    __be16 vlan_id)
{
	int err = 0;
	int i;

	for (i = 0; i < OFDPA_CTRL_MAX; i++) {
		if (ofdpa_port->ctrls[i]) {
			err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
						   &ofdpa_ctrls[i], vlan_id);
			if (err)
				return err;
		}
	}

	return err;
}

/* Apply one ctrl to every VLAN currently set in the port's bitmap. */
static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port,
			   struct switchdev_trans *trans, int flags,
			   const struct ofdpa_ctrl *ctrl)
{
	u16 vid;
	int err = 0;

	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, ofdpa_port->vlan_bitmap))
			continue;
		err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags,
					   ctrl, htons(vid));
		if (err)
			break;
	}

	return err;
}

/* Add or remove (OFDPA_OP_FLAG_REMOVE in @flags) VLAN @vid on the port:
 * toggles the port's VLAN bitmap bit and programs ctrl entries, L2
 * groups, the flood group, and the VLAN table entry for the
 * corresponding internal VLAN id.
 */
static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port,
			   struct switchdev_trans *trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
			ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = ofdpa_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & OFDPA_OP_FLAG_REMOVE);
	int err;

	/* vid 0 is mapped to the port's internal VLAN; untagged reports
	 * whether frames on this VLAN are untagged on the wire.
	 */
	internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged);

	if (adding &&
	    test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already added */
	else if (!adding &&
		 !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap))
		return 0; /* already removed */

	change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap);

	if (adding) {
		err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags,
					       internal_vlan_id);
		if (err) {
			netdev_err(ofdpa_port->dev,
				   "Error (%d) port ctrl vlan add\n", err);
			goto
err_out; } } err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags, internal_vlan_id, untagged); if (err) { netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err); goto err_out; } err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags, internal_vlan_id); if (err) { netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err); goto err_out; } err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags, in_pport, vlan_id, vlan_id_mask, goto_tbl, untagged, internal_vlan_id); if (err) netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err); err_out: if (switchdev_trans_ph_prepare(trans)) change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap); return err; } static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags) { enum rocker_of_dpa_table_id goto_tbl; u32 in_pport; u32 in_pport_mask; int err; /* Normal Ethernet Frames. Matches pkts from any local physical * ports. Goto VLAN tbl. */ in_pport = 0; in_pport_mask = 0xffff0000; goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags, in_pport, in_pport_mask, goto_tbl); if (err) netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err); return err; } struct ofdpa_fdb_learn_work { struct work_struct work; struct ofdpa_port *ofdpa_port; struct switchdev_trans *trans; int flags; u8 addr[ETH_ALEN]; u16 vid; }; static void ofdpa_port_fdb_learn_work(struct work_struct *work) { const struct ofdpa_fdb_learn_work *lw = container_of(work, struct ofdpa_fdb_learn_work, work); bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE); bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED); struct switchdev_notifier_fdb_info info; info.addr = lw->addr; info.vid = lw->vid; rtnl_lock(); if (learned && removing) call_switchdev_notifiers(SWITCHDEV_FDB_DEL, lw->ofdpa_port->dev, &info.info); else if (learned && !removing) call_switchdev_notifiers(SWITCHDEV_FDB_ADD, lw->ofdpa_port->dev, &info.info); rtnl_unlock(); 
ofdpa_kfree(lw->trans, work); } static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags, const u8 *addr, __be16 vlan_id) { struct ofdpa_fdb_learn_work *lw; enum rocker_of_dpa_table_id goto_tbl = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; u32 out_pport = ofdpa_port->pport; u32 tunnel_id = 0; u32 group_id = ROCKER_GROUP_NONE; bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC); bool copy_to_cpu = false; int err; if (ofdpa_port_is_bridged(ofdpa_port)) group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); if (!(flags & OFDPA_OP_FLAG_REFRESH)) { err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr, NULL, vlan_id, tunnel_id, goto_tbl, group_id, copy_to_cpu); if (err) return err; } if (!syncing) return 0; if (!ofdpa_port_is_bridged(ofdpa_port)) return 0; lw = ofdpa_kzalloc(trans, flags, sizeof(*lw)); if (!lw) return -ENOMEM; INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work); lw->ofdpa_port = ofdpa_port; lw->trans = trans; lw->flags = flags; ether_addr_copy(lw->addr, addr); lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id); if (switchdev_trans_ph_prepare(trans)) ofdpa_kfree(trans, lw); else schedule_work(&lw->work); return 0; } static struct ofdpa_fdb_tbl_entry * ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa, const struct ofdpa_fdb_tbl_entry *match) { struct ofdpa_fdb_tbl_entry *found; hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32) if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) return found; return NULL; } static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, const unsigned char *addr, __be16 vlan_id, int flags) { struct ofdpa *ofdpa = ofdpa_port->ofdpa; struct ofdpa_fdb_tbl_entry *fdb; struct ofdpa_fdb_tbl_entry *found; bool removing = (flags & OFDPA_OP_FLAG_REMOVE); unsigned long lock_flags; fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb)); if (!fdb) return -ENOMEM; fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED); fdb->touched = jiffies; 
fdb->key.ofdpa_port = ofdpa_port; ether_addr_copy(fdb->key.addr, addr); fdb->key.vlan_id = vlan_id; fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); found = ofdpa_fdb_tbl_find(ofdpa, fdb); if (found) { found->touched = jiffies; if (removing) { ofdpa_kfree(trans, fdb); if (!switchdev_trans_ph_prepare(trans)) hash_del(&found->entry); } } else if (!removing) { if (!switchdev_trans_ph_prepare(trans)) hash_add(ofdpa->fdb_tbl, &fdb->entry, fdb->key_crc32); } spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); /* Check if adding and already exists, or removing and can't find */ if (!found != !removing) { ofdpa_kfree(trans, fdb); if (!found && removing) return 0; /* Refreshing existing to update aging timers */ flags |= OFDPA_OP_FLAG_REFRESH; } return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id); } static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags) { struct ofdpa *ofdpa = ofdpa_port->ofdpa; struct ofdpa_fdb_tbl_entry *found; unsigned long lock_flags; struct hlist_node *tmp; int bkt; int err = 0; if (ofdpa_port->stp_state == BR_STATE_LEARNING || ofdpa_port->stp_state == BR_STATE_FORWARDING) return 0; flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE; spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) { if (found->key.ofdpa_port != ofdpa_port) continue; if (!found->learned) continue; err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags, found->key.addr, found->key.vlan_id); if (err) goto err_out; if (!switchdev_trans_ph_prepare(trans)) hash_del(&found->entry); } err_out: spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); return err; } static void ofdpa_fdb_cleanup(unsigned long data) { struct ofdpa *ofdpa = (struct ofdpa *)data; struct ofdpa_port *ofdpa_port; struct ofdpa_fdb_tbl_entry *entry; struct hlist_node *tmp; unsigned long next_timer = jiffies + 
ofdpa->ageing_time; unsigned long expires; unsigned long lock_flags; int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_LEARNED; int bkt; spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) { if (!entry->learned) continue; ofdpa_port = entry->key.ofdpa_port; expires = entry->touched + ofdpa_port->ageing_time; if (time_before_eq(expires, jiffies)) { ofdpa_port_fdb_learn(ofdpa_port, NULL, flags, entry->key.addr, entry->key.vlan_id); hash_del(&entry->entry); } else if (time_before(expires, next_timer)) { next_timer = expires; } } spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer)); } static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags, __be16 vlan_id) { u32 in_pport_mask = 0xffffffff; __be16 eth_type; const u8 *dst_mac_mask = ff_mac; __be16 vlan_id_mask = htons(0xffff); bool copy_to_cpu = false; int err; if (ntohs(vlan_id) == 0) vlan_id = ofdpa_port->internal_vlan_id; eth_type = htons(ETH_P_IP); err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans, ofdpa_port->pport, in_pport_mask, eth_type, ofdpa_port->dev->dev_addr, dst_mac_mask, vlan_id, vlan_id_mask, copy_to_cpu, flags); if (err) return err; eth_type = htons(ETH_P_IPV6); err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans, ofdpa_port->pport, in_pport_mask, eth_type, ofdpa_port->dev->dev_addr, dst_mac_mask, vlan_id, vlan_id_mask, copy_to_cpu, flags); return err; } static int ofdpa_port_fwding(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags) { bool pop_vlan; u32 out_pport; __be16 vlan_id; u16 vid; int err; /* Port will be forwarding-enabled if its STP state is LEARNING * or FORWARDING. Traffic from CPU can still egress, regardless of * port STP state. Use L2 interface group on port VLANs as a way * to toggle port forwarding: if forwarding is disabled, L2 * interface group will not exist. 
*/ if (ofdpa_port->stp_state != BR_STATE_LEARNING && ofdpa_port->stp_state != BR_STATE_FORWARDING) flags |= OFDPA_OP_FLAG_REMOVE; out_pport = ofdpa_port->pport; for (vid = 1; vid < VLAN_N_VID; vid++) { if (!test_bit(vid, ofdpa_port->vlan_bitmap)) continue; vlan_id = htons(vid); pop_vlan = ofdpa_vlan_id_is_internal(vlan_id); err = ofdpa_group_l2_interface(ofdpa_port, trans, flags, vlan_id, out_pport, pop_vlan); if (err) { netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n", err, out_pport); return err; } } return 0; } static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, int flags, u8 state) { bool want[OFDPA_CTRL_MAX] = { 0, }; bool prev_ctrls[OFDPA_CTRL_MAX]; u8 uninitialized_var(prev_state); int err; int i; if (switchdev_trans_ph_prepare(trans)) { memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls)); prev_state = ofdpa_port->stp_state; } if (ofdpa_port->stp_state == state) return 0; ofdpa_port->stp_state = state; switch (state) { case BR_STATE_DISABLED: /* port is completely disabled */ break; case BR_STATE_LISTENING: case BR_STATE_BLOCKING: want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true; break; case BR_STATE_LEARNING: case BR_STATE_FORWARDING: if (!ofdpa_port_is_ovsed(ofdpa_port)) want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true; want[OFDPA_CTRL_IPV4_MCAST] = true; want[OFDPA_CTRL_IPV6_MCAST] = true; if (ofdpa_port_is_bridged(ofdpa_port)) want[OFDPA_CTRL_DFLT_BRIDGING] = true; else if (ofdpa_port_is_ovsed(ofdpa_port)) want[OFDPA_CTRL_DFLT_OVS] = true; else want[OFDPA_CTRL_LOCAL_ARP] = true; break; } for (i = 0; i < OFDPA_CTRL_MAX; i++) { if (want[i] != ofdpa_port->ctrls[i]) { int ctrl_flags = flags | (want[i] ? 
0 : OFDPA_OP_FLAG_REMOVE); err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags, &ofdpa_ctrls[i]); if (err) goto err_out; ofdpa_port->ctrls[i] = want[i]; } } err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags); if (err) goto err_out; err = ofdpa_port_fwding(ofdpa_port, trans, flags); err_out: if (switchdev_trans_ph_prepare(trans)) { memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls)); ofdpa_port->stp_state = prev_state; } return err; } static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags) { if (ofdpa_port_is_bridged(ofdpa_port)) /* bridge STP will enable port */ return 0; /* port is not bridged, so simulate going to FORWARDING state */ return ofdpa_port_stp_update(ofdpa_port, NULL, flags, BR_STATE_FORWARDING); } static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags) { if (ofdpa_port_is_bridged(ofdpa_port)) /* bridge STP will disable port */ return 0; /* port is not bridged, so simulate going to DISABLED state */ return ofdpa_port_stp_update(ofdpa_port, NULL, flags, BR_STATE_DISABLED); } static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, u16 vid, u16 flags) { int err; /* XXX deal with flags for PVID and untagged */ err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid); if (err) return err; err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid)); if (err) ofdpa_port_vlan(ofdpa_port, trans, OFDPA_OP_FLAG_REMOVE, vid); return err; } static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port, u16 vid, u16 flags) { int err; err = ofdpa_port_router_mac(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE, htons(vid)); if (err) return err; return ofdpa_port_vlan(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE, vid); } static struct ofdpa_internal_vlan_tbl_entry * ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex) { struct ofdpa_internal_vlan_tbl_entry *found; hash_for_each_possible(ofdpa->internal_vlan_tbl, found, entry, ifindex) { if (found->ifindex == ifindex) return found; } return 
NULL; } static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port, int ifindex) { struct ofdpa *ofdpa = ofdpa_port->ofdpa; struct ofdpa_internal_vlan_tbl_entry *entry; struct ofdpa_internal_vlan_tbl_entry *found; unsigned long lock_flags; int i; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return 0; entry->ifindex = ifindex; spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags); found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex); if (found) { kfree(entry); goto found; } found = entry; hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex); for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) { if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap)) continue; found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i); goto found; } netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n"); found: found->ref_count++; spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags); return found->vlan_id; } static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, struct switchdev_trans *trans, __be32 dst, int dst_len, struct fib_info *fi, u32 tb_id, int flags) { const struct fib_nh *nh; __be16 eth_type = htons(ETH_P_IP); __be32 dst_mask = inet_make_mask(dst_len); __be16 internal_vlan_id = ofdpa_port->internal_vlan_id; u32 priority = fi->fib_priority; enum rocker_of_dpa_table_id goto_tbl = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; u32 group_id; bool nh_on_port; bool has_gw; u32 index; int err; /* XXX support ECMP */ nh = fi->fib_nh; nh_on_port = (fi->fib_dev == ofdpa_port->dev); has_gw = !!nh->nh_gw; if (has_gw && nh_on_port) { err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags, nh->nh_gw, &index); if (err) return err; group_id = ROCKER_GROUP_L3_UNICAST(index); } else { /* Send to CPU for processing */ group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0); } err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst, dst_mask, priority, goto_tbl, group_id, fi, flags); if (err) netdev_err(ofdpa_port->dev, "Error (%d) 
IPv4 route %pI4\n", err, &dst); return err; } static void ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port, int ifindex) { struct ofdpa *ofdpa = ofdpa_port->ofdpa; struct ofdpa_internal_vlan_tbl_entry *found; unsigned long lock_flags; unsigned long bit; spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags); found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex); if (!found) { netdev_err(ofdpa_port->dev, "ifindex (%d) not found in internal VLAN tbl\n", ifindex); goto not_found; } if (--found->ref_count <= 0) { bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE; clear_bit(bit, ofdpa->internal_vlan_bitmap); hash_del(&found->entry); kfree(found); } not_found: spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags); } /********************************** * Rocker world ops implementation **********************************/ static int ofdpa_init(struct rocker *rocker) { struct ofdpa *ofdpa = rocker->wpriv; ofdpa->rocker = rocker; hash_init(ofdpa->flow_tbl); spin_lock_init(&ofdpa->flow_tbl_lock); hash_init(ofdpa->group_tbl); spin_lock_init(&ofdpa->group_tbl_lock); hash_init(ofdpa->fdb_tbl); spin_lock_init(&ofdpa->fdb_tbl_lock); hash_init(ofdpa->internal_vlan_tbl); spin_lock_init(&ofdpa->internal_vlan_tbl_lock); hash_init(ofdpa->neigh_tbl); spin_lock_init(&ofdpa->neigh_tbl_lock); setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, (unsigned long) ofdpa); mod_timer(&ofdpa->fdb_cleanup_timer, jiffies); ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME; return 0; } static void ofdpa_fini(struct rocker *rocker) { struct ofdpa *ofdpa = rocker->wpriv; unsigned long flags; struct ofdpa_flow_tbl_entry *flow_entry; struct ofdpa_group_tbl_entry *group_entry; struct ofdpa_fdb_tbl_entry *fdb_entry; struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry; struct ofdpa_neigh_tbl_entry *neigh_entry; struct hlist_node *tmp; int bkt; del_timer_sync(&ofdpa->fdb_cleanup_timer); spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags); 
hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) hash_del(&flow_entry->entry); spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags); spin_lock_irqsave(&ofdpa->group_tbl_lock, flags); hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry) hash_del(&group_entry->entry); spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags); spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags); hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry) hash_del(&fdb_entry->entry); spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags); spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags); hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt, tmp, internal_vlan_entry, entry) hash_del(&internal_vlan_entry->entry); spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags); spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags); hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry) hash_del(&neigh_entry->entry); spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags); } static int ofdpa_port_pre_init(struct rocker_port *rocker_port) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; ofdpa_port->ofdpa = rocker_port->rocker->wpriv; ofdpa_port->rocker_port = rocker_port; ofdpa_port->dev = rocker_port->dev; ofdpa_port->pport = rocker_port->pport; ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME; return 0; } static int ofdpa_port_init(struct rocker_port *rocker_port) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; int err; rocker_port_set_learning(rocker_port, !!(ofdpa_port->brport_flags & BR_LEARNING)); err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0); if (err) { netdev_err(ofdpa_port->dev, "install ig port table failed\n"); return err; } ofdpa_port->internal_vlan_id = ofdpa_port_internal_vlan_id_get(ofdpa_port, ofdpa_port->dev->ifindex); err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); if (err) { netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n"); goto 
err_untagged_vlan;
	}
	return 0;

err_untagged_vlan:
	/* roll back the ingress-port table entry installed above */
	ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
	return err;
}

/* Tear down the port's ingress-port table entry on port teardown. */
static void ofdpa_port_fini(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE);
}

/* ndo_open hook: enable forwarding (simulated FORWARDING STP state when
 * the port is not bridged).
 */
static int ofdpa_port_open(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_fwd_enable(ofdpa_port, 0);
}

/* ndo_stop hook: disable forwarding without waiting for hw completion. */
static void ofdpa_port_stop(struct rocker_port *rocker_port)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT);
}

/* switchdev attr: set the port's STP state. */
static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
					 u8 state,
					 struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	return ofdpa_port_stp_update(ofdpa_port, trans, 0, state);
}

/* switchdev attr: set bridge port flags.  Only BR_LEARNING changes are
 * pushed to hardware; in the prepare phase the flags are restored so the
 * commit phase applies them for real.
 */
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
					    unsigned long brport_flags,
					    struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	unsigned long orig_flags;
	int err = 0;

	orig_flags = ofdpa_port->brport_flags;
	ofdpa_port->brport_flags = brport_flags;
	if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
	    !switchdev_trans_ph_prepare(trans))
		err = rocker_port_set_learning(ofdpa_port->rocker_port,
					       !!(ofdpa_port->brport_flags & BR_LEARNING));

	if (switchdev_trans_ph_prepare(trans))
		ofdpa_port->brport_flags = orig_flags;

	return err;
}

/* switchdev attr: report the port's bridge flags. */
static int
ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port,
				 unsigned long *p_brport_flags)
{
	const struct ofdpa_port *ofdpa_port = rocker_port->wpriv;

	*p_brport_flags = ofdpa_port->brport_flags;
	return 0;
}

/* switchdev attr: set the FDB ageing time (commit phase only); the
 * shared cleanup timer is re-armed immediately so the new, possibly
 * shorter, interval takes effect.
 */
static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
				       u32 ageing_time,
				       struct switchdev_trans *trans)
{
	struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
	struct ofdpa *ofdpa = ofdpa_port->ofdpa;

	if (!switchdev_trans_ph_prepare(trans)) {
		ofdpa_port->ageing_time =
clock_t_to_jiffies(ageing_time); if (ofdpa_port->ageing_time < ofdpa->ageing_time) ofdpa->ageing_time = ofdpa_port->ageing_time; mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies); } return 0; } static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; u16 vid; int err; for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags); if (err) return err; } return 0; } static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port, const struct switchdev_obj_port_vlan *vlan) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; u16 vid; int err; for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags); if (err) return err; } return 0; } static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port, struct switchdev_obj_port_vlan *vlan, switchdev_obj_dump_cb_t *cb) { const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; u16 vid; int err = 0; for (vid = 1; vid < VLAN_N_VID; vid++) { if (!test_bit(vid, ofdpa_port->vlan_bitmap)) continue; vlan->flags = 0; if (ofdpa_vlan_id_is_internal(htons(vid))) vlan->flags |= BRIDGE_VLAN_INFO_PVID; vlan->vid_begin = vlan->vid_end = vid; err = cb(&vlan->obj); if (err) break; } return err; } static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL); if (!ofdpa_port_is_bridged(ofdpa_port)) return -EINVAL; return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0); } static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port, const struct switchdev_obj_port_fdb *fdb) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; __be16 vlan_id = 
ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL); int flags = OFDPA_OP_FLAG_REMOVE; if (!ofdpa_port_is_bridged(ofdpa_port)) return -EINVAL; return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags); } static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port, struct switchdev_obj_port_fdb *fdb, switchdev_obj_dump_cb_t *cb) { const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; struct ofdpa *ofdpa = ofdpa_port->ofdpa; struct ofdpa_fdb_tbl_entry *found; struct hlist_node *tmp; unsigned long lock_flags; int bkt; int err = 0; spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) { if (found->key.ofdpa_port != ofdpa_port) continue; ether_addr_copy(fdb->addr, found->key.addr); fdb->ndm_state = NUD_REACHABLE; fdb->vid = ofdpa_port_vlan_to_vid(ofdpa_port, found->key.vlan_id); err = cb(&fdb->obj); if (err) break; } spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); return err; } static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port, struct net_device *bridge) { int err; /* Port is joining bridge, so the internal VLAN for the * port is going to change to the bridge internal VLAN. * Let's remove untagged VLAN (vid=0) from port and * re-add once internal VLAN has changed. 
*/ err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0); if (err) return err; ofdpa_port_internal_vlan_id_put(ofdpa_port, ofdpa_port->dev->ifindex); ofdpa_port->internal_vlan_id = ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex); ofdpa_port->bridge_dev = bridge; return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); } static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port) { int err; err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0); if (err) return err; ofdpa_port_internal_vlan_id_put(ofdpa_port, ofdpa_port->bridge_dev->ifindex); ofdpa_port->internal_vlan_id = ofdpa_port_internal_vlan_id_get(ofdpa_port, ofdpa_port->dev->ifindex); ofdpa_port->bridge_dev = NULL; err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); if (err) return err; if (ofdpa_port->dev->flags & IFF_UP) err = ofdpa_port_fwd_enable(ofdpa_port, 0); return err; } static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port, struct net_device *master) { int err; ofdpa_port->bridge_dev = master; err = ofdpa_port_fwd_disable(ofdpa_port, 0); if (err) return err; err = ofdpa_port_fwd_enable(ofdpa_port, 0); return err; } static int ofdpa_port_master_linked(struct rocker_port *rocker_port, struct net_device *master) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; int err = 0; if (netif_is_bridge_master(master)) err = ofdpa_port_bridge_join(ofdpa_port, master); else if (netif_is_ovs_master(master)) err = ofdpa_port_ovs_changed(ofdpa_port, master); return err; } static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port, struct net_device *master) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; int err = 0; if (ofdpa_port_is_bridged(ofdpa_port)) err = ofdpa_port_bridge_leave(ofdpa_port); else if (ofdpa_port_is_ovsed(ofdpa_port)) err = ofdpa_port_ovs_changed(ofdpa_port, NULL); return err; } static int ofdpa_port_neigh_update(struct rocker_port *rocker_port, struct neighbour *n) { struct ofdpa_port *ofdpa_port = 
rocker_port->wpriv; int flags = (n->nud_state & NUD_VALID ? 0 : OFDPA_OP_FLAG_REMOVE) | OFDPA_OP_FLAG_NOWAIT; __be32 ip_addr = *(__be32 *) n->primary_key; return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha); } static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port, struct neighbour *n) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT; __be32 ip_addr = *(__be32 *) n->primary_key; return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha); } static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, const unsigned char *addr, __be16 vlan_id) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED; if (ofdpa_port->stp_state != BR_STATE_LEARNING && ofdpa_port->stp_state != BR_STATE_FORWARDING) return 0; return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags); } static struct ofdpa_port *ofdpa_port_dev_lower_find(struct net_device *dev, struct rocker *rocker) { struct rocker_port *rocker_port; rocker_port = rocker_port_dev_lower_find(dev, rocker); return rocker_port ? 
rocker_port->wpriv : NULL; } static int ofdpa_fib4_add(struct rocker *rocker, const struct fib_entry_notifier_info *fen_info) { struct ofdpa *ofdpa = rocker->wpriv; struct ofdpa_port *ofdpa_port; int err; if (ofdpa->fib_aborted) return 0; ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker); if (!ofdpa_port) return 0; err = ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst), fen_info->dst_len, fen_info->fi, fen_info->tb_id, 0); if (err) return err; fib_info_offload_inc(fen_info->fi); return 0; } static int ofdpa_fib4_del(struct rocker *rocker, const struct fib_entry_notifier_info *fen_info) { struct ofdpa *ofdpa = rocker->wpriv; struct ofdpa_port *ofdpa_port; if (ofdpa->fib_aborted) return 0; ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker); if (!ofdpa_port) return 0; fib_info_offload_dec(fen_info->fi); return ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fen_info->dst), fen_info->dst_len, fen_info->fi, fen_info->tb_id, OFDPA_OP_FLAG_REMOVE); } static void ofdpa_fib4_abort(struct rocker *rocker) { struct ofdpa *ofdpa = rocker->wpriv; struct ofdpa_port *ofdpa_port; struct ofdpa_flow_tbl_entry *flow_entry; struct hlist_node *tmp; unsigned long flags; int bkt; if (ofdpa->fib_aborted) return; spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags); hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) { if (flow_entry->key.tbl_id != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING) continue; ofdpa_port = ofdpa_port_dev_lower_find(flow_entry->fi->fib_dev, rocker); if (!ofdpa_port) continue; fib_info_offload_dec(flow_entry->fi); ofdpa_flow_tbl_del(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE, flow_entry); } spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags); ofdpa->fib_aborted = true; } struct rocker_world_ops rocker_ofdpa_ops = { .kind = "ofdpa", .priv_size = sizeof(struct ofdpa), .port_priv_size = sizeof(struct ofdpa_port), .mode = ROCKER_PORT_MODE_OF_DPA, .init = ofdpa_init, .fini = ofdpa_fini, .port_pre_init = ofdpa_port_pre_init, 
.port_init = ofdpa_port_init, .port_fini = ofdpa_port_fini, .port_open = ofdpa_port_open, .port_stop = ofdpa_port_stop, .port_attr_stp_state_set = ofdpa_port_attr_stp_state_set, .port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set, .port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get, .port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set, .port_obj_vlan_add = ofdpa_port_obj_vlan_add, .port_obj_vlan_del = ofdpa_port_obj_vlan_del, .port_obj_vlan_dump = ofdpa_port_obj_vlan_dump, .port_obj_fdb_add = ofdpa_port_obj_fdb_add, .port_obj_fdb_del = ofdpa_port_obj_fdb_del, .port_obj_fdb_dump = ofdpa_port_obj_fdb_dump, .port_master_linked = ofdpa_port_master_linked, .port_master_unlinked = ofdpa_port_master_unlinked, .port_neigh_update = ofdpa_port_neigh_update, .port_neigh_destroy = ofdpa_port_neigh_destroy, .port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen, .fib4_add = ofdpa_fib4_add, .fib4_del = ofdpa_fib4_del, .fib4_abort = ofdpa_fib4_abort, };
gpl-2.0
tescande/linux-nfc-next-stable
drivers/media/platform/s3c-camif/camif-core.c
49
15731
/* * s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver * * Copyright (C) 2012 Sylwester Nawrocki <sylvester.nawrocki@gmail.com> * Copyright (C) 2012 Tomasz Figa <tomasz.figa@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published * by the Free Software Foundation, either version 2 of the License, * or (at your option) any later version. */ #define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__ #include <linux/bug.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/gpio.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/version.h> #include <media/media-device.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-ioctl.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-contig.h> #include "camif-core.h" static char *camif_clocks[CLK_MAX_NUM] = { /* HCLK CAMIF clock */ [CLK_GATE] = "camif", /* CAMIF / external camera sensor master clock */ [CLK_CAM] = "camera", }; static const struct camif_fmt camif_formats[] = { { .name = "YUV 4:2:2 planar, Y/Cb/Cr", .fourcc = V4L2_PIX_FMT_YUV422P, .depth = 16, .ybpp = 1, .color = IMG_FMT_YCBCR422P, .colplanes = 3, .flags = FMT_FL_S3C24XX_CODEC | FMT_FL_S3C64XX, }, { .name = "YUV 4:2:0 planar, Y/Cb/Cr", .fourcc = V4L2_PIX_FMT_YUV420, .depth = 12, .ybpp = 1, .color = IMG_FMT_YCBCR420, .colplanes = 3, .flags = FMT_FL_S3C24XX_CODEC | FMT_FL_S3C64XX, }, { .name = "YVU 4:2:0 planar, Y/Cr/Cb", .fourcc = V4L2_PIX_FMT_YVU420, .depth = 12, .ybpp = 1, .color = IMG_FMT_YCRCB420, .colplanes = 3, .flags = FMT_FL_S3C24XX_CODEC | FMT_FL_S3C64XX, }, { .name = "RGB565, 16 bpp", .fourcc = V4L2_PIX_FMT_RGB565X, .depth = 16, .ybpp = 2, .color = 
IMG_FMT_RGB565, .colplanes = 1, .flags = FMT_FL_S3C24XX_PREVIEW | FMT_FL_S3C64XX, }, { .name = "XRGB8888, 32 bpp", .fourcc = V4L2_PIX_FMT_RGB32, .depth = 32, .ybpp = 4, .color = IMG_FMT_XRGB8888, .colplanes = 1, .flags = FMT_FL_S3C24XX_PREVIEW | FMT_FL_S3C64XX, }, { .name = "BGR666", .fourcc = V4L2_PIX_FMT_BGR666, .depth = 32, .ybpp = 4, .color = IMG_FMT_RGB666, .colplanes = 1, .flags = FMT_FL_S3C64XX, } }; /** * s3c_camif_find_format() - lookup camif color format by fourcc or an index * @pixelformat: fourcc to match, ignored if null * @index: index to the camif_formats array, ignored if negative */ const struct camif_fmt *s3c_camif_find_format(struct camif_vp *vp, const u32 *pixelformat, int index) { const struct camif_fmt *fmt, *def_fmt = NULL; unsigned int i; int id = 0; if (index >= (int)ARRAY_SIZE(camif_formats)) return NULL; for (i = 0; i < ARRAY_SIZE(camif_formats); ++i) { fmt = &camif_formats[i]; if (vp && !(vp->fmt_flags & fmt->flags)) continue; if (pixelformat && fmt->fourcc == *pixelformat) return fmt; if (index == id) def_fmt = fmt; id++; } return def_fmt; } static int camif_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift) { unsigned int sh = 6; if (src >= 64 * tar) return -EINVAL; while (sh--) { unsigned int tmp = 1 << sh; if (src >= tar * tmp) { *shift = sh, *ratio = tmp; return 0; } } *shift = 0, *ratio = 1; return 0; } int s3c_camif_get_scaler_config(struct camif_vp *vp, struct camif_scaler *scaler) { struct v4l2_rect *camif_crop = &vp->camif->camif_crop; int source_x = camif_crop->width; int source_y = camif_crop->height; int target_x = vp->out_frame.rect.width; int target_y = vp->out_frame.rect.height; int ret; if (vp->rotation == 90 || vp->rotation == 270) swap(target_x, target_y); ret = camif_get_scaler_factor(source_x, target_x, &scaler->pre_h_ratio, &scaler->h_shift); if (ret < 0) return ret; ret = camif_get_scaler_factor(source_y, target_y, &scaler->pre_v_ratio, &scaler->v_shift); if (ret < 0) return ret; scaler->pre_dst_width = 
source_x / scaler->pre_h_ratio; scaler->pre_dst_height = source_y / scaler->pre_v_ratio; scaler->main_h_ratio = (source_x << 8) / (target_x << scaler->h_shift); scaler->main_v_ratio = (source_y << 8) / (target_y << scaler->v_shift); scaler->scaleup_h = (target_x >= source_x); scaler->scaleup_v = (target_y >= source_y); scaler->copy = 0; pr_debug("H: ratio: %u, shift: %u. V: ratio: %u, shift: %u.\n", scaler->pre_h_ratio, scaler->h_shift, scaler->pre_v_ratio, scaler->v_shift); pr_debug("Source: %dx%d, Target: %dx%d, scaleup_h/v: %d/%d\n", source_x, source_y, target_x, target_y, scaler->scaleup_h, scaler->scaleup_v); return 0; } static int camif_register_sensor(struct camif_dev *camif) { struct s3c_camif_sensor_info *sensor = &camif->pdata.sensor; struct v4l2_device *v4l2_dev = &camif->v4l2_dev; struct i2c_adapter *adapter; struct v4l2_subdev_format format; struct v4l2_subdev *sd; int ret; camif->sensor.sd = NULL; if (sensor->i2c_board_info.addr == 0) return -EINVAL; adapter = i2c_get_adapter(sensor->i2c_bus_num); if (adapter == NULL) { v4l2_warn(v4l2_dev, "failed to get I2C adapter %d\n", sensor->i2c_bus_num); return -EPROBE_DEFER; } sd = v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &sensor->i2c_board_info, NULL); if (sd == NULL) { i2c_put_adapter(adapter); v4l2_warn(v4l2_dev, "failed to acquire subdev %s\n", sensor->i2c_board_info.type); return -EPROBE_DEFER; } camif->sensor.sd = sd; v4l2_info(v4l2_dev, "registered sensor subdevice %s\n", sd->name); /* Get initial pixel format and set it at the camif sink pad */ format.pad = 0; format.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format); if (ret < 0) return 0; format.pad = CAMIF_SD_PAD_SINK; v4l2_subdev_call(&camif->subdev, pad, set_fmt, NULL, &format); v4l2_info(sd, "Initial format from sensor: %dx%d, %#x\n", format.format.width, format.format.height, format.format.code); return 0; } static void camif_unregister_sensor(struct camif_dev *camif) { struct v4l2_subdev *sd = 
camif->sensor.sd; struct i2c_client *client = sd ? v4l2_get_subdevdata(sd) : NULL; struct i2c_adapter *adapter; if (client == NULL) return; adapter = client->adapter; v4l2_device_unregister_subdev(sd); camif->sensor.sd = NULL; i2c_unregister_device(client); i2c_put_adapter(adapter); } static int camif_create_media_links(struct camif_dev *camif) { int i, ret; ret = media_create_pad_link(&camif->sensor.sd->entity, 0, &camif->subdev.entity, CAMIF_SD_PAD_SINK, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) return ret; for (i = 1; i < CAMIF_SD_PADS_NUM && !ret; i++) { ret = media_create_pad_link(&camif->subdev.entity, i, &camif->vp[i - 1].vdev.entity, 0, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); } return ret; } static int camif_register_video_nodes(struct camif_dev *camif) { int ret = s3c_camif_register_video_node(camif, VP_CODEC); if (ret < 0) return ret; return s3c_camif_register_video_node(camif, VP_PREVIEW); } static void camif_unregister_video_nodes(struct camif_dev *camif) { s3c_camif_unregister_video_node(camif, VP_CODEC); s3c_camif_unregister_video_node(camif, VP_PREVIEW); } static void camif_unregister_media_entities(struct camif_dev *camif) { camif_unregister_video_nodes(camif); camif_unregister_sensor(camif); s3c_camif_unregister_subdev(camif); } /* * Media device */ static int camif_media_dev_init(struct camif_dev *camif) { struct media_device *md = &camif->media_dev; struct v4l2_device *v4l2_dev = &camif->v4l2_dev; unsigned int ip_rev = camif->variant->ip_revision; int ret; memset(md, 0, sizeof(*md)); snprintf(md->model, sizeof(md->model), "SAMSUNG S3C%s CAMIF", ip_rev == S3C6410_CAMIF_IP_REV ? 
"6410" : "244X"); strlcpy(md->bus_info, "platform", sizeof(md->bus_info)); md->hw_revision = ip_rev; md->dev = camif->dev; strlcpy(v4l2_dev->name, "s3c-camif", sizeof(v4l2_dev->name)); v4l2_dev->mdev = md; media_device_init(md); ret = v4l2_device_register(camif->dev, v4l2_dev); if (ret < 0) return ret; return ret; } static void camif_clk_put(struct camif_dev *camif) { int i; for (i = 0; i < CLK_MAX_NUM; i++) { if (IS_ERR(camif->clock[i])) continue; clk_unprepare(camif->clock[i]); clk_put(camif->clock[i]); camif->clock[i] = ERR_PTR(-EINVAL); } } static int camif_clk_get(struct camif_dev *camif) { int ret, i; for (i = 1; i < CLK_MAX_NUM; i++) camif->clock[i] = ERR_PTR(-EINVAL); for (i = 0; i < CLK_MAX_NUM; i++) { camif->clock[i] = clk_get(camif->dev, camif_clocks[i]); if (IS_ERR(camif->clock[i])) { ret = PTR_ERR(camif->clock[i]); goto err; } ret = clk_prepare(camif->clock[i]); if (ret < 0) { clk_put(camif->clock[i]); camif->clock[i] = NULL; goto err; } } return 0; err: camif_clk_put(camif); dev_err(camif->dev, "failed to get clock: %s\n", camif_clocks[i]); return ret; } /* * The CAMIF device has two relatively independent data processing paths * that can source data from memory or the common camera input frontend. * Register interrupts for each data processing path (camif_vp). 
*/ static int camif_request_irqs(struct platform_device *pdev, struct camif_dev *camif) { int irq, ret, i; for (i = 0; i < CAMIF_VP_NUM; i++) { struct camif_vp *vp = &camif->vp[i]; init_waitqueue_head(&vp->irq_queue); irq = platform_get_irq(pdev, i); if (irq <= 0) { dev_err(&pdev->dev, "failed to get IRQ %d\n", i); return -ENXIO; } ret = devm_request_irq(&pdev->dev, irq, s3c_camif_irq_handler, 0, dev_name(&pdev->dev), vp); if (ret < 0) { dev_err(&pdev->dev, "failed to install IRQ: %d\n", ret); break; } } return ret; } static int s3c_camif_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct s3c_camif_plat_data *pdata = dev->platform_data; struct s3c_camif_drvdata *drvdata; struct camif_dev *camif; struct resource *mres; int ret = 0; camif = devm_kzalloc(dev, sizeof(*camif), GFP_KERNEL); if (!camif) return -ENOMEM; spin_lock_init(&camif->slock); mutex_init(&camif->lock); camif->dev = dev; if (!pdata || !pdata->gpio_get || !pdata->gpio_put) { dev_err(dev, "wrong platform data\n"); return -EINVAL; } camif->pdata = *pdata; drvdata = (void *)platform_get_device_id(pdev)->driver_data; camif->variant = drvdata->variant; mres = platform_get_resource(pdev, IORESOURCE_MEM, 0); camif->io_base = devm_ioremap_resource(dev, mres); if (IS_ERR(camif->io_base)) return PTR_ERR(camif->io_base); ret = camif_request_irqs(pdev, camif); if (ret < 0) return ret; ret = pdata->gpio_get(); if (ret < 0) return ret; ret = s3c_camif_create_subdev(camif); if (ret < 0) goto err_sd; ret = camif_clk_get(camif); if (ret < 0) goto err_clk; platform_set_drvdata(pdev, camif); clk_set_rate(camif->clock[CLK_CAM], camif->pdata.sensor.clock_frequency); dev_info(dev, "sensor clock frequency: %lu\n", clk_get_rate(camif->clock[CLK_CAM])); /* * Set initial pixel format, resolution and crop rectangle. * Must be done before a sensor subdev is registered as some * settings are overrode with values from sensor subdev. 
*/ s3c_camif_set_defaults(camif); pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret < 0) goto err_pm; ret = camif_media_dev_init(camif); if (ret < 0) goto err_alloc; ret = camif_register_sensor(camif); if (ret < 0) goto err_sens; ret = v4l2_device_register_subdev(&camif->v4l2_dev, &camif->subdev); if (ret < 0) goto err_sens; ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev); if (ret < 0) goto err_sens; ret = camif_register_video_nodes(camif); if (ret < 0) goto err_sens; ret = camif_create_media_links(camif); if (ret < 0) goto err_sens; ret = media_device_register(&camif->media_dev); if (ret < 0) goto err_sens; pm_runtime_put(dev); return 0; err_sens: v4l2_device_unregister(&camif->v4l2_dev); media_device_unregister(&camif->media_dev); media_device_cleanup(&camif->media_dev); camif_unregister_media_entities(camif); err_alloc: pm_runtime_put(dev); pm_runtime_disable(dev); err_pm: camif_clk_put(camif); err_clk: s3c_camif_unregister_subdev(camif); err_sd: pdata->gpio_put(); return ret; } static int s3c_camif_remove(struct platform_device *pdev) { struct camif_dev *camif = platform_get_drvdata(pdev); struct s3c_camif_plat_data *pdata = &camif->pdata; media_device_unregister(&camif->media_dev); media_device_cleanup(&camif->media_dev); camif_unregister_media_entities(camif); v4l2_device_unregister(&camif->v4l2_dev); pm_runtime_disable(&pdev->dev); camif_clk_put(camif); pdata->gpio_put(); return 0; } static int s3c_camif_runtime_resume(struct device *dev) { struct camif_dev *camif = dev_get_drvdata(dev); clk_enable(camif->clock[CLK_GATE]); /* null op on s3c244x */ clk_enable(camif->clock[CLK_CAM]); return 0; } static int s3c_camif_runtime_suspend(struct device *dev) { struct camif_dev *camif = dev_get_drvdata(dev); /* null op on s3c244x */ clk_disable(camif->clock[CLK_CAM]); clk_disable(camif->clock[CLK_GATE]); return 0; } static const struct s3c_camif_variant s3c244x_camif_variant = { .vp_pix_limits = { [VP_CODEC] = { .max_out_width = 4096, 
.max_sc_out_width = 2048, .out_width_align = 16, .min_out_width = 16, .max_height = 4096, }, [VP_PREVIEW] = { .max_out_width = 640, .max_sc_out_width = 640, .out_width_align = 16, .min_out_width = 16, .max_height = 480, } }, .pix_limits = { .win_hor_offset_align = 8, }, .ip_revision = S3C244X_CAMIF_IP_REV, }; static struct s3c_camif_drvdata s3c244x_camif_drvdata = { .variant = &s3c244x_camif_variant, .bus_clk_freq = 24000000UL, }; static const struct s3c_camif_variant s3c6410_camif_variant = { .vp_pix_limits = { [VP_CODEC] = { .max_out_width = 4096, .max_sc_out_width = 2048, .out_width_align = 16, .min_out_width = 16, .max_height = 4096, }, [VP_PREVIEW] = { .max_out_width = 4096, .max_sc_out_width = 720, .out_width_align = 16, .min_out_width = 16, .max_height = 4096, } }, .pix_limits = { .win_hor_offset_align = 8, }, .ip_revision = S3C6410_CAMIF_IP_REV, .has_img_effect = 1, .vp_offset = 0x20, }; static struct s3c_camif_drvdata s3c6410_camif_drvdata = { .variant = &s3c6410_camif_variant, .bus_clk_freq = 133000000UL, }; static const struct platform_device_id s3c_camif_driver_ids[] = { { .name = "s3c2440-camif", .driver_data = (unsigned long)&s3c244x_camif_drvdata, }, { .name = "s3c6410-camif", .driver_data = (unsigned long)&s3c6410_camif_drvdata, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(platform, s3c_camif_driver_ids); static const struct dev_pm_ops s3c_camif_pm_ops = { .runtime_suspend = s3c_camif_runtime_suspend, .runtime_resume = s3c_camif_runtime_resume, }; static struct platform_driver s3c_camif_driver = { .probe = s3c_camif_probe, .remove = s3c_camif_remove, .id_table = s3c_camif_driver_ids, .driver = { .name = S3C_CAMIF_DRIVER_NAME, .pm = &s3c_camif_pm_ops, } }; module_platform_driver(s3c_camif_driver); MODULE_AUTHOR("Sylwester Nawrocki <sylvester.nawrocki@gmail.com>"); MODULE_AUTHOR("Tomasz Figa <tomasz.figa@gmail.com>"); MODULE_DESCRIPTION("S3C24XX/S3C64XX SoC camera interface driver"); MODULE_LICENSE("GPL");
gpl-2.0
allenbh/linux
arch/mips/cavium-octeon/octeon-platform.c
49
27367
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2011 Cavium Networks
 * Copyright (C) 2008 Wind River Systems
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/usb.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-rnm-defs.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-uctlx-defs.h>

/* Octeon Random Number Generator. */
/*
 * Register the "octeon_rng" platform device with its two MMIO
 * resources (the RNM control/status CSR and the RNM load address).
 * Returns 0 on success or a negative errno; on failure after
 * allocation the device is released with platform_device_put().
 */
static int __init octeon_rng_device_init(void)
{
	struct platform_device *pd;
	int ret = 0;

	struct resource rng_resources[] = {
		{
			.flags	= IORESOURCE_MEM,
			.start	= XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS),
			.end	= XKPHYS_TO_PHYS(CVMX_RNM_CTL_STATUS) + 0xf
		}, {
			.flags	= IORESOURCE_MEM,
			.start	= cvmx_build_io_address(8, 0),
			.end	= cvmx_build_io_address(8, 0) + 0x7
		}
	};

	pd = platform_device_alloc("octeon_rng", -1);
	if (!pd) {
		ret = -ENOMEM;
		goto out;
	}

	ret = platform_device_add_resources(pd, rng_resources,
					    ARRAY_SIZE(rng_resources));
	if (ret)
		goto fail;

	ret = platform_device_add(pd);
	if (ret)
		goto fail;

	return ret;
fail:
	platform_device_put(pd);

out:
	return ret;
}
device_initcall(octeon_rng_device_init);

#ifdef CONFIG_USB

/* Serializes the reference-counted USB UCTL clock bring-up below. */
static DEFINE_MUTEX(octeon2_usb_clocks_mutex);

/* Number of outstanding octeon2_usb_clocks_start() calls. */
static int octeon2_usb_clock_start_cnt;

/*
 * Bring up the USB UCTL reference clock, PHY and HCLK.  The numbered
 * "Step N" comments follow the hardware's documented init sequence.
 * Reference counted: only the first caller performs the sequence;
 * later callers just bump the count.  If the UCTL already has HRST
 * set, the clock sequence is skipped (re-running it causes bus
 * errors) and only the per-port PHY tuning is applied.
 *
 * NOTE(review): the device-tree error paths ("goto exit") leave the
 * start count incremented without having configured the clocks --
 * apparently intentional best-effort behavior; confirm before
 * changing.
 */
static void octeon2_usb_clocks_start(struct device *dev)
{
	u64 div;
	union cvmx_uctlx_if_ena if_ena;
	union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
	union cvmx_uctlx_uphy_ctl_status uphy_ctl_status;
	union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status;
	int i;
	unsigned long io_clk_64_to_ns;
	u32 clock_rate = 12000000;	/* default refclk when no DT info */
	bool is_crystal_clock = false;

	mutex_lock(&octeon2_usb_clocks_mutex);

	octeon2_usb_clock_start_cnt++;
	if (octeon2_usb_clock_start_cnt != 1)
		goto exit;

	/* Nanoseconds per 64 I/O clocks, used for the "delay 64 io clocks" steps. */
	io_clk_64_to_ns = 64000000000ull / octeon_get_io_clock_rate();

	if (dev->of_node) {
		struct device_node *uctl_node;
		const char *clock_type;

		/* The refclk properties live on the parent UCTL node. */
		uctl_node = of_get_parent(dev->of_node);
		if (!uctl_node) {
			dev_err(dev, "No UCTL device node\n");
			goto exit;
		}
		i = of_property_read_u32(uctl_node,
					 "refclk-frequency", &clock_rate);
		if (i) {
			dev_err(dev, "No UCTL \"refclk-frequency\"\n");
			goto exit;
		}
		i = of_property_read_string(uctl_node,
					    "refclk-type", &clock_type);

		if (!i && strcmp("crystal", clock_type) == 0)
			is_crystal_clock = true;
	}

	/*
	 * Step 1: Wait for voltages stable.  That surely happened
	 * before starting the kernel.
	 *
	 * Step 2: Enable SCLK of UCTL by writing UCTL0_IF_ENA[EN] = 1
	 */
	if_ena.u64 = 0;
	if_ena.s.en = 1;
	cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64);

	/* Step 3: Configure the reference clock, PHY, and HCLK */
	clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));

	/*
	 * If the UCTL looks like it has already been started, skip
	 * the initialization, otherwise bus errors are obtained.
	 */
	if (clk_rst_ctl.s.hrst)
		goto end_clock;

	/* 3a: hold everything in reset, dividers disabled */
	clk_rst_ctl.s.p_por = 1;
	clk_rst_ctl.s.hrst = 0;
	clk_rst_ctl.s.p_prst = 0;
	clk_rst_ctl.s.h_clkdiv_rst = 0;
	clk_rst_ctl.s.o_clkdiv_rst = 0;
	clk_rst_ctl.s.h_clkdiv_en = 0;
	clk_rst_ctl.s.o_clkdiv_en = 0;
	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);

	/* 3b: select refclk source and divider for 12/24/48 MHz */
	clk_rst_ctl.s.p_refclk_sel = is_crystal_clock ? 0 : 1;
	switch (clock_rate) {
	default:
		pr_err("Invalid UCTL clock rate of %u, using 12000000 instead\n",
		       clock_rate);
		/* Fall through */
	case 12000000:
		clk_rst_ctl.s.p_refclk_div = 0;
		break;
	case 24000000:
		clk_rst_ctl.s.p_refclk_div = 1;
		break;
	case 48000000:
		clk_rst_ctl.s.p_refclk_div = 2;
		break;
	}
	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);

	/* 3c: derive HCLK divider so HCLK stays near/below 130 MHz */
	div = octeon_get_io_clock_rate() / 130000000ull;

	/* Snap to the nearest supported divider value. */
	switch (div) {
	case 0:
		div = 1;
		break;
	case 1:
	case 2:
	case 3:
	case 4:
		break;
	case 5:
		div = 4;
		break;
	case 6:
	case 7:
		div = 6;
		break;
	case 8:
	case 9:
	case 10:
	case 11:
		div = 8;
		break;
	default:
		div = 12;
		break;
	}
	clk_rst_ctl.s.h_div = div;
	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
	/* Read it back, */
	clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
	clk_rst_ctl.s.h_clkdiv_en = 1;
	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
	/* 3d */
	clk_rst_ctl.s.h_clkdiv_rst = 1;
	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);

	/* 3e: delay 64 io clocks */
	ndelay(io_clk_64_to_ns);

	/*
	 * Step 4: Program the power-on reset field in the UCTL
	 * clock-reset-control register.
	 */
	clk_rst_ctl.s.p_por = 0;
	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);

	/* Step 5: Wait 1 ms for the PHY clock to start. */
	mdelay(1);

	/*
	 * Step 6: Program the reset input from automatic test
	 * equipment field in the UPHY CSR
	 */
	uphy_ctl_status.u64 = cvmx_read_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0));
	uphy_ctl_status.s.ate_reset = 1;
	cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);

	/* Step 7: Wait for at least 10ns. */
	ndelay(10);

	/* Step 8: Clear the ATE_RESET field in the UPHY CSR. */
	uphy_ctl_status.s.ate_reset = 0;
	cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);

	/*
	 * Step 9: Wait for at least 20ns for UPHY to output PHY clock
	 * signals and OHCI_CLK48
	 */
	ndelay(20);

	/* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */
	/* 10a */
	clk_rst_ctl.s.o_clkdiv_rst = 1;
	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);

	/* 10b */
	clk_rst_ctl.s.o_clkdiv_en = 1;
	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);

	/* 10c */
	ndelay(io_clk_64_to_ns);

	/*
	 * Step 11: Program the PHY reset field:
	 * UCTL0_CLK_RST_CTL[P_PRST] = 1
	 */
	clk_rst_ctl.s.p_prst = 1;
	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);

	/* Step 12: Wait 1 uS. */
	udelay(1);

	/* Step 13: Program the HRESET_N field: UCTL0_CLK_RST_CTL[HRST] = 1 */
	clk_rst_ctl.s.hrst = 1;
	cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);

end_clock:
	/* Now we can set some other registers. */

	for (i = 0; i <= 1; i++) {
		port_ctl_status.u64 =
			cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
		/* Set txvreftune to 15 to obtain compliant 'eye' diagram. */
		port_ctl_status.s.txvreftune = 15;
		port_ctl_status.s.txrisetune = 1;
		port_ctl_status.s.txpreemphasistune = 1;
		cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
			       port_ctl_status.u64);
	}

	/* Set uSOF cycle period to 60,000 bits. */
	cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull);
exit:
	mutex_unlock(&octeon2_usb_clocks_mutex);
}

/*
 * Drop one reference on the USB clocks.  Only the counter is
 * decremented; no hardware teardown is performed here.
 */
static void octeon2_usb_clocks_stop(void)
{
	mutex_lock(&octeon2_usb_clocks_mutex);
	octeon2_usb_clock_start_cnt--;
	mutex_unlock(&octeon2_usb_clocks_mutex);
}

/* ehci-platform power_on hook: take a USB clock reference. */
static int octeon_ehci_power_on(struct platform_device *pdev)
{
	octeon2_usb_clocks_start(&pdev->dev);

	return 0;
}

/* ehci-platform power_off hook: release the USB clock reference. */
static void octeon_ehci_power_off(struct platform_device *pdev)
{
	octeon2_usb_clocks_stop();
}

static struct usb_ehci_pdata octeon_ehci_pdata = {
	/* Octeon EHCI matches CPU endianness. */
#ifdef __BIG_ENDIAN
	.big_endian_mmio	= 1,
#endif
	.dma_mask_64	= 1,
	.power_on	= octeon_ehci_power_on,
	.power_off	= octeon_ehci_power_off,
};

/*
 * One-time EHCI controller glue setup: enable 64-bit addressing and
 * set the L2C endianness modes to match the CPU.  Clocks are started
 * for the duration of the register writes and then released.
 */
static void __init octeon_ehci_hw_start(struct device *dev)
{
	union cvmx_uctlx_ehci_ctl ehci_ctl;

	octeon2_usb_clocks_start(dev);

	ehci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_EHCI_CTL(0));
	/* Use 64-bit addressing. */
	ehci_ctl.s.ehci_64b_addr_en = 1;
	ehci_ctl.s.l2c_addr_msb = 0;
#ifdef __BIG_ENDIAN
	ehci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
	ehci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
#else
	ehci_ctl.s.l2c_buff_emod = 0; /* not swapped. */
	ehci_ctl.s.l2c_desc_emod = 0; /* not swapped. */
	ehci_ctl.s.inv_reg_a2 = 1;
#endif
	cvmx_write_csr(CVMX_UCTLX_EHCI_CTL(0), ehci_ctl.u64);

	octeon2_usb_clocks_stop();
}

/*
 * Attach octeon_ehci_pdata to the DT-probed EHCI platform device and
 * run the hardware glue setup.  Quietly does nothing if no "ehci"
 * node or device exists.
 *
 * NOTE(review): the node reference from of_find_node_by_name() is
 * not released with of_node_put() here -- confirm whether that is
 * acceptable for this one-shot initcall.
 */
static int __init octeon_ehci_device_init(void)
{
	struct platform_device *pd;
	struct device_node *ehci_node;
	int ret = 0;

	ehci_node = of_find_node_by_name(NULL, "ehci");
	if (!ehci_node)
		return 0;

	pd = of_find_device_by_node(ehci_node);
	if (!pd)
		return 0;

	pd->dev.platform_data = &octeon_ehci_pdata;
	octeon_ehci_hw_start(&pd->dev);

	return ret;
}
device_initcall(octeon_ehci_device_init);

/* ohci-platform power_on hook: take a USB clock reference. */
static int octeon_ohci_power_on(struct platform_device *pdev)
{
	octeon2_usb_clocks_start(&pdev->dev);

	return 0;
}

/* ohci-platform power_off hook: release the USB clock reference. */
static void octeon_ohci_power_off(struct platform_device *pdev)
{
	octeon2_usb_clocks_stop();
}

static struct usb_ohci_pdata octeon_ohci_pdata = {
	/* Octeon OHCI matches CPU endianness. */
#ifdef __BIG_ENDIAN
	.big_endian_mmio	= 1,
#endif
	.power_on	= octeon_ohci_power_on,
	.power_off	= octeon_ohci_power_off,
};

/*
 * One-time OHCI controller glue setup, mirroring octeon_ehci_hw_start()
 * but without the 64-bit addressing enable.
 */
static void __init octeon_ohci_hw_start(struct device *dev)
{
	union cvmx_uctlx_ohci_ctl ohci_ctl;

	octeon2_usb_clocks_start(dev);

	ohci_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_OHCI_CTL(0));
	ohci_ctl.s.l2c_addr_msb = 0;
#ifdef __BIG_ENDIAN
	ohci_ctl.s.l2c_buff_emod = 1; /* Byte swapped. */
	ohci_ctl.s.l2c_desc_emod = 1; /* Byte swapped. */
#else
	ohci_ctl.s.l2c_buff_emod = 0; /* not swapped. */
	ohci_ctl.s.l2c_desc_emod = 0; /* not swapped. */
	ohci_ctl.s.inv_reg_a2 = 1;
#endif
	cvmx_write_csr(CVMX_UCTLX_OHCI_CTL(0), ohci_ctl.u64);

	octeon2_usb_clocks_stop();
}

/*
 * Attach octeon_ohci_pdata to the DT-probed OHCI platform device and
 * run the hardware glue setup.  Quietly does nothing if no "ohci"
 * node or device exists.
 */
static int __init octeon_ohci_device_init(void)
{
	struct platform_device *pd;
	struct device_node *ohci_node;
	int ret = 0;

	ohci_node = of_find_node_by_name(NULL, "ohci");
	if (!ohci_node)
		return 0;

	pd = of_find_device_by_node(ohci_node);
	if (!pd)
		return 0;

	pd->dev.platform_data = &octeon_ohci_pdata;
	octeon_ohci_hw_start(&pd->dev);

	return ret;
}
device_initcall(octeon_ohci_device_init);

#endif /* CONFIG_USB */

/* Buses/devices probed from the device tree by octeon_publish_devices(). */
static struct of_device_id __initdata octeon_ids[] = {
	{ .compatible = "simple-bus", },
	{ .compatible = "cavium,octeon-6335-uctl", },
	{ .compatible = "cavium,octeon-5750-usbn", },
	{ .compatible = "cavium,octeon-3860-bootbus", },
	{ .compatible = "cavium,mdio-mux", },
	{ .compatible = "gpio-leds", },
	{},
};

/*
 * True for models whose PHY node should be rewritten as a Marvell
 * 88e1145 in octeon_fdt_set_phy() (i.e. everything except CN52XX,
 * CN6XXX and CN56XX -- inferred from the model check only).
 */
static bool __init octeon_has_88e1145(void)
{
	return !OCTEON_IS_MODEL(OCTEON_CN52XX) &&
	       !OCTEON_IS_MODEL(OCTEON_CN6XXX) &&
	       !OCTEON_IS_MODEL(OCTEON_CN56XX);
}

/*
 * Fix up the PHY node referenced by ethernet node @eth in the live
 * FDT to match the board-reported MII address @phy_addr:
 *  - phy_addr < 0 or no resolvable PHY node: remove all PHY
 *    properties/nodes from @eth;
 *  - phy_addr >= 256 with an alternate PHY node present: switch
 *    "phy-handle" over to "cavium,alt-phy-handle" (the high bits
 *    select the alternate; only the low 8 bits are the address);
 *  - otherwise rewrite the PHY's "reg", "compatible" (88e1145
 *    boards) and node name in place.
 */
static void __init octeon_fdt_set_phy(int eth, int phy_addr)
{
	const __be32 *phy_handle;
	const __be32 *alt_phy_handle;
	const __be32 *reg;
	u32 phandle;
	int phy;
	int alt_phy;
	const char *p;
	int current_len;
	char new_name[20];

	phy_handle = fdt_getprop(initial_boot_params, eth, "phy-handle", NULL);
	if (!phy_handle)
		return;

	phandle = be32_to_cpup(phy_handle);
	phy = fdt_node_offset_by_phandle(initial_boot_params, phandle);

	alt_phy_handle = fdt_getprop(initial_boot_params, eth,
				     "cavium,alt-phy-handle", NULL);
	if (alt_phy_handle) {
		u32 alt_phandle = be32_to_cpup(alt_phy_handle);

		alt_phy = fdt_node_offset_by_phandle(initial_boot_params,
						     alt_phandle);
	} else {
		alt_phy = -1;
	}

	if (phy_addr < 0 || phy < 0) {
		/* Delete the PHY things */
		fdt_nop_property(initial_boot_params, eth, "phy-handle");
		/* This one may fail */
		fdt_nop_property(initial_boot_params, eth,
				 "cavium,alt-phy-handle");
		if (phy >= 0)
			fdt_nop_node(initial_boot_params, phy);
		if (alt_phy >= 0)
			fdt_nop_node(initial_boot_params, alt_phy);
		return;
	}

	if (phy_addr >= 256 && alt_phy > 0) {
		const struct fdt_property *phy_prop;
		struct fdt_property *alt_prop;
		u32 phy_handle_name;

		/* Use the alt phy node instead.*/
		phy_prop = fdt_get_property(initial_boot_params, eth,
					    "phy-handle", NULL);
		phy_handle_name = phy_prop->nameoff;
		fdt_nop_node(initial_boot_params, phy);
		fdt_nop_property(initial_boot_params, eth, "phy-handle");
		alt_prop = fdt_get_property_w(initial_boot_params, eth,
					      "cavium,alt-phy-handle", NULL);
		/* Rename the alt property so it becomes "phy-handle". */
		alt_prop->nameoff = phy_handle_name;
		phy = alt_phy;
	}

	phy_addr &= 0xff;

	if (octeon_has_88e1145()) {
		fdt_nop_property(initial_boot_params, phy, "marvell,reg-init");
		memset(new_name, 0, sizeof(new_name));
		strcpy(new_name, "marvell,88e1145");
		p = fdt_getprop(initial_boot_params, phy, "compatible",
				&current_len);
		if (p && current_len >= strlen(new_name))
			fdt_setprop_inplace(initial_boot_params, phy,
					    "compatible", new_name,
					    current_len);
	}

	reg = fdt_getprop(initial_boot_params, phy, "reg", NULL);
	if (phy_addr == be32_to_cpup(reg))
		return;

	fdt_setprop_inplace_cell(initial_boot_params, phy, "reg", phy_addr);

	snprintf(new_name, sizeof(new_name), "ethernet-phy@%x", phy_addr);

	/* In-place rename only works if the lengths match exactly. */
	p = fdt_get_name(initial_boot_params, phy, &current_len);
	if (p && current_len == strlen(new_name))
		fdt_set_name(initial_boot_params, phy, new_name);
	else
		pr_err("Error: could not rename ethernet phy: <%s>", p);
}

/*
 * Store the next MAC address from *@pmac into node @n's
 * "local-mac-address" property (in place), then advance *@pmac.
 * Skipped if the property is missing, malformed, or already holds a
 * valid address.
 */
static void __init octeon_fdt_set_mac_addr(int n, u64 *pmac)
{
	const u8 *old_mac;
	int old_len;
	u8 new_mac[6];
	u64 mac = *pmac;
	int r;

	old_mac = fdt_getprop(initial_boot_params, n, "local-mac-address",
			      &old_len);
	if (!old_mac || old_len != 6 || is_valid_ether_addr(old_mac))
		return;

	new_mac[0] = (mac >> 40) & 0xff;
	new_mac[1] = (mac >> 32) & 0xff;
	new_mac[2] = (mac >> 24) & 0xff;
	new_mac[3] = (mac >> 16) & 0xff;
	new_mac[4] = (mac >> 8) & 0xff;
	new_mac[5] = mac & 0xff;

	r = fdt_setprop_inplace(initial_boot_params, n, "local-mac-address",
				new_mac, sizeof(new_mac));

	if (r) {
		pr_err("Setting \"local-mac-address\" failed %d", r);
		return;
	}
	*pmac = mac + 1;
}

/*
 * Remove an ethernet node from the live FDT, together with the PHY
 * node its "phy-handle" points at (if any).
 */
static void __init octeon_fdt_rm_ethernet(int node)
{
	const __be32 *phy_handle;

	phy_handle = fdt_getprop(initial_boot_params, node, "phy-handle", NULL);
	if (phy_handle) {
		u32 ph = be32_to_cpup(phy_handle);
		int p = fdt_node_offset_by_phandle(initial_boot_params, ph);

		if (p >= 0)
			fdt_nop_node(initial_boot_params, p);
	}
	fdt_nop_node(initial_boot_params, node);
}

/*
 * Prune or fix up one PIP port node: ports beyond @max are deleted,
 * surviving ports get their PHY wired up from the board helper.  The
 * IPD port numbering differs on CN68XX.
 */
static void __init octeon_fdt_pip_port(int iface, int i, int p, int max)
{
	char name_buffer[20];
	int eth;
	int phy_addr;
	int ipd_port;

	snprintf(name_buffer, sizeof(name_buffer), "ethernet@%x", p);
	eth = fdt_subnode_offset(initial_boot_params, iface, name_buffer);
	if (eth < 0)
		return;
	if (p > max) {
		pr_debug("Deleting port %x:%x\n", i, p);
		octeon_fdt_rm_ethernet(eth);
		return;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		ipd_port = (0x100 * i) + (0x10 * p) + 0x800;
	else
		ipd_port = 16 * i + p;

	phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
	octeon_fdt_set_phy(eth, phy_addr);
}

/*
 * Walk all 16 possible port subnodes of PIP interface @idx, pruning
 * the ones beyond the enumerated port count (count stays 0 if the
 * interface cannot be enumerated, so every port is deleted).
 */
static void __init octeon_fdt_pip_iface(int pip, int idx)
{
	char name_buffer[20];
	int iface;
	int p;
	int count = 0;

	snprintf(name_buffer, sizeof(name_buffer), "interface@%d", idx);
	iface = fdt_subnode_offset(initial_boot_params, pip, name_buffer);
	if (iface < 0)
		return;

	if (cvmx_helper_interface_enumerate(idx) == 0)
		count = cvmx_helper_ports_on_interface(idx);

	for (p = 0; p < 16; p++)
		octeon_fdt_pip_port(iface, idx, p, count - 1);
}

/*
 * Assign sequential MAC addresses, starting from the bootloader's
 * mac_addr_base, to the mix0/mix1 management ports and then to every
 * PIP interface/port node found under the "pip" alias.
 */
void __init octeon_fill_mac_addresses(void)
{
	const char *alias_prop;
	char name_buffer[20];
	u64 mac_addr_base;
	int aliases;
	int pip;
	int i;

	aliases = fdt_path_offset(initial_boot_params, "/aliases");
	if (aliases < 0)
		return;

	/* Assemble the 48-bit base address from the bootinfo bytes. */
	mac_addr_base =
		((octeon_bootinfo->mac_addr_base[0] & 0xffull)) << 40 |
		((octeon_bootinfo->mac_addr_base[1] & 0xffull)) << 32 |
		((octeon_bootinfo->mac_addr_base[2] & 0xffull)) << 24 |
		((octeon_bootinfo->mac_addr_base[3] & 0xffull)) << 16 |
		((octeon_bootinfo->mac_addr_base[4] & 0xffull)) << 8 |
		 (octeon_bootinfo->mac_addr_base[5] & 0xffull);

	for (i = 0; i < 2; i++) {
		int mgmt;

		snprintf(name_buffer, sizeof(name_buffer), "mix%d", i);
		alias_prop = fdt_getprop(initial_boot_params, aliases,
					 name_buffer, NULL);
		if (!alias_prop)
			continue;
		mgmt = fdt_path_offset(initial_boot_params, alias_prop);
		if (mgmt < 0)
			continue;
		octeon_fdt_set_mac_addr(mgmt, &mac_addr_base);
	}

	alias_prop = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
	if (!alias_prop)
		return;
	pip = fdt_path_offset(initial_boot_params, alias_prop);
	if (pip < 0)
		return;

	for (i = 0; i <= 4; i++) {
		int iface;
		int p;

		snprintf(name_buffer, sizeof(name_buffer), "interface@%d", i);
		iface = fdt_subnode_offset(initial_boot_params, pip,
					   name_buffer);
		if (iface < 0)
			continue;
		for (p = 0; p < 16; p++) {
			int eth;

			snprintf(name_buffer, sizeof(name_buffer),
				 "ethernet@%x", p);
			eth = fdt_subnode_offset(initial_boot_params,
						 iface, name_buffer);
			if (eth < 0)
				continue;
			octeon_fdt_set_mac_addr(eth, &mac_addr_base);
		}
	}
}

/*
 * Edit the flattened device tree in place to match the actual chip
 * model and board: delete device nodes the hardware does not have
 * (mgmt ports, TWSI, SMI, UARTs, CF, LED, UCTL, USBN, board LEDs)
 * and patch addresses/clock properties on the nodes that remain.
 * Returns 0 on success, -EINVAL if /aliases is missing; panics on a
 * corrupt FDT header.
 */
int __init octeon_prune_device_tree(void)
{
	int i, max_port, uart_mask;
	const char *pip_path;
	const char *alias_prop;
	char name_buffer[20];
	int aliases;

	if (fdt_check_header(initial_boot_params))
		panic("Corrupt Device Tree.");

	aliases = fdt_path_offset(initial_boot_params, "/aliases");
	if (aliases < 0) {
		pr_err("Error: No /aliases node in device tree.");
		return -EINVAL;
	}

	/* Number of management (MIX) ports on this model. */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
		max_port = 2;
	else if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
		 OCTEON_IS_MODEL(OCTEON_CN68XX))
		max_port = 1;
	else
		max_port = 0;

	if (octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC10E)
		max_port = 0;

	for (i = 0; i < 2; i++) {
		int mgmt;

		snprintf(name_buffer, sizeof(name_buffer), "mix%d", i);
		alias_prop = fdt_getprop(initial_boot_params, aliases,
					 name_buffer, NULL);
		if (alias_prop) {
			mgmt = fdt_path_offset(initial_boot_params, alias_prop);
			if (mgmt < 0)
				continue;
			if (i >= max_port) {
				pr_debug("Deleting mix%d\n", i);
				octeon_fdt_rm_ethernet(mgmt);
				fdt_nop_property(initial_boot_params, aliases,
						 name_buffer);
			} else {
				int phy_addr = cvmx_helper_board_get_mii_address(CVMX_HELPER_BOARD_MGMT_IPD_PORT + i);

				octeon_fdt_set_phy(mgmt, phy_addr);
			}
		}
	}

	pip_path = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
	if (pip_path) {
		int pip = fdt_path_offset(initial_boot_params, pip_path);

		if (pip >= 0)
			for (i = 0; i <= 4; i++)
				octeon_fdt_pip_iface(pip, i);
	}

	/* I2C */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CN56XX))
		max_port = 2;
	else
		max_port = 1;

	for (i = 0; i < 2; i++) {
		int i2c;

		snprintf(name_buffer, sizeof(name_buffer), "twsi%d", i);
		alias_prop = fdt_getprop(initial_boot_params, aliases,
					 name_buffer, NULL);
		if (alias_prop) {
			i2c = fdt_path_offset(initial_boot_params, alias_prop);
			if (i2c < 0)
				continue;
			if (i >= max_port) {
				pr_debug("Deleting twsi%d\n", i);
				fdt_nop_node(initial_boot_params, i2c);
				fdt_nop_property(initial_boot_params, aliases,
						 name_buffer);
			}
		}
	}

	/* SMI/MDIO */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		max_port = 4;
	else if (OCTEON_IS_MODEL(OCTEON_CN52XX) ||
		 OCTEON_IS_MODEL(OCTEON_CN63XX) ||
		 OCTEON_IS_MODEL(OCTEON_CN56XX))
		max_port = 2;
	else
		max_port = 1;

	for (i = 0; i < 2; i++) {
		int i2c;

		snprintf(name_buffer, sizeof(name_buffer), "smi%d", i);
		alias_prop = fdt_getprop(initial_boot_params, aliases,
					 name_buffer, NULL);
		if (alias_prop) {
			i2c = fdt_path_offset(initial_boot_params, alias_prop);
			if (i2c < 0)
				continue;
			if (i >= max_port) {
				pr_debug("Deleting smi%d\n", i);
				fdt_nop_node(initial_boot_params, i2c);
				fdt_nop_property(initial_boot_params, aliases,
						 name_buffer);
			}
		}
	}

	/* Serial */
	uart_mask = 3;

	/* Right now CN52XX is the only chip with a third uart */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX))
		uart_mask |= 4; /* uart2 */

	for (i = 0; i < 3; i++) {
		int uart;

		snprintf(name_buffer, sizeof(name_buffer), "uart%d", i);
		alias_prop = fdt_getprop(initial_boot_params, aliases,
					 name_buffer, NULL);
		if (alias_prop) {
			uart = fdt_path_offset(initial_boot_params, alias_prop);
			if (uart_mask & (1 << i)) {
				__be32 f;

				/* Present: patch in the real clock rate. */
				f = cpu_to_be32(octeon_get_io_clock_rate());
				fdt_setprop_inplace(initial_boot_params,
						    uart, "clock-frequency",
						    &f, sizeof(f));
				continue;
			}
			pr_debug("Deleting uart%d\n", i);
			fdt_nop_node(initial_boot_params, uart);
			fdt_nop_property(initial_boot_params, aliases,
					 name_buffer);
		}
	}

	/* Compact Flash */
	alias_prop = fdt_getprop(initial_boot_params, aliases, "cf0", NULL);
	if (alias_prop) {
		union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
		unsigned long base_ptr, region_base, region_size;
		unsigned long region1_base = 0;
		unsigned long region1_size = 0;
		int cs, bootbus;
		bool is_16bit = false;
		bool is_true_ide = false;
		__be32 new_reg[6];
		__be32 *ranges;
		int len;

		int cf = fdt_path_offset(initial_boot_params, alias_prop);

		base_ptr = 0;
		if (octeon_bootinfo->major_version == 1
			&& octeon_bootinfo->minor_version >= 1) {
			if (octeon_bootinfo->compact_flash_common_base_addr)
				base_ptr = octeon_bootinfo->compact_flash_common_base_addr;
		} else {
			base_ptr = 0x1d000800;
		}

		if (!base_ptr)
			goto no_cf;

		/* Find CS0 region. */
		for (cs = 0; cs < 8; cs++) {
			mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
			region_base = mio_boot_reg_cfg.s.base << 16;
			region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
			if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
				&& base_ptr < region_base + region_size) {
				is_16bit = mio_boot_reg_cfg.s.width;
				break;
			}
		}
		if (cs >= 7) {
			/* cs and cs + 1 are CS0 and CS1, both must be less
			 * than 8. */
			goto no_cf;
		}

		if (!(base_ptr & 0xfffful)) {
			/*
			 * Boot loader signals availability of DMA (true_ide
			 * mode) by setting low order bits of base_ptr to
			 * zero.
			 */

			/* Assume that CS1 immediately follows. */
			mio_boot_reg_cfg.u64 =
				cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs + 1));
			region1_base = mio_boot_reg_cfg.s.base << 16;
			region1_size = (mio_boot_reg_cfg.s.size + 1) << 16;
			if (!mio_boot_reg_cfg.s.en)
				goto no_cf;
			is_true_ide = true;

		} else {
			fdt_nop_property(initial_boot_params, cf,
					 "cavium,true-ide");
			fdt_nop_property(initial_boot_params, cf,
					 "cavium,dma-engine-handle");
			if (!is_16bit) {
				__be32 width = cpu_to_be32(8);

				fdt_setprop_inplace(initial_boot_params, cf,
						    "cavium,bus-width",
						    &width, sizeof(width));
			}
		}

		new_reg[0] = cpu_to_be32(cs);
		new_reg[1] = cpu_to_be32(0);
		new_reg[2] = cpu_to_be32(0x10000);
		new_reg[3] = cpu_to_be32(cs + 1);
		new_reg[4] = cpu_to_be32(0);
		new_reg[5] = cpu_to_be32(0x10000);
		fdt_setprop_inplace(initial_boot_params, cf,
				    "reg", new_reg, sizeof(new_reg));

		bootbus = fdt_parent_offset(initial_boot_params, cf);
		if (bootbus < 0)
			goto no_cf;
		ranges = fdt_getprop_w(initial_boot_params, bootbus,
				       "ranges", &len);
		if (!ranges || len < (5 * 8 * sizeof(__be32)))
			goto no_cf;

		/* Patch the boot-bus "ranges" with the real CS regions. */
		ranges[(cs * 5) + 2] = cpu_to_be32(region_base >> 32);
		ranges[(cs * 5) + 3] = cpu_to_be32(region_base & 0xffffffff);
		ranges[(cs * 5) + 4] = cpu_to_be32(region_size);
		if (is_true_ide) {
			cs++;
			ranges[(cs * 5) + 2] = cpu_to_be32(region1_base >> 32);
			ranges[(cs * 5) + 3] = cpu_to_be32(region1_base & 0xffffffff);
			ranges[(cs * 5) + 4] = cpu_to_be32(region1_size);
		}
		goto end_cf;
no_cf:
		fdt_nop_node(initial_boot_params, cf);

end_cf:
		;
	}

	/* 8 char LED */
	alias_prop = fdt_getprop(initial_boot_params, aliases, "led0", NULL);
	if (alias_prop) {
		union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
		unsigned long base_ptr, region_base, region_size;
		int cs, bootbus;
		__be32 new_reg[6];
		__be32 *ranges;
		int len;

		int led = fdt_path_offset(initial_boot_params, alias_prop);

		base_ptr = octeon_bootinfo->led_display_base_addr;
		if (base_ptr == 0)
			goto no_led;
		/* Find CS0 region. */
		for (cs = 0; cs < 8; cs++) {
			mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
			region_base = mio_boot_reg_cfg.s.base << 16;
			region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
			if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
				&& base_ptr < region_base + region_size)
				break;
		}

		if (cs > 7)
			goto no_led;

		new_reg[0] = cpu_to_be32(cs);
		new_reg[1] = cpu_to_be32(0x20);
		new_reg[2] = cpu_to_be32(0x20);
		new_reg[3] = cpu_to_be32(cs);
		new_reg[4] = cpu_to_be32(0);
		new_reg[5] = cpu_to_be32(0x20);
		fdt_setprop_inplace(initial_boot_params, led,
				    "reg", new_reg, sizeof(new_reg));

		bootbus = fdt_parent_offset(initial_boot_params, led);
		if (bootbus < 0)
			goto no_led;
		ranges = fdt_getprop_w(initial_boot_params, bootbus,
				       "ranges", &len);
		if (!ranges || len < (5 * 8 * sizeof(__be32)))
			goto no_led;

		ranges[(cs * 5) + 2] = cpu_to_be32(region_base >> 32);
		ranges[(cs * 5) + 3] = cpu_to_be32(region_base & 0xffffffff);
		ranges[(cs * 5) + 4] = cpu_to_be32(region_size);

		goto end_led;
no_led:
		fdt_nop_node(initial_boot_params, led);
end_led:
		;
	}

	/* OHCI/UHCI USB */
	alias_prop = fdt_getprop(initial_boot_params, aliases, "uctl", NULL);
	if (alias_prop) {
		int uctl = fdt_path_offset(initial_boot_params, alias_prop);

		if (uctl >= 0 && (!OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
				  octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC2E)) {
			pr_debug("Deleting uctl\n");
			fdt_nop_node(initial_boot_params, uctl);
			fdt_nop_property(initial_boot_params, aliases, "uctl");
		} else if (octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC10E ||
			   octeon_bootinfo->board_type == CVMX_BOARD_TYPE_NIC4E) {
			/* Missing "refclk-type" defaults to crystal. */
			fdt_nop_property(initial_boot_params, uctl,
					 "refclk-type");
		}
	}

	/* DWC2 USB */
	alias_prop = fdt_getprop(initial_boot_params, aliases, "usbn", NULL);
	if (alias_prop) {
		int usbn = fdt_path_offset(initial_boot_params, alias_prop);

		if (usbn >= 0 && (current_cpu_type() == CPU_CAVIUM_OCTEON2 ||
				  !octeon_has_feature(OCTEON_FEATURE_USB))) {
			pr_debug("Deleting usbn\n");
			fdt_nop_node(initial_boot_params, usbn);
			fdt_nop_property(initial_boot_params, aliases, "usbn");
		} else  {
			__be32 new_f[1];
			enum cvmx_helper_board_usb_clock_types c;

			c = __cvmx_helper_board_usb_get_clock_type();
			switch (c) {
			case USB_CLOCK_TYPE_REF_48:
				new_f[0] = cpu_to_be32(48000000);
				fdt_setprop_inplace(initial_boot_params, usbn,
						    "refclk-frequency",
						    new_f, sizeof(new_f));
				/* Fall through ...*/
			case USB_CLOCK_TYPE_REF_12:
				/* Missing "refclk-type" defaults to external. */
				fdt_nop_property(initial_boot_params, usbn,
						 "refclk-type");
				break;
			default:
				break;
			}
		}
	}

	/* Remove the DSR1000N LED node on every other board. */
	if (octeon_bootinfo->board_type != CVMX_BOARD_TYPE_CUST_DSR1000N) {
		int dsr1000n_leds = fdt_path_offset(initial_boot_params,
						    "/dsr1000n-leds");

		if (dsr1000n_leds >= 0)
			fdt_nop_node(initial_boot_params, dsr1000n_leds);
	}
	return 0;
}

/* Probe the buses/devices in octeon_ids from the pruned device tree. */
static int __init octeon_publish_devices(void)
{
	return of_platform_bus_probe(NULL, octeon_ids, NULL);
}
device_initcall(octeon_publish_devices);

MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Platform driver for Octeon SOC");
gpl-2.0
hackeran/linux-netmap
fs/eventpoll.c
49
54696
/* * fs/eventpoll.c (Efficient event retrieval implementation) * Copyright (C) 2001,...,2009 Davide Libenzi * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Davide Libenzi <davidel@xmailserver.org> * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/string.h> #include <linux/list.h> #include <linux/hash.h> #include <linux/spinlock.h> #include <linux/syscalls.h> #include <linux/rbtree.h> #include <linux/wait.h> #include <linux/eventpoll.h> #include <linux/mount.h> #include <linux/bitops.h> #include <linux/mutex.h> #include <linux/anon_inodes.h> #include <linux/device.h> #include <asm/uaccess.h> #include <asm/io.h> #include <asm/mman.h> #include <linux/atomic.h> /* * LOCKING: * There are three level of locking required by epoll : * * 1) epmutex (mutex) * 2) ep->mtx (mutex) * 3) ep->lock (spinlock) * * The acquire order is the one listed above, from 1 to 3. * We need a spinlock (ep->lock) because we manipulate objects * from inside the poll callback, that might be triggered from * a wake_up() that in turn might be called from IRQ context. * So we can't sleep inside the poll callback and hence we need * a spinlock. During the event transfer loop (from kernel to * user space) we could end up sleeping due a copy_to_user(), so * we need a lock that will allow us to sleep. This lock is a * mutex (ep->mtx). It is acquired during the event transfer loop, * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file(). * Then we also need a global mutex to serialize eventpoll_release_file() * and ep_free(). 
* This mutex is acquired by ep_free() during the epoll file * cleanup path and it is also acquired by eventpoll_release_file() * if a file has been pushed inside an epoll set and it is then * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL). * It is also acquired when inserting an epoll fd onto another epoll * fd. We do this so that we walk the epoll tree and ensure that this * insertion does not create a cycle of epoll file descriptors, which * could lead to deadlock. We need a global mutex to prevent two * simultaneous inserts (A into B and B into A) from racing and * constructing a cycle without either insert observing that it is * going to. * It is necessary to acquire multiple "ep->mtx"es at once in the * case when one epoll fd is added to another. In this case, we * always acquire the locks in the order of nesting (i.e. after * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired * before e2->mtx). Since we disallow cycles of epoll file * descriptors, this ensures that the mutexes are well-ordered. In * order to communicate this nesting to lockdep, when walking a tree * of epoll file descriptors, we use the current recursion depth as * the lockdep subkey. * It is possible to drop the "ep->mtx" and to use the global * mutex "epmutex" (together with "ep->lock") to have it working, * but having "ep->mtx" will make the interface more scalable. * Events that require holding "epmutex" are very rare, while for * normal operations the epoll private "ep->mtx" will guarantee * a better scalability. 
*/ /* Epoll private bits inside the event mask */ #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET) /* Maximum number of nesting allowed inside epoll sets */ #define EP_MAX_NESTS 4 #define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event)) #define EP_UNACTIVE_PTR ((void *) -1L) #define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry)) struct epoll_filefd { struct file *file; int fd; }; /* * Structure used to track possible nested calls, for too deep recursions * and loop cycles. */ struct nested_call_node { struct list_head llink; void *cookie; void *ctx; }; /* * This structure is used as collector for nested calls, to check for * maximum recursion dept and loop cycles. */ struct nested_calls { struct list_head tasks_call_list; spinlock_t lock; }; /* * Each file descriptor added to the eventpoll interface will * have an entry of this type linked to the "rbr" RB tree. */ struct epitem { /* RB tree node used to link this structure to the eventpoll RB tree */ struct rb_node rbn; /* List header used to link this structure to the eventpoll ready list */ struct list_head rdllink; /* * Works together "struct eventpoll"->ovflist in keeping the * single linked chain of items. */ struct epitem *next; /* The file descriptor information this item refers to */ struct epoll_filefd ffd; /* Number of active wait queue attached to poll operations */ int nwait; /* List containing poll wait queues */ struct list_head pwqlist; /* The "container" of this item */ struct eventpoll *ep; /* List header used to link this item to the "struct file" items list */ struct list_head fllink; /* wakeup_source used when EPOLLWAKEUP is set */ struct wakeup_source *ws; /* The structure that describe the interested events and the source fd */ struct epoll_event event; }; /* * This structure is stored inside the "private_data" member of the file * structure and represents the main data structure for the eventpoll * interface. 
 */

/*
 * Per-instance state of one epoll file descriptor.
 */
struct eventpoll {
	/* Protect the access to this structure */
	spinlock_t lock;

	/*
	 * This mutex is used to ensure that files are not removed
	 * while epoll is using them. This is held during the event
	 * collection loop, the file cleanup path, the epoll file exit
	 * code and the ctl operations.
	 */
	struct mutex mtx;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* RB tree root used to store monitored fd structs */
	struct rb_root rbr;

	/*
	 * This is a single linked list that chains all the "struct epitem" that
	 * happened while transferring ready events to userspace w/out
	 * holding ->lock.
	 */
	struct epitem *ovflist;

	/* wakeup_source used when ep_scan_ready_list is running */
	struct wakeup_source *ws;

	/* The user that created the eventpoll descriptor */
	struct user_struct *user;

	/* Backing file of this epoll instance (assignment not in this chunk) */
	struct file *file;

	/* used to optimize loop detection check */
	int visited;
	struct list_head visited_list_link;
};

/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct list_head llink;

	/* The "base" pointer is set to the container "struct epitem" */
	struct epitem *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_t wait;

	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

/* Used by the ep_send_events() function as callback private data */
struct ep_send_events_data {
	int maxevents;
	struct epoll_event __user *events;
};

/*
 * Configuration options available inside /proc/sys/fs/epoll/
 */
/* Maximum number of epoll watched descriptors, per user */
static long max_user_watches __read_mostly;

/*
 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
static DEFINE_MUTEX(epmutex);

/* Used to check for epoll file descriptor inclusion loops */
static struct nested_calls poll_loop_ncalls;

/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;

/* Used to call file's f_op->poll() under the nested calls boundaries */
static struct nested_calls poll_readywalk_ncalls;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
static LIST_HEAD(visited_list);

/*
 * List of files with newly added links, where we may need to limit the number
 * of emanating paths. Protected by the epmutex.
 */
static LIST_HEAD(tfile_check_list);

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

/* Bounds for the max_user_watches sysctl entry below */
static long zero;
static long long_max = LONG_MAX;

ctl_table epoll_table[] = {
	{
		.procname	= "max_user_watches",
		.data		= &max_user_watches,
		.maxlen		= sizeof(max_user_watches),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &zero,
		.extra2		= &long_max,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

static const struct file_operations eventpoll_fops;

/* Tells whether @f is one of our own epoll files */
static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}

/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}

/* Compare RB tree keys: order by file pointer first, then by fd */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1:
	        (p1->file < p2->file ? -1 : p1->fd - p2->fd));
}

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct list_head *p)
{
	return !list_empty(p);
}

/* Get the "struct eppoll_entry" containing a given wait queue entry */
static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
	return container_of(p, struct ep_pqueue, pt)->epi;
}

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
	return op != EPOLL_CTL_DEL;
}

/* Initialize the poll safe wake up structure */
static void ep_nested_calls_init(struct nested_calls *ncalls)
{
	INIT_LIST_HEAD(&ncalls->tasks_call_list);
	spin_lock_init(&ncalls->lock);
}

/**
 * ep_events_available - Checks if ready events might be available.
 *
 * @ep: Pointer to the eventpoll context.
 *
 * Returns: Returns a value different than zero if ready events are available,
 *          or zero otherwise.
 */
static inline int ep_events_available(struct eventpoll *ep)
{
	return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
}

/**
 * ep_call_nested - Perform a bound (possibly) nested call, by checking
 *                  that the recursion limit is not exceeded, and that
 *                  the same nested call (by the meaning of same cookie) is
 *                  no re-entered.
 *
 * @ncalls: Pointer to the nested_calls structure to be used for this call.
 * @max_nests: Maximum number of allowed nesting calls.
 * @nproc: Nested call core function pointer.
 * @priv: Opaque data to be passed to the @nproc callback.
 * @cookie: Cookie to be used to identify this nested call.
 * @ctx: This instance context.
 *
 * Returns: Returns the code returned by the @nproc callback, or -1 if
 *          the maximum recursion limit has been exceeded.
*/
static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
			  int (*nproc)(void *, void *, int), void *priv,
			  void *cookie, void *ctx)
{
	int error, call_nests = 0;
	unsigned long flags;
	struct list_head *lsthead = &ncalls->tasks_call_list;
	struct nested_call_node *tncur;
	struct nested_call_node tnode;

	spin_lock_irqsave(&ncalls->lock, flags);

	/*
	 * Try to see if the current task is already inside this wakeup call.
	 * We use a list here, since the population inside this set is always
	 * very much limited.
	 */
	list_for_each_entry(tncur, lsthead, llink) {
		if (tncur->ctx == ctx &&
		    (tncur->cookie == cookie || ++call_nests > max_nests)) {
			/*
			 * Ops ... loop detected or maximum nest level reached.
			 * We abort this wake by breaking the cycle itself.
			 */
			error = -1;
			goto out_unlock;
		}
	}

	/* Add the current task and cookie to the list */
	tnode.ctx = ctx;
	tnode.cookie = cookie;
	list_add(&tnode.llink, lsthead);

	spin_unlock_irqrestore(&ncalls->lock, flags);

	/* Call the nested function */
	error = (*nproc)(priv, cookie, call_nests);

	/* Remove the current task from the list */
	spin_lock_irqsave(&ncalls->lock, flags);
	list_del(&tnode.llink);
out_unlock:
	spin_unlock_irqrestore(&ncalls->lock, flags);

	return error;
}

/*
 * As described in commit 0ccf831cb lockdep: annotate epoll
 * the use of wait queues used by epoll is done in a very controlled
 * manner. Wake ups can nest inside each other, but are never done
 * with the same locking. For example:
 *
 * dfd = socket(...);
 * efd1 = epoll_create();
 * efd2 = epoll_create();
 * epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
 * epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
 *
 * When a packet arrives to the device underneath "dfd", the net code will
 * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
 * callback wakeup entry on that queue, and the wake_up() performed by the
 * "dfd" net code will end up in ep_poll_callback(). At this point epoll
 * (efd1) notices that it may have some event ready, so it needs to wake up
 * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
 * that ends up in another wake_up(), after having checked about the
 * recursion constraints. That are, no more than EP_MAX_POLLWAKE_NESTS, to
 * avoid stack blasting.
 *
 * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
 * this special case of epoll.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
				     unsigned long events, int subclass)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&wqueue->lock, flags, subclass);
	wake_up_locked_poll(wqueue, events);
	spin_unlock_irqrestore(&wqueue->lock, flags);
}
#else
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
				     unsigned long events, int subclass)
{
	wake_up_poll(wqueue, events);
}
#endif

/* Nested-call worker: wakes the wait queue head passed in as @cookie */
static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
{
	ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN,
			  1 + call_nests);
	return 0;
}

/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_NESTS times,
 * and we cannot reenter the same wait queue head at all. This will
 * enable to have a hierarchy of epoll file descriptor of no more than
 * EP_MAX_NESTS deep.
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
	int this_cpu = get_cpu();

	ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
		       ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);

	put_cpu();
}

/* Detach one poll hook from its target wait queue head (RCU-safe vs POLLFREE) */
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
	wait_queue_head_t *whead;

	rcu_read_lock();
	/* If it is cleared by POLLFREE, it should be rcu-safe */
	whead = rcu_dereference(pwq->whead);
	if (whead)
		remove_wait_queue(whead, &pwq->wait);
	rcu_read_unlock();
}

/*
 * This function unregisters poll callbacks from the associated file
 * descriptor. Must be called with "mtx" held (or "epmutex" if called from
 * ep_free).
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	struct list_head *lsthead = &epi->pwqlist;
	struct eppoll_entry *pwq;

	while (!list_empty(lsthead)) {
		pwq = list_first_entry(lsthead, struct eppoll_entry, llink);

		list_del(&pwq->llink);
		ep_remove_wait_queue(pwq);
		kmem_cache_free(pwq_cache, pwq);
	}
}

/**
 * ep_scan_ready_list - Scans the ready list in a way that makes possible for
 *                      the scan code, to call f_op->poll(). Also allows for
 *                      O(NumReady) performance.
 *
 * @ep: Pointer to the epoll private data structure.
 * @sproc: Pointer to the scan callback.
 * @priv: Private opaque data passed to the @sproc callback.
 * @depth: The current depth of recursive f_op->poll calls.
 *
 * Returns: The same integer error code returned by the @sproc callback.
 */
static int ep_scan_ready_list(struct eventpoll *ep,
			      int (*sproc)(struct eventpoll *,
					   struct list_head *, void *),
			      void *priv, int depth)
{
	int error, pwake = 0;
	unsigned long flags;
	struct epitem *epi, *nepi;
	LIST_HEAD(txlist);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() and epoll_ctl().
	 */
	mutex_lock_nested(&ep->mtx, depth);

	/*
	 * Steal the ready list, and re-init the original one to the
	 * empty list. Also, set ep->ovflist to NULL so that events
	 * happening while looping w/out locks, are not lost. We cannot
	 * have the poll callback to queue directly on ep->rdllist,
	 * because we want the "sproc" callback to be able to do it
	 * in a lockless way.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	list_splice_init(&ep->rdllist, &txlist);
	ep->ovflist = NULL;
	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Now call the callback function.
	 */
	error = (*sproc)(ep, &txlist, priv);

	spin_lock_irqsave(&ep->lock, flags);
	/*
	 * During the time we spent inside the "sproc" callback, some
	 * other events might have been queued by the poll callback.
	 * We re-insert them inside the main ready-list here.
	 */
	for (nepi = ep->ovflist; (epi = nepi) != NULL;
	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
		/*
		 * We need to check if the item is already in the list.
		 * During the "sproc" callback execution time, items are
		 * queued into ->ovflist but the "txlist" might already
		 * contain them, and the list_splice() below takes care of them.
		 */
		if (!ep_is_linked(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);
			__pm_stay_awake(epi->ws);
		}
	}
	/*
	 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
	 * releasing the lock, events will be queued in the normal way inside
	 * ep->rdllist.
	 */
	ep->ovflist = EP_UNACTIVE_PTR;

	/*
	 * Quickly re-inject items left on "txlist".
	 */
	list_splice(&txlist, &ep->rdllist);
	__pm_relax(ep->ws);

	if (!list_empty(&ep->rdllist)) {
		/*
		 * Wake up (if active) both the eventpoll wait list and
		 * the ->poll() wait list (delayed after we release the lock).
		 */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	mutex_unlock(&ep->mtx);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return error;
}

/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
*/
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	unsigned long flags;
	struct file *file = epi->ffd.file;

	/*
	 * Removes poll wait queue hooks. We _have_ to do this without holding
	 * the "ep->lock" otherwise a deadlock might occur. This because of the
	 * sequence of the lock acquisition. Here we do "ep->lock" then the wait
	 * queue head lock when unregistering the wait queue. The wakeup callback
	 * will run by holding the wait queue head lock and will call our callback
	 * that will try to get "ep->lock".
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_lock);
	if (ep_is_linked(&epi->fllink))
		list_del_init(&epi->fllink);
	spin_unlock(&file->f_lock);

	rb_erase(&epi->rbn, &ep->rbr);

	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	wakeup_source_unregister(epi->ws);

	/* At this point it is safe to free the eventpoll item */
	kmem_cache_free(epi_cache, epi);

	atomic_long_dec(&ep->user->epoll_watches);

	return 0;
}

/* Tear down a whole eventpoll instance; runs when its file goes away */
static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	/* We need to release all tasks waiting for these file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&ep->poll_wait);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->mtx" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
	 * holding "epmutex" is sufficient here.
	 */
	mutex_lock(&epmutex);

	/*
	 * Walks through the whole tree by unregistering poll callbacks.
	 */
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
	}

	/*
	 * Walks through the whole tree by freeing each "struct epitem". At this
	 * point we are sure no poll callbacks will be lingering around, and also by
	 * holding "epmutex" we can be sure that no file cleanup code will hit
	 * us during this operation. So we can avoid the lock on "ep->lock".
	 */
	while ((rbp = rb_first(&ep->rbr)) != NULL) {
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove(ep, epi);
	}

	mutex_unlock(&epmutex);
	mutex_destroy(&ep->mtx);
	free_uid(ep->user);
	wakeup_source_unregister(ep->ws);
	kfree(ep);
}

/* ->release() of the epoll file: frees the whole eventpoll context */
static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep)
		ep_free(ep);

	return 0;
}

/*
 * Scan callback used by ep_eventpoll_poll(): re-polls every ready item and
 * reports POLLIN|POLLRDNORM as soon as one still has wanted events pending;
 * items found no longer ready are dropped from the list.
 */
static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct epitem *epi, *tmp;
	poll_table pt;

	init_poll_funcptr(&pt, NULL);

	list_for_each_entry_safe(epi, tmp, head, rdllink) {
		pt._key = epi->event.events;
		if (epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
		    epi->event.events)
			return POLLIN | POLLRDNORM;
		else {
			/*
			 * Item has been dropped into the ready list by the poll
			 * callback, but it's not actually ready, as far as
			 * caller requested events goes. We can remove it here.
			 */
			__pm_relax(epi->ws);
			list_del_init(&epi->rdllink);
		}
	}

	return 0;
}

/* Nested-call wrapper that runs ep_read_events_proc() via the scan helper */
static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
{
	return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
}

/* ->poll() of the epoll file itself (supports epoll inside epoll/select) */
static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	int pollflags;
	struct eventpoll *ep = file->private_data;

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/*
	 * Proceed to find out if wanted events are really available inside
	 * the ready list. This need to be done under ep_call_nested()
	 * supervision, since the call to f_op->poll() done on listed files
	 * could re-enter here.
	 */
	pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
				   ep_poll_readyevents_proc, ep, ep, current);

	/* -1 from ep_call_nested() means recursion limit hit: report no events */
	return pollflags != -1 ? pollflags : 0;
}

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
	.release	= ep_eventpoll_release,
	.poll		= ep_eventpoll_poll,
	.llseek		= noop_llseek,
};

/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need to have this facility to cleanup correctly files that are
 * closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct list_head *lsthead = &file->f_ep_links;
	struct eventpoll *ep;
	struct epitem *epi;

	/*
	 * We don't want to get "file->f_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
	 * cleanup path, and this means that no one is using this file anymore.
	 * So, for example, epoll_ctl() cannot hit here since if we reach this
	 * point, the file counter already went to zero and fget() would fail.
	 * The only hit might come from ep_free() but by holding the mutex
	 * will correctly serialize the operation. We do need to acquire
	 * "ep->mtx" after "epmutex" because ep_remove() requires it when called
	 * from anywhere but ep_free().
	 *
	 * Besides, ep_remove() acquires the lock, so we can't hold it here.
*/
	mutex_lock(&epmutex);

	while (!list_empty(lsthead)) {
		epi = list_first_entry(lsthead, struct epitem, fllink);

		ep = epi->ep;
		list_del_init(&epi->fllink);
		mutex_lock_nested(&ep->mtx, 0);
		ep_remove(ep, epi);
		mutex_unlock(&ep->mtx);
	}

	mutex_unlock(&epmutex);
}

/* Allocate and initialize a fresh "struct eventpoll"; 0 or -ENOMEM */
static int ep_alloc(struct eventpoll **pep)
{
	int error;
	struct user_struct *user;
	struct eventpoll *ep;

	user = get_current_user();
	error = -ENOMEM;
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (unlikely(!ep))
		goto free_uid;

	spin_lock_init(&ep->lock);
	mutex_init(&ep->mtx);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT;
	ep->ovflist = EP_UNACTIVE_PTR;
	ep->user = user;

	*pep = ep;

	return 0;

free_uid:
	free_uid(user);
	return error;
}

/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	ep_set_ffd(&ffd, file, fd);
	for (rbp = ep->rbr.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			epir = epi;
			break;
		}
	}

	return epir;
}

/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;

	if ((unsigned long)key & POLLFREE) {
		ep_pwq_from_wait(wait)->whead = NULL;
		/*
		 * whead = NULL above can race with ep_remove_wait_queue()
		 * which can do another remove_wait_queue() after us, so we
		 * can't use __remove_wait_queue(). whead->lock is held by
		 * the caller.
		 */
		list_del_init(&wait->task_list);
	}

	spin_lock_irqsave(&ep->lock, flags);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD will be issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto out_unlock;

	/*
	 * Check the events coming with the callback. At this stage, not
	 * every device reports the events in the "key" parameter of the
	 * callback. We need to be able to handle both cases here, hence the
	 * test for "key" != NULL before the event match test.
	 */
	if (key && !((unsigned long) key & epi->event.events))
		goto out_unlock;

	/*
	 * If we are transferring events to userspace, we can hold no locks
	 * (because we're accessing user memory, and because of linux f_op->poll()
	 * semantics). All the events that happen during that period of time are
	 * chained in ep->ovflist and requeued later on.
	 */
	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
		if (epi->next == EP_UNACTIVE_PTR) {
			epi->next = ep->ovflist;
			ep->ovflist = epi;
			if (epi->ws) {
				/*
				 * Activate ep->ws since epi->ws may get
				 * deactivated at any time.
				 */
				__pm_stay_awake(ep->ws);
			}

		}
		goto out_unlock;
	}

	/* If this file is already in the ready list we exit soon */
	if (!ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);
		__pm_stay_awake(epi->ws);
	}

	/*
	 * Wake up ( if active ) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq))
		wake_up_locked(&ep->wq);
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

out_unlock:
	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 1;
}

/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
*/
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = ep_item_from_epqueue(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}

/* Insert @epi into @ep's RB tree, keyed by (file, fd) via ep_cmp_ffd() */
static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
	struct epitem *epic;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color(&epi->rbn, &ep->rbr);
}

#define PATH_ARR_SIZE 5
/*
 * These are the number paths of length 1 to 5, that we are allowing to emanate
 * from a single file of interest. For example, we allow 1000 paths of length
 * 1, to emanate from each file of interest. This essentially represents the
 * potential wakeup paths, which need to be limited in order to avoid massive
 * uncontrolled wakeup storms. The common use case should be a single ep which
 * is connected to n file sources. In this case each file source has 1 path
 * of length 1. Thus, the numbers below should be more than sufficient. These
 * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
 * and delete can't add additional paths. Protected by the epmutex.
 */
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];

/* Count one more path of depth @nests; returns -1 if the depth limit is hit */
static int path_count_inc(int nests)
{
	/* Allow an arbitrary number of depth 1 paths */
	if (nests == 0)
		return 0;

	if (++path_count[nests] > path_limits[nests])
		return -1;
	return 0;
}

/* Reset the per-depth path counters before a new reverse-path walk */
static void path_count_init(void)
{
	int i;

	for (i = 0; i < PATH_ARR_SIZE; i++)
		path_count[i] = 0;
}

/*
 * Nested-call worker for reverse_path_check(): walks @priv's f_ep_links
 * upward, counting wakeup paths per nesting depth and recursing into
 * parent epoll files that are themselves watched.
 */
static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
{
	int error = 0;
	struct file *file = priv;
	struct file *child_file;
	struct epitem *epi;

	list_for_each_entry(epi, &file->f_ep_links, fllink) {
		child_file = epi->ep->file;
		if (is_file_epoll(child_file)) {
			if (list_empty(&child_file->f_ep_links)) {
				if (path_count_inc(call_nests)) {
					error = -1;
					break;
				}
			} else {
				error = ep_call_nested(&poll_loop_ncalls,
							EP_MAX_NESTS,
							reverse_path_check_proc,
							child_file, child_file,
							current);
			}
			if (error != 0)
				break;
		} else {
			printk(KERN_ERR "reverse_path_check_proc: "
				"file is not an ep!\n");
		}
	}
	return error;
}

/**
 * reverse_path_check - The tfile_check_list is list of file *, which have
 *                      links that are proposed to be newly added. We need to
 *                      make sure that those added links don't add too many
 *                      paths such that we will spend all our time waking up
 *                      eventpoll objects.
 *
 * Returns: Returns zero if the proposed links don't create too many paths,
 *	    -1 otherwise.
*/
static int reverse_path_check(void)
{
	int error = 0;
	struct file *current_file;

	/* let's call this for all tfiles */
	list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
		path_count_init();
		error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
					reverse_path_check_proc, current_file,
					current_file, current);
		if (error)
			break;
	}
	return error;
}

/*
 * Register the per-item EPOLLWAKEUP wakeup source, lazily creating the
 * per-ep one on first use. Returns 0 or -ENOMEM.
 */
static int ep_create_wakeup_source(struct epitem *epi)
{
	const char *name;

	if (!epi->ep->ws) {
		epi->ep->ws = wakeup_source_register("eventpoll");
		if (!epi->ep->ws)
			return -ENOMEM;
	}

	name = epi->ffd.file->f_path.dentry->d_name.name;
	epi->ws = wakeup_source_register(name);
	if (!epi->ws)
		return -ENOMEM;

	return 0;
}

/* Drop the per-item wakeup source (counterpart of ep_create_wakeup_source) */
static void ep_destroy_wakeup_source(struct epitem *epi)
{
	wakeup_source_unregister(epi->ws);
	epi->ws = NULL;
}

/*
 * Must be called with "mtx" held.
 */
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
		     struct file *tfile, int fd)
{
	int error, revents, pwake = 0;
	unsigned long flags;
	long user_watches;
	struct epitem *epi;
	struct ep_pqueue epq;

	user_watches = atomic_long_read(&ep->user->epoll_watches);
	if (unlikely(user_watches >= max_user_watches))
		return -ENOSPC;
	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
		return -ENOMEM;

	/* Item initialization follow here ... */
	INIT_LIST_HEAD(&epi->rdllink);
	INIT_LIST_HEAD(&epi->fllink);
	INIT_LIST_HEAD(&epi->pwqlist);
	epi->ep = ep;
	ep_set_ffd(&epi->ffd, tfile, fd);
	epi->event = *event;
	epi->nwait = 0;
	epi->next = EP_UNACTIVE_PTR;
	if (epi->event.events & EPOLLWAKEUP) {
		error = ep_create_wakeup_source(epi);
		if (error)
			goto error_create_wakeup_source;
	} else {
		epi->ws = NULL;
	}

	/* Initialize the poll table using the queue callback */
	epq.epi = epi;
	init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
	epq.pt._key = event->events;

	/*
	 * Attach the item to the poll hooks and get current event bits.
	 * We can safely use the file* here because its usage count has
	 * been increased by the caller of this function. Note that after
	 * this operation completes, the poll callback can start hitting
	 * the new item.
	 */
	revents = tfile->f_op->poll(tfile, &epq.pt);

	/*
	 * We have to check if something went wrong during the poll wait queue
	 * install process. Namely an allocation for a wait queue failed due
	 * high memory pressure.
	 */
	error = -ENOMEM;
	if (epi->nwait < 0)
		goto error_unregister;

	/* Add the current item to the list of active epoll hook for this file */
	spin_lock(&tfile->f_lock);
	list_add_tail(&epi->fllink, &tfile->f_ep_links);
	spin_unlock(&tfile->f_lock);

	/*
	 * Add the current item to the RB tree. All RB tree operations are
	 * protected by "mtx", and ep_insert() is called with "mtx" held.
	 */
	ep_rbtree_insert(ep, epi);

	/* now check if we've created too many backpaths */
	error = -EINVAL;
	if (reverse_path_check())
		goto error_remove_epi;

	/* We have to drop the new item inside our item list to keep track of it */
	spin_lock_irqsave(&ep->lock, flags);

	/* If the file is already "ready" we drop it inside the ready list */
	if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);
		__pm_stay_awake(epi->ws);

		/* Notify waiting tasks that events are available */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}

	spin_unlock_irqrestore(&ep->lock, flags);

	atomic_long_inc(&ep->user->epoll_watches);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 0;

error_remove_epi:
	spin_lock(&tfile->f_lock);
	if (ep_is_linked(&epi->fllink))
		list_del_init(&epi->fllink);
	spin_unlock(&tfile->f_lock);

	rb_erase(&epi->rbn, &ep->rbr);

error_unregister:
	ep_unregister_pollwait(ep, epi);

	/*
	 * We need to do this because an event could have been arrived on some
	 * allocated wait queue. Note that we don't care about the ep->ovflist
	 * list, since that is used/cleaned only inside a section bound by "mtx".
	 * And ep_insert() is called with "mtx" held.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	wakeup_source_unregister(epi->ws);

error_create_wakeup_source:
	kmem_cache_free(epi_cache, epi);

	return error;
}

/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status. Must be called with "mtx" held.
 */
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
	int pwake = 0;
	unsigned int revents;
	poll_table pt;

	init_poll_funcptr(&pt, NULL);

	/*
	 * Set the new event interest mask before calling f_op->poll();
	 * otherwise we might miss an event that happens between the
	 * f_op->poll() call and the new event set registering.
	 */
	epi->event.events = event->events;
	pt._key = event->events;
	epi->event.data = event->data; /* protected by mtx */
	if (epi->event.events & EPOLLWAKEUP) {
		if (!epi->ws)
			ep_create_wakeup_source(epi);
	} else if (epi->ws) {
		ep_destroy_wakeup_source(epi);
	}

	/*
	 * Get current event bits. We can safely use the file* here because
	 * its usage count has been increased by the caller of this function.
	 */
	revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt);

	/*
	 * If the item is "hot" and it is not registered inside the ready
	 * list, push it inside.
*/
	if (revents & event->events) {
		spin_lock_irq(&ep->lock);
		if (!ep_is_linked(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);
			__pm_stay_awake(epi->ws);

			/* Notify waiting tasks that events are available */
			if (waitqueue_active(&ep->wq))
				wake_up_locked(&ep->wq);
			if (waitqueue_active(&ep->poll_wait))
				pwake++;
		}
		spin_unlock_irq(&ep->lock);
	}

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 0;
}

/*
 * Scan callback for ep_send_events(): copies ready events into the
 * userspace buffer in esed->events, honoring EPOLLONESHOT and re-queueing
 * level-triggered items. Returns the count delivered, or -EFAULT if the
 * very first copy to userspace fails.
 */
static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct ep_send_events_data *esed = priv;
	int eventcnt;
	unsigned int revents;
	struct epitem *epi;
	struct epoll_event __user *uevent;
	poll_table pt;

	init_poll_funcptr(&pt, NULL);

	/*
	 * We can loop without lock because we are passed a task private list.
	 * Items cannot vanish during the loop because ep_scan_ready_list() is
	 * holding "mtx" during this call.
	 */
	for (eventcnt = 0, uevent = esed->events;
	     !list_empty(head) && eventcnt < esed->maxevents;) {
		epi = list_first_entry(head, struct epitem, rdllink);

		/*
		 * Activate ep->ws before deactivating epi->ws to prevent
		 * triggering auto-suspend here (in case we reactive epi->ws
		 * below).
		 *
		 * This could be rearranged to delay the deactivation of epi->ws
		 * instead, but then epi->ws would temporarily be out of sync
		 * with ep_is_linked().
		 */
		if (epi->ws && epi->ws->active)
			__pm_stay_awake(ep->ws);
		__pm_relax(epi->ws);
		list_del_init(&epi->rdllink);

		pt._key = epi->event.events;
		revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
			epi->event.events;

		/*
		 * If the event mask intersect the caller-requested one,
		 * deliver the event to userspace. Again, ep_scan_ready_list()
		 * is holding "mtx", so no operations coming from userspace
		 * can change the item.
		 */
		if (revents) {
			if (__put_user(revents, &uevent->events) ||
			    __put_user(epi->event.data, &uevent->data)) {
				list_add(&epi->rdllink, head);
				__pm_stay_awake(epi->ws);
				return eventcnt ? eventcnt : -EFAULT;
			}
			eventcnt++;
			uevent++;
			if (epi->event.events & EPOLLONESHOT)
				epi->event.events &= EP_PRIVATE_BITS;
			else if (!(epi->event.events & EPOLLET)) {
				/*
				 * If this file has been added with Level
				 * Trigger mode, we need to insert back inside
				 * the ready list, so that the next call to
				 * epoll_wait() will check again the events
				 * availability. At this point, no one can insert
				 * into ep->rdllist besides us. The epoll_ctl()
				 * callers are locked out by
				 * ep_scan_ready_list() holding "mtx" and the
				 * poll callback will queue them in ep->ovflist.
				 */
				list_add_tail(&epi->rdllink, &ep->rdllist);
				__pm_stay_awake(epi->ws);
			}
		}
	}

	return eventcnt;
}

/* Harvest ready events into the userspace buffer via ep_scan_ready_list() */
static int ep_send_events(struct eventpoll *ep,
			  struct epoll_event __user *events, int maxevents)
{
	struct ep_send_events_data esed;

	esed.maxevents = maxevents;
	esed.events = events;

	return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
}

/* Convert a relative millisecond timeout into an absolute timespec deadline */
static inline struct timespec ep_set_mstimeout(long ms)
{
	struct timespec now, ts = {
		.tv_sec = ms / MSEC_PER_SEC,
		.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
	};

	ktime_get_ts(&now);
	return timespec_add_safe(now, ts);
}

/**
 * ep_poll - Retrieves ready events, and delivers them to the caller supplied
 *           event buffer.
 *
 * @ep: Pointer to the eventpoll context.
 * @events: Pointer to the userspace buffer where the ready events should be
 *          stored.
 * @maxevents: Size (in terms of number of events) of the caller event buffer.
 * @timeout: Maximum timeout for the ready events fetch operation, in
 *           milliseconds. If the @timeout is zero, the function will not block,
 *           while if the @timeout is less than zero, the function will block
 *           until at least one event has been retrieved (or an error
 *           occurred).
 *
 * Returns: Returns the number of ready events which have been fetched, or an
 *          error code, in case of error.
*/ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, int maxevents, long timeout) { int res = 0, eavail, timed_out = 0; unsigned long flags; long slack = 0; wait_queue_t wait; ktime_t expires, *to = NULL; if (timeout > 0) { struct timespec end_time = ep_set_mstimeout(timeout); slack = select_estimate_accuracy(&end_time); to = &expires; *to = timespec_to_ktime(end_time); } else if (timeout == 0) { /* * Avoid the unnecessary trip to the wait queue loop, if the * caller specified a non blocking operation. */ timed_out = 1; spin_lock_irqsave(&ep->lock, flags); goto check_events; } fetch_events: spin_lock_irqsave(&ep->lock, flags); if (!ep_events_available(ep)) { /* * We don't have any available event to return to the caller. * We need to sleep here, and we will be wake up by * ep_poll_callback() when events will become available. */ init_waitqueue_entry(&wait, current); __add_wait_queue_exclusive(&ep->wq, &wait); for (;;) { /* * We don't want to sleep if the ep_poll_callback() sends us * a wakeup in between. That's why we set the task state * to TASK_INTERRUPTIBLE before doing the checks. */ set_current_state(TASK_INTERRUPTIBLE); if (ep_events_available(ep) || timed_out) break; if (signal_pending(current)) { res = -EINTR; break; } spin_unlock_irqrestore(&ep->lock, flags); if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) timed_out = 1; spin_lock_irqsave(&ep->lock, flags); } __remove_wait_queue(&ep->wq, &wait); set_current_state(TASK_RUNNING); } check_events: /* Is it worth to try to dig for events ? */ eavail = ep_events_available(ep); spin_unlock_irqrestore(&ep->lock, flags); /* * Try to transfer events to user space. In case we get 0 events and * there's still timeout left over, we go trying again in search of * more luck. 
*/
	if (!res && eavail &&
	    !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
		goto fetch_events;

	return res;
}

/**
 * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
 *                      API, to verify that adding an epoll file inside another
 *                      epoll structure, does not violate the constraints, in
 *                      terms of closed loops, or too deep chains (which can
 *                      result in excessive stack usage).
 *
 * @priv: Pointer to the epoll file to be currently checked.
 * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
 *          data structure pointer.
 * @call_nests: Current depth of the @ep_call_nested() call stack.
 *
 * Returns: Returns zero if adding the epoll @file inside current epoll
 *          structure @ep does not violate the constraints, or -1 otherwise.
 */
static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
{
	int error = 0;
	struct file *file = priv;
	struct eventpoll *ep = file->private_data;
	struct eventpoll *ep_tovisit;
	struct rb_node *rbp;
	struct epitem *epi;

	mutex_lock_nested(&ep->mtx, call_nests + 1);
	/* Mark this ep as visited so cycles do not recurse forever. */
	ep->visited = 1;
	list_add(&ep->visited_list_link, &visited_list);
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);
		if (unlikely(is_file_epoll(epi->ffd.file))) {
			/* Nested epoll file: recurse, bounded by EP_MAX_NESTS. */
			ep_tovisit = epi->ffd.file->private_data;
			if (ep_tovisit->visited)
				continue;
			error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
					ep_loop_check_proc, epi->ffd.file,
					ep_tovisit, current);
			if (error != 0)
				break;
		} else {
			/*
			 * If we've reached a file that is not associated with
			 * an ep, then we need to check if the newly added
			 * links are going to add too many wakeup paths. We do
			 * this by adding it to the tfile_check_list, if it's
			 * not already there, and calling reverse_path_check()
			 * during ep_insert().
*/
			if (list_empty(&epi->ffd.file->f_tfile_llink))
				list_add(&epi->ffd.file->f_tfile_llink,
					 &tfile_check_list);
		}
	}
	mutex_unlock(&ep->mtx);

	return error;
}

/**
 * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
 *                 another epoll file (represented by @ep) does not create
 *                 closed loops or too deep chains.
 *
 * @ep: Pointer to the epoll private data structure.
 * @file: Pointer to the epoll file to be checked.
 *
 * Returns: Returns zero if adding the epoll @file inside current epoll
 *          structure @ep does not violate the constraints, or -1 otherwise.
 */
static int ep_loop_check(struct eventpoll *ep, struct file *file)
{
	int ret;
	struct eventpoll *ep_cur, *ep_next;

	ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
			      ep_loop_check_proc, file, ep, current);
	/* clear visited list */
	list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
							visited_list_link) {
		ep_cur->visited = 0;
		list_del(&ep_cur->visited_list_link);
	}
	return ret;
}

/* Drain the global tfile_check_list built while loop-checking an ADD. */
static void clear_tfile_check_list(void)
{
	struct file *file;

	/* first clear the tfile_check_list */
	while (!list_empty(&tfile_check_list)) {
		file = list_first_entry(&tfile_check_list, struct file,
					f_tfile_llink);
		list_del_init(&file->f_tfile_llink);
	}
	INIT_LIST_HEAD(&tfile_check_list);
}

/*
 * Open an eventpoll file descriptor.
 */
SYSCALL_DEFINE1(epoll_create1, int, flags)
{
	int error, fd;
	struct eventpoll *ep = NULL;
	struct file *file;

	/* Check the EPOLL_* constant for consistency.  */
	BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);

	if (flags & ~EPOLL_CLOEXEC)
		return -EINVAL;
	/*
	 * Create the internal data structure ("struct eventpoll").
	 */
	error = ep_alloc(&ep);
	if (error < 0)
		return error;
	/*
	 * Creates all the items needed to setup an eventpoll file. That is,
	 * a file structure and a free file descriptor.
*/
	fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
	if (fd < 0) {
		error = fd;
		goto out_free_ep;
	}
	file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
				 O_RDWR | (flags & O_CLOEXEC));
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto out_free_fd;
	}
	fd_install(fd, file);
	ep->file = file;
	return fd;

out_free_fd:
	put_unused_fd(fd);
out_free_ep:
	ep_free(ep);
	return error;
}

/* Legacy entry point: the "size" hint is only sanity-checked, then ignored. */
SYSCALL_DEFINE1(epoll_create, int, size)
{
	if (size <= 0)
		return -EINVAL;

	return sys_epoll_create1(0);
}

/*
 * The following function implements the controller interface for
 * the eventpoll file that enables the insertion/removal/change of
 * file descriptors inside the interest set.
 */
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
		struct epoll_event __user *, event)
{
	int error;
	int did_lock_epmutex = 0;
	struct file *file, *tfile;
	struct eventpoll *ep;
	struct epitem *epi;
	struct epoll_event epds;

	error = -EFAULT;
	if (ep_op_has_event(op) &&
	    copy_from_user(&epds, event, sizeof(struct epoll_event)))
		goto error_return;

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto error_return;

	/* Get the "struct file *" for the target file */
	tfile = fget(fd);
	if (!tfile)
		goto error_fput;

	/* The target file descriptor must support poll */
	error = -EPERM;
	if (!tfile->f_op || !tfile->f_op->poll)
		goto error_tgt_fput;

	/* Check if EPOLLWAKEUP is allowed */
	if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
		epds.events &= ~EPOLLWAKEUP;

	/*
	 * We have to check that the file structure underneath the file descriptor
	 * the user passed to us _is_ an eventpoll file. And also we do not permit
	 * adding an epoll file descriptor inside itself.
	 */
	error = -EINVAL;
	if (file == tfile || !is_file_epoll(file))
		goto error_tgt_fput;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
*/
	ep = file->private_data;

	/*
	 * When we insert an epoll file descriptor, inside another epoll file
	 * descriptor, there is the change of creating closed loops, which are
	 * better be handled here, than in more critical paths. While we are
	 * checking for loops we also determine the list of files reachable
	 * and hang them on the tfile_check_list, so we can check that we
	 * haven't created too many possible wakeup paths.
	 *
	 * We need to hold the epmutex across both ep_insert and ep_remove
	 * b/c we want to make sure we are looking at a coherent view of
	 * epoll network.
	 */
	if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
		mutex_lock(&epmutex);
		did_lock_epmutex = 1;
	}
	if (op == EPOLL_CTL_ADD) {
		if (is_file_epoll(tfile)) {
			/* Adding epoll-inside-epoll: run the loop/depth check. */
			error = -ELOOP;
			if (ep_loop_check(ep, tfile) != 0) {
				clear_tfile_check_list();
				goto error_tgt_fput;
			}
		} else
			list_add(&tfile->f_tfile_llink, &tfile_check_list);
	}

	mutex_lock_nested(&ep->mtx, 0);

	/*
	 * Try to lookup the file inside our RB tree, Since we grabbed "mtx"
	 * above, we can be sure to be able to use the item looked up by
	 * ep_find() till we release the mutex.
	 */
	epi = ep_find(ep, tfile, fd);

	error = -EINVAL;
	switch (op) {
	case EPOLL_CTL_ADD:
		if (!epi) {
			/* POLLERR and POLLHUP are always reported. */
			epds.events |= POLLERR | POLLHUP;
			error = ep_insert(ep, &epds, tfile, fd);
		} else
			error = -EEXIST;
		clear_tfile_check_list();
		break;
	case EPOLL_CTL_DEL:
		if (epi)
			error = ep_remove(ep, epi);
		else
			error = -ENOENT;
		break;
	case EPOLL_CTL_MOD:
		if (epi) {
			epds.events |= POLLERR | POLLHUP;
			error = ep_modify(ep, epi, &epds);
		} else
			error = -ENOENT;
		break;
	}
	mutex_unlock(&ep->mtx);

error_tgt_fput:
	if (did_lock_epmutex)
		mutex_unlock(&epmutex);

	fput(tfile);
error_fput:
	fput(file);
error_return:

	return error;
}

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_wait(2).
*/
SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
		int, maxevents, int, timeout)
{
	int error;
	struct file *file;
	struct eventpoll *ep;

	/* The maximum number of event must be greater than zero */
	if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
		return -EINVAL;

	/* Verify that the area passed by the user is writeable */
	if (!access_ok(VERIFY_WRITE, events,
			maxevents * sizeof(struct epoll_event))) {
		error = -EFAULT;
		goto error_return;
	}

	/* Get the "struct file *" for the eventpoll file */
	error = -EBADF;
	file = fget(epfd);
	if (!file)
		goto error_return;

	/*
	 * We have to check that the file structure underneath the fd
	 * the user passed to us _is_ an eventpoll file.
	 */
	error = -EINVAL;
	if (!is_file_epoll(file))
		goto error_fput;

	/*
	 * At this point it is safe to assume that the "private_data" contains
	 * our own data structure.
	 */
	ep = file->private_data;

	/* Time to fish for events ... */
	error = ep_poll(ep, events, maxevents, timeout);

error_fput:
	fput(file);
error_return:

	return error;
}

/*
 * Implement the event wait interface for the eventpoll file. It is the kernel
 * part of the user space epoll_pwait(2).
 */
SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
		int, maxevents, int, timeout, const sigset_t __user *, sigmask,
		size_t, sigsetsize)
{
	int error;
	sigset_t ksigmask, sigsaved;

	/*
	 * If the caller wants a certain signal mask to be set during the wait,
	 * we apply it here.
	 */
	if (sigmask) {
		if (sigsetsize != sizeof(sigset_t))
			return -EINVAL;
		if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
			return -EFAULT;
		/* SIGKILL and SIGSTOP may never be blocked. */
		sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
		sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
	}

	error = sys_epoll_wait(epfd, events, maxevents, timeout);

	/*
	 * If we changed the signal mask, we need to restore the original one.
* In case we've got a signal while waiting, we do not restore the
	 * signal mask yet, and we allow do_signal() to deliver the signal on
	 * the way back to userspace, before the signal mask is restored.
	 */
	if (sigmask) {
		if (error == -EINTR) {
			memcpy(&current->saved_sigmask, &sigsaved,
			       sizeof(sigsaved));
			set_restore_sigmask();
		} else
			sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	}

	return error;
}

/* One-time setup: watch limit, nested-call trackers, and slab caches. */
static int __init eventpoll_init(void)
{
	struct sysinfo si;

	si_meminfo(&si);
	/*
	 * Allows top 4% of lomem to be allocated for epoll watches (per user).
	 */
	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
		EP_ITEM_COST;
	BUG_ON(max_user_watches < 0);

	/*
	 * Initialize the structure used to perform epoll file descriptor
	 * inclusion loops checks.
	 */
	ep_nested_calls_init(&poll_loop_ncalls);

	/* Initialize the structure used to perform safe poll wait head wake ups */
	ep_nested_calls_init(&poll_safewake_ncalls);

	/* Initialize the structure used to perform file's f_op->poll() calls */
	ep_nested_calls_init(&poll_readywalk_ncalls);

	/* Allocates slab cache used to allocate "struct epitem" items */
	epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/* Allocates slab cache used to allocate "struct eppoll_entry" */
	pwq_cache = kmem_cache_create("eventpoll_pwq",
			sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL);

	return 0;
}
fs_initcall(eventpoll_init);
gpl-2.0
yang0508/kernel_samsung_smdk4412
drivers/media/video/samsung/jpeg/jpeg_dev.c
305
11651
/* linux/drivers/media/video/samsung/jpeg/jpeg_dev.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * Core file for Samsung Jpeg Interface driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/miscdevice.h> #include <linux/platform_device.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/signal.h> #include <linux/ioport.h> #include <linux/kmod.h> #include <linux/vmalloc.h> #include <linux/time.h> #include <linux/clk.h> #include <linux/semaphore.h> #include <linux/vmalloc.h> #include <asm/page.h> #include <linux/sched.h> #include <plat/regs_jpeg.h> #include <mach/irqs.h> #if defined(CONFIG_CPU_S5PV210) #include <mach/pd.h> #endif #if defined(CONFIG_S5P_SYSMMU_JPEG) #include <plat/sysmmu.h> #endif #ifdef CONFIG_PM_RUNTIME #include <linux/pm_runtime.h> #endif #include "jpeg_core.h" #include "jpeg_dev.h" #include "jpeg_mem.h" struct jpeg_control *jpeg_ctrl; static struct device *jpeg_pm; static int jpeg_open(struct inode *inode, struct file *file) { int ret; int in_use; mutex_lock(&jpeg_ctrl->lock); in_use = atomic_read(&jpeg_ctrl->in_use); if (in_use > JPEG_MAX_INSTANCE) { ret = -EBUSY; goto resource_busy; } else { atomic_inc(&jpeg_ctrl->in_use); jpeg_info("jpeg driver opened.\n"); } mutex_unlock(&jpeg_ctrl->lock); #if defined(CONFIG_CPU_S5PV210) ret = s5pv210_pd_enable("jpeg_pd"); if (ret < 0) { jpeg_err("failed to enable jpeg power domain\n"); return -EINVAL; } #endif /* clock enable */ clk_enable(jpeg_ctrl->clk); file->private_data = (struct jpeg_control *)jpeg_ctrl; #ifdef CONFIG_PM_RUNTIME pm_runtime_get_sync(jpeg_pm); 
#endif
	return 0;

resource_busy:
	mutex_unlock(&jpeg_ctrl->lock);
	return ret;
}

/* Releases a handle: frees memory, gates the clock, drops power refs. */
static int jpeg_release(struct inode *inode, struct file *file)
{
	atomic_dec(&jpeg_ctrl->in_use);

	jpeg_mem_free();

	clk_disable(jpeg_ctrl->clk);

#if defined(CONFIG_CPU_S5PV210)
	if (s5pv210_pd_disable("jpeg_pd") < 0) {
		jpeg_err("failed to disable jpeg power domain\n");
		return -EINVAL;
	}
#endif

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_put_sync(jpeg_pm);
#endif
	return 0;
}

/*
 * ioctl dispatcher for encode/decode execution, buffer queries and
 * parameter setup.
 *
 * NOTE(review): the copy_from_user()/copy_to_user() results assigned to
 * "ret" are never checked, and several paths return -1 rather than a
 * proper -E* code; user copy failures are silently ignored — verify and
 * harden these paths.
 */
static long jpeg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct jpeg_control *ctrl;

	ctrl = (struct jpeg_control *)file->private_data;
	if (!ctrl) {
		jpeg_err("jpeg invalid input argument\n");
		return -1;
	}

	switch (cmd) {
	case IOCTL_JPEG_DEC_EXE:
		ret = copy_from_user(&ctrl->dec_param,
			(struct jpeg_dec_param *)arg,
			sizeof(struct jpeg_dec_param));
		jpeg_exe_dec(ctrl);
		ret = copy_to_user((void *)arg, (void *) &ctrl->dec_param,
			sizeof(struct jpeg_dec_param));
		break;
	case IOCTL_JPEG_ENC_EXE:
		ret = copy_from_user(&ctrl->enc_param,
			(struct jpeg_enc_param *)arg,
			sizeof(struct jpeg_enc_param));
		jpeg_exe_enc(ctrl);
		ret = copy_to_user((void *)arg, (void *) &ctrl->enc_param,
			sizeof(struct jpeg_enc_param));
		break;
	case IOCTL_GET_DEC_IN_BUF:
	case IOCTL_GET_ENC_OUT_BUF:
		return jpeg_get_stream_buf(arg);
	case IOCTL_GET_DEC_OUT_BUF:
	case IOCTL_GET_ENC_IN_BUF:
		return jpeg_get_frame_buf(arg);
	case IOCTL_GET_PHYADDR:
		return jpeg_ctrl->mem.frame_data_addr;
	case IOCTL_GET_PHYMEM_BASE:
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_JPEG
		if (copy_to_user((void *)arg, &jpeg_ctrl->mem.base,
				sizeof(unsigned int))) {
			jpeg_err("IOCTL_GET_PHYMEM_BASE:::copy_to_user error\n");
			return -1;
		}
		return 0;
#else
		return -1;
#endif
	case IOCTL_GET_PHYMEM_SIZE:
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_JPEG
		ret = CONFIG_VIDEO_SAMSUNG_MEMSIZE_JPEG * 1024;
		if (copy_to_user((void *)arg, &ret,
				sizeof(unsigned int))) {
			jpeg_err("IOCTL_GET_PHYMEM_SIZE:::copy_to_user error\n");
			return -1;
		}
		return 0;
#else
		return -1;
#endif
	case IOCTL_SET_DEC_PARAM:
		ret =
copy_from_user(&ctrl->dec_param,
			(struct jpeg_dec_param *)arg,
			sizeof(struct jpeg_dec_param));
		ret = jpeg_set_dec_param(ctrl);
		break;
	case IOCTL_SET_ENC_PARAM:
		ret = copy_from_user(&ctrl->enc_param,
			(struct jpeg_enc_param *)arg,
			sizeof(struct jpeg_enc_param));
		ret = jpeg_set_enc_param(ctrl);
		break;
	default:
		/* Unknown commands fall through and report success. */
		break;
	}

	return 0;
}

/*
 * Map the driver's frame buffer into user space, non-cached.
 * With the SysMMU the backing store is vmalloc'd and must be remapped
 * page by page; otherwise one physically contiguous remap suffices.
 */
int jpeg_mmap(struct file *filp, struct vm_area_struct *vma)
{
#if defined(CONFIG_S5P_SYSMMU_JPEG)
#if !defined(CONFIG_S5P_VMEM)
	unsigned long page_frame_no;
	unsigned long start;
	unsigned long size;
	char *ptr; /* vmalloc */

	size = vma->vm_end - vma->vm_start;
	ptr = (char *)jpeg_ctrl->mem.base;
	start = 0;

	vma->vm_flags |= VM_RESERVED | VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	while (size > 0) {
		page_frame_no = vmalloc_to_pfn(ptr);
		if (remap_pfn_range(vma, vma->vm_start + start, page_frame_no,
			PAGE_SIZE, vma->vm_page_prot)) {
			jpeg_err("failed to remap jpeg pfn range.\n");
			return -ENOMEM;
		}

		start += PAGE_SIZE;
		ptr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
#endif /* CONFIG_S5P_VMEM */
#else
	unsigned long page_frame_no;
	unsigned long size;
	int ret;

	size = vma->vm_end - vma->vm_start;

	vma->vm_flags |= VM_RESERVED | VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	page_frame_no = __phys_to_pfn(jpeg_ctrl->mem.base);
	ret = remap_pfn_range(vma, vma->vm_start, page_frame_no,
		size, vma->vm_page_prot);
	if (ret != 0) {
		jpeg_err("failed to remap jpeg pfn range.\n");
		return -ENOMEM;
	}
#endif /* SYSMMU_JPEG_ON */
	return 0;
}

static const struct file_operations jpeg_fops = {
	.owner = THIS_MODULE,
	.open = jpeg_open,
	.release = jpeg_release,
	.unlocked_ioctl = jpeg_ioctl,
	.mmap = jpeg_mmap,
};

static struct miscdevice jpeg_miscdev = {
	.minor = JPEG_MINOR_NUMBER,
	.name = JPEG_NAME,
	.fops = &jpeg_fops,
};

/*
 * IRQ handler: translate the pending-interrupt status into an irq_ret
 * code and wake whoever is waiting on ctrl->wq.
 */
static irqreturn_t jpeg_irq(int irq, void *dev_id)
{
	unsigned int int_status;
	struct jpeg_control *ctrl = (struct jpeg_control *) dev_id;

	int_status = jpeg_int_pending(ctrl);

	if (int_status) {
		switch (int_status) {
		case 0x40:
ctrl->irq_ret = OK_ENC_OR_DEC;
			break;
		case 0x20:
			ctrl->irq_ret = ERR_ENC_OR_DEC;
			break;
		default:
			ctrl->irq_ret = ERR_UNKNOWN;
		}
		wake_up_interruptible(&ctrl->wq);
	} else {
		/* Spurious interrupt with nothing pending. */
		ctrl->irq_ret = ERR_UNKNOWN;
		wake_up_interruptible(&ctrl->wq);
	}

	return IRQ_HANDLED;
}

/* One-time controller state init (SysMMU, instance count, lock, waitq). */
static int jpeg_setup_controller(struct jpeg_control *ctrl)
{
#if defined(CONFIG_S5P_SYSMMU_JPEG)
	s5p_sysmmu_enable(jpeg_pm);
	jpeg_dbg("sysmmu on\n");

	/* jpeg hw uses kernel virtual address */
	s5p_sysmmu_set_tablebase_pgd(jpeg_pm, __pa(swapper_pg_dir));
#endif
	atomic_set(&ctrl->in_use, 0);
	mutex_init(&ctrl->lock);
	init_waitqueue_head(&ctrl->wq);

	return 0;
}

/* Probe: allocate the context, claim MMIO/IRQ/clock, register misc dev. */
static int jpeg_probe(struct platform_device *pdev)
{
	struct resource *res;
	int ret;

	/* global structure */
	jpeg_ctrl = kzalloc(sizeof(*jpeg_ctrl), GFP_KERNEL);
	if (!jpeg_ctrl) {
		dev_err(&pdev->dev, "%s: not enough memory\n", __func__);
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* setup jpeg control */
	ret = jpeg_setup_controller(jpeg_ctrl);
	if (ret) {
		jpeg_err("failed to setup controller\n");
		goto err_setup;
	}

	/* memory region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		jpeg_err("failed to get jpeg memory region resource\n");
		ret = -ENOENT;
		goto err_res;
	}

	res = request_mem_region(res->start, res->end - res->start + 1,
			pdev->name);
	if (!res) {
		jpeg_err("failed to request jpeg io memory region\n");
		ret = -ENOMEM;
		goto err_region;
	}

	/* ioremap */
	jpeg_ctrl->reg_base = ioremap(res->start, res->end - res->start + 1);
	if (!jpeg_ctrl->reg_base) {
		jpeg_err("failed to remap jpeg io region\n");
		ret = -ENOENT;
		goto err_map;
	}

	/* irq */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		jpeg_err("failed to request jpeg irq resource\n");
		ret = -ENOENT;
		goto err_irq;
	}

	jpeg_ctrl->irq_no = res->start;
	ret = request_irq(jpeg_ctrl->irq_no, (void *)jpeg_irq,
			IRQF_DISABLED, pdev->name, jpeg_ctrl);
	if (ret != 0) {
		jpeg_err("failed to jpeg request irq\n");
		ret = -ENOENT;
		goto err_irq;
	}

	/* clock */
	jpeg_ctrl->clk = clk_get(&pdev->dev, "jpeg");
	if
(IS_ERR(jpeg_ctrl->clk)) {
		jpeg_err("failed to find jpeg clock source\n");
		ret = -ENOENT;
		goto err_clk;
	}

	ret = jpeg_init_mem(&pdev->dev, &jpeg_ctrl->mem.base);
	if (ret != 0) {
		jpeg_err("failed to init. jpeg mem");
		ret = -ENOMEM;
		goto err_mem;
	}

	ret = misc_register(&jpeg_miscdev);
	if (ret) {
		jpeg_err("failed to register misc driver\n");
		goto err_reg;
	}

	jpeg_pm = &pdev->dev;
#ifdef CONFIG_PM_RUNTIME
	pm_runtime_enable(jpeg_pm);
#endif
	return 0;

err_reg:
	clk_put(jpeg_ctrl->clk);
err_mem:
err_clk:
	free_irq(jpeg_ctrl->irq_no, NULL);
err_irq:
	iounmap(jpeg_ctrl->reg_base);
err_map:
err_region:
	/*
	 * NOTE(review): "res" here points at a resource owned by the
	 * platform core (platform_get_resource()) — kfree()ing it is wrong,
	 * and the region claimed by request_mem_region() is never released.
	 * This should be release_mem_region() on the claimed range, not
	 * kfree(res). Verify before changing, as labels are shared by
	 * several failure paths.
	 */
	kfree(res);
err_res:
	mutex_destroy(&jpeg_ctrl->lock);
err_setup:
	kfree(jpeg_ctrl);
err_alloc:
	return ret;
}

/* Remove: tear down in roughly reverse probe order. */
static int jpeg_remove(struct platform_device *dev)
{
#if defined(CONFIG_S5P_SYSMMU_JPEG)
	s5p_sysmmu_disable(jpeg_pm);
	jpeg_dbg("sysmmu off\n");
#endif
	free_irq(jpeg_ctrl->irq_no, dev);

	mutex_destroy(&jpeg_ctrl->lock);
	iounmap(jpeg_ctrl->reg_base);

	kfree(jpeg_ctrl);
	misc_deregister(&jpeg_miscdev);
#ifdef CONFIG_PM_RUNTIME
	pm_runtime_disable(jpeg_pm);
#endif
	return 0;
}

static int jpeg_suspend(struct platform_device *pdev, pm_message_t state)
{
	/* clock disable */
	clk_disable(jpeg_ctrl->clk);
#if defined(CONFIG_CPU_S5PV210)
	if (s5pv210_pd_disable("jpeg_pd") < 0) {
		jpeg_err("failed to disable jpeg power domain\n");
		return -EINVAL;
	}
#endif
	return 0;
}

static int jpeg_resume(struct platform_device *pdev)
{
#if defined(CONFIG_CPU_S5PV210)
	if (s5pv210_pd_enable("jpeg_pd") < 0) {
		jpeg_err("failed to enable jpeg power domain\n");
		return -EINVAL;
	}
#endif
	/* clock enable */
	clk_enable(jpeg_ctrl->clk);

	return 0;
}

/* dev_pm_ops shim: delegate to the platform-style suspend callback. */
int jpeg_suspend_pd(struct device *dev)
{
	struct platform_device *pdev;
	int ret;
	pm_message_t state;

	state.event = 0;
	pdev = to_platform_device(dev);
	ret = jpeg_suspend(pdev, state);

	return 0;
}

/* dev_pm_ops shim: delegate to the platform-style resume callback. */
int jpeg_resume_pd(struct device *dev)
{
	struct platform_device *pdev;
	int ret;

	pdev = to_platform_device(dev);
	ret = jpeg_resume(pdev);

	return 0;
}

#ifdef CONFIG_PM_RUNTIME
static
int jpeg_runtime_suspend(struct device *dev)
{
	/* No device-specific work; runtime PM bookkeeping only. */
	return 0;
}

static int jpeg_runtime_resume(struct device *dev)
{
	/* No device-specific work; runtime PM bookkeeping only. */
	return 0;
}
#endif

static const struct dev_pm_ops jpeg_pm_ops = {
	.suspend = jpeg_suspend_pd,
	.resume = jpeg_resume_pd,
#ifdef CONFIG_PM_RUNTIME
	.runtime_suspend = jpeg_runtime_suspend,
	.runtime_resume = jpeg_runtime_resume,
#endif
};

static struct platform_driver jpeg_driver = {
	.probe = jpeg_probe,
	.remove = jpeg_remove,
	/* Legacy callbacks only when the PD/runtime-PM pair is not in use. */
#if (!defined(CONFIG_S5PV310_DEV_PD) || !defined(CONFIG_PM_RUNTIME))
	.suspend = jpeg_suspend,
	.resume = jpeg_resume,
#endif
	.driver = {
		.owner = THIS_MODULE,
		.name = JPEG_NAME,
#if (defined(CONFIG_S5PV310_DEV_PD) && defined(CONFIG_PM_RUNTIME))
		.pm = &jpeg_pm_ops,
#else
		.pm = NULL,
#endif
	},
};

static int __init jpeg_init(void)
{
	printk("Initialize JPEG driver\n");

	platform_driver_register(&jpeg_driver);

	return 0;
}

static void __exit jpeg_exit(void)
{
	platform_driver_unregister(&jpeg_driver);
}

module_init(jpeg_init);
module_exit(jpeg_exit);

MODULE_AUTHOR("Hyunmin, Kwak <hyunmin.kwak@samsung.com>");
MODULE_DESCRIPTION("JPEG Codec Device Driver");
MODULE_LICENSE("GPL");
gpl-2.0
amir73il/ext4-snapshots
drivers/staging/intel_sst/intel_sst_app_interface.c
561
38554
/* * intel_sst_interface.c - Intel SST Driver for audio engine * * Copyright (C) 2008-10 Intel Corp * Authors: Vinod Koul <vinod.koul@intel.com> * Harsha Priya <priya.harsha@intel.com> * Dharageswari R <dharageswari.r@intel.com> * Jeeja KP <jeeja.kp@intel.com> * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * This driver exposes the audio engine functionalities to the ALSA * and middleware. 
* Upper layer interfaces (MAD driver, MMF) to SST driver
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/aio.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
#include <linux/ioctl.h>
#ifdef CONFIG_MRST_RAR_HANDLER
#include <linux/rar_register.h>
#include "../../../drivers/staging/memrar/memrar.h"
#endif
#include "intel_sst.h"
#include "intel_sst_ioctl.h"
#include "intel_sst_fw_ipc.h"
#include "intel_sst_common.h"

#define AM_MODULE 1
#define STREAM_MODULE 0

/**
 * intel_sst_check_device - checks SST device
 *
 * This utility function checks the state of SST device and downloads FW if
 * not done, or resumes the device if suspended
 */
static int intel_sst_check_device(void)
{
	int retval = 0;

	if (sst_drv_ctx->pmic_state != SND_MAD_INIT_DONE) {
		pr_warn("Sound card not available\n");
		return -EIO;
	}
	if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
		pr_debug("Resuming from Suspended state\n");
		retval = intel_sst_resume(sst_drv_ctx->pci);
		if (retval) {
			pr_debug("Resume Failed= %#x,abort\n", retval);
			return retval;
		}
	}
	if (sst_drv_ctx->sst_state == SST_UN_INIT) {
		/* FW is not downloaded */
		retval = sst_download_fw();
		if (retval)
			return -ENODEV;
		if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
			/* Re-apply the saved RX timeslot after a fresh FW load. */
			retval = sst_drv_ctx->rx_time_slot_status;
			if (retval != RX_TIMESLOT_UNINIT
					&& sst_drv_ctx->pmic_vendor != SND_NC)
				sst_enable_rx_timeslot(retval);
		}
	}
	return 0;
}

/**
 * intel_sst_open - opens a handle to driver
 *
 * @i_node: inode structure
 * @file_ptr: pointer to file
 *
 * This function is called by OS when a user space component
 * tries to get a driver handle.
Only one handle at a time
 * will be allowed
 */
int intel_sst_open(struct inode *i_node, struct file *file_ptr)
{
	/*
	 * NOTE(review): retval is unsigned int but is assigned -EUSERS and
	 * returned through an int return type; it works via conversion but
	 * "int" would express the intent — confirm and clean up.
	 */
	unsigned int retval;

	mutex_lock(&sst_drv_ctx->stream_lock);
	pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
	retval = intel_sst_check_device();
	if (retval) {
		/* Device not ready: drop the PM reference taken above. */
		pm_runtime_put(&sst_drv_ctx->pci->dev);
		mutex_unlock(&sst_drv_ctx->stream_lock);
		return retval;
	}

	if (sst_drv_ctx->encoded_cnt < MAX_ENC_STREAM) {
		struct ioctl_pvt_data *data =
			kzalloc(sizeof(struct ioctl_pvt_data), GFP_KERNEL);
		if (!data) {
			pm_runtime_put(&sst_drv_ctx->pci->dev);
			mutex_unlock(&sst_drv_ctx->stream_lock);
			return -ENOMEM;
		}

		sst_drv_ctx->encoded_cnt++;
		mutex_unlock(&sst_drv_ctx->stream_lock);
		data->pvt_id = sst_assign_pvt_id(sst_drv_ctx);
		data->str_id = 0;
		file_ptr->private_data = (void *)data;
		pr_debug("pvt_id handle = %d!\n", data->pvt_id);
	} else {
		retval = -EUSERS;
		pm_runtime_put(&sst_drv_ctx->pci->dev);
		mutex_unlock(&sst_drv_ctx->stream_lock);
	}
	return retval;
}

/**
 * intel_sst_open_cntrl - opens a handle to driver
 *
 * @i_node: inode structure
 * @file_ptr: pointer to file
 *
 * This function is called by OS when a user space component
 * tries to get a driver handle to /dev/intel_sst_control.
* Only one handle at a time will be allowed
 * This is for control operations only
 */
int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr)
{
	unsigned int retval;

	/* audio manager open */
	mutex_lock(&sst_drv_ctx->stream_lock);
	pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
	retval = intel_sst_check_device();
	if (retval) {
		pm_runtime_put(&sst_drv_ctx->pci->dev);
		mutex_unlock(&sst_drv_ctx->stream_lock);
		return retval;
	}

	if (sst_drv_ctx->am_cnt < MAX_AM_HANDLES) {
		sst_drv_ctx->am_cnt++;
		pr_debug("AM handle opened...\n");
		file_ptr->private_data = NULL;
	} else {
		retval = -EACCES;
		pm_runtime_put(&sst_drv_ctx->pci->dev);
	}
	mutex_unlock(&sst_drv_ctx->stream_lock);
	return retval;
}

/**
 * intel_sst_release - releases a handle to driver
 *
 * @i_node: inode structure
 * @file_ptr: pointer to file
 *
 * This function is called by OS when a user space component
 * tries to release a driver handle.
 */
int intel_sst_release(struct inode *i_node, struct file *file_ptr)
{
	struct ioctl_pvt_data *data = file_ptr->private_data;

	pr_debug("Release called, closing app handle\n");
	mutex_lock(&sst_drv_ctx->stream_lock);
	sst_drv_ctx->encoded_cnt--;
	/*
	 * NOTE(review): stream_cnt is decremented unconditionally here even
	 * though open() does not increment it — confirm the counter pairing
	 * with the rest of the driver.
	 */
	sst_drv_ctx->stream_cnt--;
	pm_runtime_put(&sst_drv_ctx->pci->dev);
	mutex_unlock(&sst_drv_ctx->stream_lock);
	free_stream_context(data->str_id);
	kfree(data);
	return 0;
}

/* Counterpart of intel_sst_open_cntrl(): drops the audio-manager handle. */
int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr)
{
	/* audio manager close */
	mutex_lock(&sst_drv_ctx->stream_lock);
	sst_drv_ctx->am_cnt--;
	pm_runtime_put(&sst_drv_ctx->pci->dev);
	mutex_unlock(&sst_drv_ctx->stream_lock);
	pr_debug("AM handle closed\n");
	return 0;
}

/**
 * intel_sst_mmap - mmaps a kernel buffer to user space for copying data
 *
 * @vma: vm area structure instance
 * @file_ptr: pointer to file
 *
 * This function is called by OS when a user space component
 * tries to get mmap memory from driver
 */
int intel_sst_mmap(struct file *file_ptr, struct vm_area_struct *vma)
{
	int retval, length;
	struct ioctl_pvt_data *data =
		(struct ioctl_pvt_data
*)file_ptr->private_data; int str_id = data->str_id; void *mem_area; retval = sst_validate_strid(str_id); if (retval) return -EINVAL; length = vma->vm_end - vma->vm_start; pr_debug("called for stream %d length 0x%x\n", str_id, length); if (length > sst_drv_ctx->mmap_len) return -ENOMEM; if (!sst_drv_ctx->mmap_mem) return -EIO; /* round it up to the page boundary */ /*mem_area = (void *)((((unsigned long)sst_drv_ctx->mmap_mem) + PAGE_SIZE - 1) & PAGE_MASK);*/ mem_area = (void *) PAGE_ALIGN((unsigned int) sst_drv_ctx->mmap_mem); /* map the whole physically contiguous area in one piece */ retval = remap_pfn_range(vma, vma->vm_start, virt_to_phys((void *)mem_area) >> PAGE_SHIFT, length, vma->vm_page_prot); if (retval) sst_drv_ctx->streams[str_id].mmapped = false; else sst_drv_ctx->streams[str_id].mmapped = true; pr_debug("mmap ret 0x%x\n", retval); return retval; } /* sets mmap data buffers to play/capture*/ static int intel_sst_mmap_play_capture(u32 str_id, struct snd_sst_mmap_buffs *mmap_buf) { struct sst_stream_bufs *bufs; int retval, i; struct stream_info *stream; struct snd_sst_mmap_buff_entry *buf_entry; struct snd_sst_mmap_buff_entry *tmp_buf; pr_debug("called for str_id %d\n", str_id); retval = sst_validate_strid(str_id); if (retval) return -EINVAL; stream = &sst_drv_ctx->streams[str_id]; if (stream->mmapped != true) return -EIO; if (stream->status == STREAM_UN_INIT || stream->status == STREAM_DECODE) { return -EBADRQC; } stream->curr_bytes = 0; stream->cumm_bytes = 0; tmp_buf = kcalloc(mmap_buf->entries, sizeof(*tmp_buf), GFP_KERNEL); if (!tmp_buf) return -ENOMEM; if (copy_from_user(tmp_buf, (void __user *)mmap_buf->buff, mmap_buf->entries * sizeof(*tmp_buf))) { retval = -EFAULT; goto out_free; } pr_debug("new buffers count %d status %d\n", mmap_buf->entries, stream->status); buf_entry = tmp_buf; for (i = 0; i < mmap_buf->entries; i++) { bufs = kzalloc(sizeof(*bufs), GFP_KERNEL); if (!bufs) { retval = -ENOMEM; goto out_free; } bufs->size = buf_entry->size; 
bufs->offset = buf_entry->offset; bufs->addr = sst_drv_ctx->mmap_mem; bufs->in_use = false; buf_entry++; /* locking here */ mutex_lock(&stream->lock); list_add_tail(&bufs->node, &stream->bufs); mutex_unlock(&stream->lock); } mutex_lock(&stream->lock); stream->data_blk.condition = false; stream->data_blk.ret_code = 0; if (stream->status == STREAM_INIT && stream->prev != STREAM_UN_INIT && stream->need_draining != true) { stream->prev = stream->status; stream->status = STREAM_RUNNING; if (stream->ops == STREAM_OPS_PLAYBACK) { if (sst_play_frame(str_id) < 0) { pr_warn("play frames fail\n"); mutex_unlock(&stream->lock); retval = -EIO; goto out_free; } } else if (stream->ops == STREAM_OPS_CAPTURE) { if (sst_capture_frame(str_id) < 0) { pr_warn("capture frame fail\n"); mutex_unlock(&stream->lock); retval = -EIO; goto out_free; } } } mutex_unlock(&stream->lock); /* Block the call for reply */ if (!list_empty(&stream->bufs)) { stream->data_blk.on = true; retval = sst_wait_interruptible(sst_drv_ctx, &stream->data_blk); } if (retval >= 0) retval = stream->cumm_bytes; pr_debug("end of play/rec ioctl bytes = %d!!\n", retval); out_free: kfree(tmp_buf); return retval; } /*sets user data buffers to play/capture*/ static int intel_sst_play_capture(struct stream_info *stream, int str_id) { int retval; stream->data_blk.ret_code = 0; stream->data_blk.on = true; stream->data_blk.condition = false; mutex_lock(&stream->lock); if (stream->status == STREAM_INIT && stream->prev != STREAM_UN_INIT) { /* stream is started */ stream->prev = stream->status; stream->status = STREAM_RUNNING; } if (stream->status == STREAM_INIT && stream->prev == STREAM_UN_INIT) { /* stream is not started yet */ pr_debug("Stream isn't in started state %d, prev %d\n", stream->status, stream->prev); } else if ((stream->status == STREAM_RUNNING || stream->status == STREAM_PAUSED) && stream->need_draining != true) { /* stream is started */ if (stream->ops == STREAM_OPS_PLAYBACK || stream->ops == 
STREAM_OPS_PLAYBACK_DRM) { if (sst_play_frame(str_id) < 0) { pr_warn("play frames failed\n"); mutex_unlock(&stream->lock); return -EIO; } } else if (stream->ops == STREAM_OPS_CAPTURE) { if (sst_capture_frame(str_id) < 0) { pr_warn("capture frames failed\n"); mutex_unlock(&stream->lock); return -EIO; } } } else { mutex_unlock(&stream->lock); return -EIO; } mutex_unlock(&stream->lock); /* Block the call for reply */ retval = sst_wait_interruptible(sst_drv_ctx, &stream->data_blk); if (retval) { stream->status = STREAM_INIT; pr_debug("wait returned error...\n"); } return retval; } /* fills kernel list with buffer addresses for SST DSP driver to process*/ static int snd_sst_fill_kernel_list(struct stream_info *stream, const struct iovec *iovec, unsigned long nr_segs, struct list_head *copy_to_list) { struct sst_stream_bufs *stream_bufs; unsigned long index, mmap_len; unsigned char __user *bufp; unsigned long size, copied_size; int retval = 0, add_to_list = 0; static int sent_offset; static unsigned long sent_index; #ifdef CONFIG_MRST_RAR_HANDLER if (stream->ops == STREAM_OPS_PLAYBACK_DRM) { for (index = stream->sg_index; index < nr_segs; index++) { __u32 rar_handle; struct sst_stream_bufs *stream_bufs = kzalloc(sizeof(*stream_bufs), GFP_KERNEL); stream->sg_index = index; if (!stream_bufs) return -ENOMEM; if (copy_from_user((void *) &rar_handle, iovec[index].iov_base, sizeof(__u32))) { kfree(stream_bufs); return -EFAULT; } stream_bufs->addr = (char *)rar_handle; stream_bufs->in_use = false; stream_bufs->size = iovec[0].iov_len; /* locking here */ mutex_lock(&stream->lock); list_add_tail(&stream_bufs->node, &stream->bufs); mutex_unlock(&stream->lock); } stream->sg_index = index; return retval; } #endif stream_bufs = kzalloc(sizeof(*stream_bufs), GFP_KERNEL); if (!stream_bufs) return -ENOMEM; stream_bufs->addr = sst_drv_ctx->mmap_mem; mmap_len = sst_drv_ctx->mmap_len; stream_bufs->addr = sst_drv_ctx->mmap_mem; bufp = stream->cur_ptr; copied_size = 0; if (!stream->sg_index) 
sent_index = sent_offset = 0; for (index = stream->sg_index; index < nr_segs; index++) { stream->sg_index = index; if (!stream->cur_ptr) bufp = iovec[index].iov_base; size = ((unsigned long)iovec[index].iov_base + iovec[index].iov_len) - (unsigned long) bufp; if ((copied_size + size) > mmap_len) size = mmap_len - copied_size; if (stream->ops == STREAM_OPS_PLAYBACK) { if (copy_from_user((void *) (stream_bufs->addr + copied_size), bufp, size)) { /* Clean up the list and return error code */ retval = -EFAULT; break; } } else if (stream->ops == STREAM_OPS_CAPTURE) { struct snd_sst_user_cap_list *entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { kfree(stream_bufs); return -ENOMEM; } entry->iov_index = index; entry->iov_offset = (unsigned long) bufp - (unsigned long)iovec[index].iov_base; entry->offset = copied_size; entry->size = size; list_add_tail(&entry->node, copy_to_list); } stream->cur_ptr = bufp + size; if (((unsigned long)iovec[index].iov_base + iovec[index].iov_len) < ((unsigned long)iovec[index].iov_base)) { pr_debug("Buffer overflows\n"); kfree(stream_bufs); return -EINVAL; } if (((unsigned long)iovec[index].iov_base + iovec[index].iov_len) == (unsigned long)stream->cur_ptr) { stream->cur_ptr = NULL; stream->sg_index++; } copied_size += size; pr_debug("copied_size - %lx\n", copied_size); if ((copied_size >= mmap_len) || (stream->sg_index == nr_segs)) { add_to_list = 1; } if (add_to_list) { stream_bufs->in_use = false; stream_bufs->size = copied_size; /* locking here */ mutex_lock(&stream->lock); list_add_tail(&stream_bufs->node, &stream->bufs); mutex_unlock(&stream->lock); break; } } return retval; } /* This function copies the captured data returned from SST DSP engine * to the user buffers*/ static int snd_sst_copy_userbuf_capture(struct stream_info *stream, const struct iovec *iovec, struct list_head *copy_to_list) { struct snd_sst_user_cap_list *entry, *_entry; struct sst_stream_bufs *kbufs = NULL, *_kbufs; int retval = 0; /* copy sent buffers */ 
pr_debug("capture stream copying to user now...\n"); list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) { if (kbufs->in_use == true) { /* copy to user */ list_for_each_entry_safe(entry, _entry, copy_to_list, node) { if (copy_to_user(iovec[entry->iov_index].iov_base + entry->iov_offset, kbufs->addr + entry->offset, entry->size)) { /* Clean up the list and return error */ retval = -EFAULT; break; } list_del(&entry->node); kfree(entry); } } } pr_debug("end of cap copy\n"); return retval; } /* * snd_sst_userbufs_play_cap - constructs the list from user buffers * * @iovec:pointer to iovec structure * @nr_segs:number entries in the iovec structure * @str_id:stream id * @stream:pointer to stream_info structure * * This function will traverse the user list and copy the data to the kernel * space buffers. */ static int snd_sst_userbufs_play_cap(const struct iovec *iovec, unsigned long nr_segs, unsigned int str_id, struct stream_info *stream) { int retval; LIST_HEAD(copy_to_list); retval = snd_sst_fill_kernel_list(stream, iovec, nr_segs, &copy_to_list); retval = intel_sst_play_capture(stream, str_id); if (retval < 0) return retval; if (stream->ops == STREAM_OPS_CAPTURE) { retval = snd_sst_copy_userbuf_capture(stream, iovec, &copy_to_list); } return retval; } /* This function is common function across read/write for user buffers called from system calls*/ static int intel_sst_read_write(unsigned int str_id, char __user *buf, size_t count) { int retval; struct stream_info *stream; struct iovec iovec; unsigned long nr_segs; retval = sst_validate_strid(str_id); if (retval) return -EINVAL; stream = &sst_drv_ctx->streams[str_id]; if (stream->mmapped == true) { pr_warn("user write and stream is mapped\n"); return -EIO; } if (!count) return -EINVAL; stream->curr_bytes = 0; stream->cumm_bytes = 0; /* copy user buf details */ pr_debug("new buffers %p, copy size %d, status %d\n" , buf, (int) count, (int) stream->status); stream->buf_type = SST_BUF_USER_STATIC; iovec.iov_base 
= buf; iovec.iov_len = count; nr_segs = 1; do { retval = snd_sst_userbufs_play_cap( &iovec, nr_segs, str_id, stream); if (retval < 0) break; } while (stream->sg_index < nr_segs); stream->sg_index = 0; stream->cur_ptr = NULL; if (retval >= 0) retval = stream->cumm_bytes; pr_debug("end of play/rec bytes = %d!!\n", retval); return retval; } /*** * intel_sst_write - This function is called when user tries to play out data * * @file_ptr:pointer to file * @buf:user buffer to be played out * @count:size of tthe buffer * @offset:offset to start from * * writes the encoded data into DSP */ int intel_sst_write(struct file *file_ptr, const char __user *buf, size_t count, loff_t *offset) { struct ioctl_pvt_data *data = file_ptr->private_data; int str_id = data->str_id; struct stream_info *stream = &sst_drv_ctx->streams[str_id]; pr_debug("called for %d\n", str_id); if (stream->status == STREAM_UN_INIT || stream->status == STREAM_DECODE) { return -EBADRQC; } return intel_sst_read_write(str_id, (char __user *)buf, count); } /* * intel_sst_aio_write - write buffers * * @kiocb:pointer to a structure containing file pointer * @iov:list of user buffer to be played out * @nr_segs:number of entries * @offset:offset to start from * * This function is called when user tries to play out multiple data buffers */ ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov, unsigned long nr_segs, loff_t offset) { int retval; struct ioctl_pvt_data *data = kiocb->ki_filp->private_data; int str_id = data->str_id; struct stream_info *stream; pr_debug("entry - %ld\n", nr_segs); if (is_sync_kiocb(kiocb) == false) return -EINVAL; pr_debug("called for str_id %d\n", str_id); retval = sst_validate_strid(str_id); if (retval) return -EINVAL; stream = &sst_drv_ctx->streams[str_id]; if (stream->mmapped == true) return -EIO; if (stream->status == STREAM_UN_INIT || stream->status == STREAM_DECODE) { return -EBADRQC; } stream->curr_bytes = 0; stream->cumm_bytes = 0; pr_debug("new segs %ld, 
offset %d, status %d\n" , nr_segs, (int) offset, (int) stream->status); stream->buf_type = SST_BUF_USER_STATIC; do { retval = snd_sst_userbufs_play_cap(iov, nr_segs, str_id, stream); if (retval < 0) break; } while (stream->sg_index < nr_segs); stream->sg_index = 0; stream->cur_ptr = NULL; if (retval >= 0) retval = stream->cumm_bytes; pr_debug("end of play/rec bytes = %d!!\n", retval); return retval; } /* * intel_sst_read - read the encoded data * * @file_ptr: pointer to file * @buf: user buffer to be filled with captured data * @count: size of tthe buffer * @offset: offset to start from * * This function is called when user tries to capture data */ int intel_sst_read(struct file *file_ptr, char __user *buf, size_t count, loff_t *offset) { struct ioctl_pvt_data *data = file_ptr->private_data; int str_id = data->str_id; struct stream_info *stream = &sst_drv_ctx->streams[str_id]; pr_debug("called for %d\n", str_id); if (stream->status == STREAM_UN_INIT || stream->status == STREAM_DECODE) return -EBADRQC; return intel_sst_read_write(str_id, buf, count); } /* * intel_sst_aio_read - aio read * * @kiocb: pointer to a structure containing file pointer * @iov: list of user buffer to be filled with captured * @nr_segs: number of entries * @offset: offset to start from * * This function is called when user tries to capture out multiple data buffers */ ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov, unsigned long nr_segs, loff_t offset) { int retval; struct ioctl_pvt_data *data = kiocb->ki_filp->private_data; int str_id = data->str_id; struct stream_info *stream; pr_debug("entry - %ld\n", nr_segs); if (is_sync_kiocb(kiocb) == false) { pr_debug("aio_read from user space is not allowed\n"); return -EINVAL; } pr_debug("called for str_id %d\n", str_id); retval = sst_validate_strid(str_id); if (retval) return -EINVAL; stream = &sst_drv_ctx->streams[str_id]; if (stream->mmapped == true) return -EIO; if (stream->status == STREAM_UN_INIT || stream->status == 
STREAM_DECODE)
		return -EBADRQC;
	/* Reset per-call byte counters before queuing the user buffers. */
	stream->curr_bytes = 0;
	stream->cumm_bytes = 0;
	pr_debug("new segs %ld, offset %d, status %d\n" ,
			nr_segs, (int) offset, (int) stream->status);
	stream->buf_type = SST_BUF_USER_STATIC;
	/* Keep feeding the DSP until every iovec segment has been consumed
	 * (sg_index is advanced inside snd_sst_userbufs_play_cap). */
	do {
		retval = snd_sst_userbufs_play_cap(iov, nr_segs, str_id,
				stream);
		if (retval < 0)
			break;
	} while (stream->sg_index < nr_segs);
	/* Reset scatter-gather walk state for the next read/write call. */
	stream->sg_index = 0;
	stream->cur_ptr = NULL;
	/* On success the return value is the cumulative bytes captured. */
	if (retval >= 0)
		retval = stream->cumm_bytes;
	pr_debug("end of play/rec bytes = %d!!\n", retval);
	return retval;
}

/* sst_print_stream_params - dump codec and PCM parameters (debug aid) */
static void sst_print_stream_params(struct snd_sst_get_stream_params *get_prm)
{
	pr_debug("codec params:result = %d\n",
				get_prm->codec_params.result);
	pr_debug("codec params:stream = %d\n",
				get_prm->codec_params.stream_id);
	pr_debug("codec params:codec = %d\n",
				get_prm->codec_params.codec);
	pr_debug("codec params:ops = %d\n",
				get_prm->codec_params.ops);
	pr_debug("codec params:stream_type = %d\n",
				get_prm->codec_params.stream_type);
	pr_debug("pcmparams:sfreq = %d\n",
				get_prm->pcm_params.sfreq);
	pr_debug("pcmparams:num_chan = %d\n",
				get_prm->pcm_params.num_chan);
	pr_debug("pcmparams:pcm_wd_sz = %d\n",
				get_prm->pcm_params.pcm_wd_sz);
	return;
}

/**
 * sst_create_algo_ipc - create ipc msg for algorithm parameters
 *
 * @algo_params: Algorithm parameters (header; the variable-size payload of
 *               algo_params->size bytes is copied in separately by the caller)
 * @msg: returned large IPC message; on success the caller owns it until it
 *       is handed to sst_send_algo_ipc()
 *
 * Builds the IPC_IA_ALG_PARAMS message header and copies the fixed-size
 * parameter block into the mailbox. Returns 0 or -ENOMEM.
 */
int sst_create_algo_ipc(struct snd_ppp_params *algo_params,
					struct ipc_post **msg)
{
	if (sst_create_large_msg(msg))
		return -ENOMEM;
	sst_fill_header(&(*msg)->header,
			IPC_IA_ALG_PARAMS, 1, algo_params->str_id);
	/* data length = header word + fixed params + variable payload */
	(*msg)->header.part.data = sizeof(u32) +
			sizeof(*algo_params) + algo_params->size;
	memcpy((*msg)->mailbox_data, &(*msg)->header, sizeof(u32));
	memcpy((*msg)->mailbox_data + sizeof(u32),
				algo_params, sizeof(*algo_params));
	return 0;
}

/**
 * sst_send_algo_ipc - send ipc msg for algorithm parameters
 *
 * @msg: post msg pointer; ownership passes to the dispatch list
 *
 * Queues the message for the DSP and blocks for the reply.
 */
int
sst_send_algo_ipc(struct ipc_post **msg) { sst_drv_ctx->ppp_params_blk.condition = false; sst_drv_ctx->ppp_params_blk.ret_code = 0; sst_drv_ctx->ppp_params_blk.on = true; sst_drv_ctx->ppp_params_blk.data = NULL; spin_lock(&sst_drv_ctx->list_spin_lock); list_add_tail(&(*msg)->node, &sst_drv_ctx->ipc_dispatch_list); spin_unlock(&sst_drv_ctx->list_spin_lock); sst_post_message(&sst_drv_ctx->ipc_post_msg_wq); return sst_wait_interruptible_timeout(sst_drv_ctx, &sst_drv_ctx->ppp_params_blk, SST_BLOCK_TIMEOUT); } /** * intel_sst_ioctl_dsp - receives the device ioctl's * * @cmd:Ioctl cmd * @arg:data * * This function is called when a user space component * sends a DSP Ioctl to SST driver */ long intel_sst_ioctl_dsp(unsigned int cmd, unsigned long arg) { int retval = 0; struct snd_ppp_params algo_params; struct snd_ppp_params *algo_params_copied; struct ipc_post *msg; switch (_IOC_NR(cmd)) { case _IOC_NR(SNDRV_SST_SET_ALGO): if (copy_from_user(&algo_params, (void __user *)arg, sizeof(algo_params))) return -EFAULT; if (algo_params.size > SST_MAILBOX_SIZE) return -EMSGSIZE; pr_debug("Algo ID %d Str id %d Enable %d Size %d\n", algo_params.algo_id, algo_params.str_id, algo_params.enable, algo_params.size); retval = sst_create_algo_ipc(&algo_params, &msg); if (retval) break; algo_params.reserved = 0; if (copy_from_user(msg->mailbox_data + sizeof(algo_params), algo_params.params, algo_params.size)) return -EFAULT; retval = sst_send_algo_ipc(&msg); if (retval) { pr_debug("Error in sst_set_algo = %d\n", retval); retval = -EIO; } break; case _IOC_NR(SNDRV_SST_GET_ALGO): if (copy_from_user(&algo_params, (void __user *)arg, sizeof(algo_params))) return -EFAULT; pr_debug("Algo ID %d Str id %d Enable %d Size %d\n", algo_params.algo_id, algo_params.str_id, algo_params.enable, algo_params.size); retval = sst_create_algo_ipc(&algo_params, &msg); if (retval) break; algo_params.reserved = 1; retval = sst_send_algo_ipc(&msg); if (retval) { pr_debug("Error in sst_get_algo = %d\n", retval); 
retval = -EIO; break; } algo_params_copied = (struct snd_ppp_params *) sst_drv_ctx->ppp_params_blk.data; if (algo_params_copied->size > algo_params.size) { pr_debug("mem insufficient to copy\n"); retval = -EMSGSIZE; goto free_mem; } else { char __user *tmp; if (copy_to_user(algo_params.params, algo_params_copied->params, algo_params_copied->size)) { retval = -EFAULT; goto free_mem; } tmp = (char __user *)arg + offsetof( struct snd_ppp_params, size); if (copy_to_user(tmp, &algo_params_copied->size, sizeof(__u32))) { retval = -EFAULT; goto free_mem; } } free_mem: kfree(algo_params_copied->params); kfree(algo_params_copied); break; } return retval; } int sst_ioctl_tuning_params(unsigned long arg) { struct snd_sst_tuning_params params; struct ipc_post *msg; if (copy_from_user(&params, (void __user *)arg, sizeof(params))) return -EFAULT; if (params.size > SST_MAILBOX_SIZE) return -ENOMEM; pr_debug("Parameter %d, Stream %d, Size %d\n", params.type, params.str_id, params.size); if (sst_create_large_msg(&msg)) return -ENOMEM; sst_fill_header(&msg->header, IPC_IA_TUNING_PARAMS, 1, params.str_id); msg->header.part.data = sizeof(u32) + sizeof(params) + params.size; memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32)); memcpy(msg->mailbox_data + sizeof(u32), &params, sizeof(params)); if (copy_from_user(msg->mailbox_data + sizeof(params), (void __user *)(unsigned long)params.addr, params.size)) { kfree(msg->mailbox_data); kfree(msg); return -EFAULT; } return sst_send_algo_ipc(&msg); } /** * intel_sst_ioctl - receives the device ioctl's * @file_ptr:pointer to file * @cmd:Ioctl cmd * @arg:data * * This function is called by OS when a user space component * sends an Ioctl to SST driver */ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) { int retval = 0; struct ioctl_pvt_data *data = NULL; int str_id = 0, minor = 0; data = file_ptr->private_data; if (data) { minor = 0; str_id = data->str_id; } else minor = 1; if (sst_drv_ctx->sst_state != 
SST_FW_RUNNING) return -EBUSY; switch (_IOC_NR(cmd)) { case _IOC_NR(SNDRV_SST_STREAM_PAUSE): pr_debug("IOCTL_PAUSE received for %d!\n", str_id); if (minor != STREAM_MODULE) { retval = -EBADRQC; break; } retval = sst_pause_stream(str_id); break; case _IOC_NR(SNDRV_SST_STREAM_RESUME): pr_debug("SNDRV_SST_IOCTL_RESUME received!\n"); if (minor != STREAM_MODULE) { retval = -EBADRQC; break; } retval = sst_resume_stream(str_id); break; case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): { struct snd_sst_params str_param; pr_debug("IOCTL_SET_PARAMS received!\n"); if (minor != STREAM_MODULE) { retval = -EBADRQC; break; } if (copy_from_user(&str_param, (void __user *)arg, sizeof(str_param))) { retval = -EFAULT; break; } if (!str_id) { retval = sst_get_stream(&str_param); if (retval > 0) { struct stream_info *str_info; char __user *dest; sst_drv_ctx->stream_cnt++; data->str_id = retval; str_info = &sst_drv_ctx->streams[retval]; str_info->src = SST_DRV; dest = (char __user *)arg + offsetof(struct snd_sst_params, stream_id); retval = copy_to_user(dest, &retval, sizeof(__u32)); if (retval) retval = -EFAULT; } else { if (retval == -SST_ERR_INVALID_PARAMS) retval = -EINVAL; } } else { pr_debug("SET_STREAM_PARAMS received!\n"); /* allocated set params only */ retval = sst_set_stream_param(str_id, &str_param); /* Block the call for reply */ if (!retval) { int sfreq = 0, word_size = 0, num_channel = 0; sfreq = str_param.sparams.uc.pcm_params.sfreq; word_size = str_param.sparams.uc.pcm_params.pcm_wd_sz; num_channel = str_param.sparams.uc.pcm_params.num_chan; if (str_param.ops == STREAM_OPS_CAPTURE) { sst_drv_ctx->scard_ops->\ set_pcm_audio_params(sfreq, word_size, num_channel); } } } break; } case _IOC_NR(SNDRV_SST_SET_VOL): { struct snd_sst_vol set_vol; if (copy_from_user(&set_vol, (void __user *)arg, sizeof(set_vol))) { pr_debug("copy failed\n"); retval = -EFAULT; break; } pr_debug("SET_VOLUME received for %d!\n", set_vol.stream_id); if (minor == STREAM_MODULE && set_vol.stream_id == 0) { 
pr_debug("invalid operation!\n"); retval = -EPERM; break; } retval = sst_set_vol(&set_vol); break; } case _IOC_NR(SNDRV_SST_GET_VOL): { struct snd_sst_vol get_vol; if (copy_from_user(&get_vol, (void __user *)arg, sizeof(get_vol))) { retval = -EFAULT; break; } pr_debug("IOCTL_GET_VOLUME received for stream = %d!\n", get_vol.stream_id); if (minor == STREAM_MODULE && get_vol.stream_id == 0) { pr_debug("invalid operation!\n"); retval = -EPERM; break; } retval = sst_get_vol(&get_vol); if (retval) { retval = -EIO; break; } pr_debug("id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n", get_vol.stream_id, get_vol.volume, get_vol.ramp_duration, get_vol.ramp_type); if (copy_to_user((struct snd_sst_vol __user *)arg, &get_vol, sizeof(get_vol))) { retval = -EFAULT; break; } /*sst_print_get_vol_info(str_id, &get_vol);*/ break; } case _IOC_NR(SNDRV_SST_MUTE): { struct snd_sst_mute set_mute; if (copy_from_user(&set_mute, (void __user *)arg, sizeof(set_mute))) { retval = -EFAULT; break; } pr_debug("SNDRV_SST_SET_VOLUME received for %d!\n", set_mute.stream_id); if (minor == STREAM_MODULE && set_mute.stream_id == 0) { retval = -EPERM; break; } retval = sst_set_mute(&set_mute); break; } case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): { struct snd_sst_get_stream_params get_params; pr_debug("IOCTL_GET_PARAMS received!\n"); if (minor != 0) { retval = -EBADRQC; break; } retval = sst_get_stream_params(str_id, &get_params); if (retval) { retval = -EIO; break; } if (copy_to_user((struct snd_sst_get_stream_params __user *)arg, &get_params, sizeof(get_params))) { retval = -EFAULT; break; } sst_print_stream_params(&get_params); break; } case _IOC_NR(SNDRV_SST_MMAP_PLAY): case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): { struct snd_sst_mmap_buffs mmap_buf; pr_debug("SNDRV_SST_MMAP_PLAY/CAPTURE received!\n"); if (minor != STREAM_MODULE) { retval = -EBADRQC; break; } if (copy_from_user(&mmap_buf, (void __user *)arg, sizeof(mmap_buf))) { retval = -EFAULT; break; } retval = intel_sst_mmap_play_capture(str_id, 
&mmap_buf); break; } case _IOC_NR(SNDRV_SST_STREAM_DROP): pr_debug("SNDRV_SST_IOCTL_DROP received!\n"); if (minor != STREAM_MODULE) { retval = -EINVAL; break; } retval = sst_drop_stream(str_id); break; case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): { struct snd_sst_tstamp tstamp = {0}; unsigned long long time, freq, mod; pr_debug("SNDRV_SST_STREAM_GET_TSTAMP received!\n"); if (minor != STREAM_MODULE) { retval = -EBADRQC; break; } memcpy_fromio(&tstamp, sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp), sizeof(tstamp)); time = tstamp.samples_rendered; freq = (unsigned long long) tstamp.sampling_frequency; time = time * 1000; /* converting it to ms */ mod = do_div(time, freq); if (copy_to_user((void __user *)arg, &time, sizeof(unsigned long long))) retval = -EFAULT; break; } case _IOC_NR(SNDRV_SST_STREAM_START):{ struct stream_info *stream; pr_debug("SNDRV_SST_STREAM_START received!\n"); if (minor != STREAM_MODULE) { retval = -EINVAL; break; } retval = sst_validate_strid(str_id); if (retval) break; stream = &sst_drv_ctx->streams[str_id]; mutex_lock(&stream->lock); if (stream->status == STREAM_INIT && stream->need_draining != true) { stream->prev = stream->status; stream->status = STREAM_RUNNING; if (stream->ops == STREAM_OPS_PLAYBACK || stream->ops == STREAM_OPS_PLAYBACK_DRM) { retval = sst_play_frame(str_id); } else if (stream->ops == STREAM_OPS_CAPTURE) retval = sst_capture_frame(str_id); else { retval = -EINVAL; mutex_unlock(&stream->lock); break; } if (retval < 0) { stream->status = STREAM_INIT; mutex_unlock(&stream->lock); break; } } else { retval = -EINVAL; } mutex_unlock(&stream->lock); break; } case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): { struct snd_sst_target_device target_device; pr_debug("SET_TARGET_DEVICE received!\n"); if (copy_from_user(&target_device, (void __user *)arg, sizeof(target_device))) { retval = -EFAULT; break; } if (minor != AM_MODULE) { retval = -EBADRQC; break; } retval = sst_target_device_select(&target_device); break; } case 
_IOC_NR(SNDRV_SST_DRIVER_INFO): { struct snd_sst_driver_info info; pr_debug("SNDRV_SST_DRIVER_INFO received\n"); info.version = SST_VERSION_NUM; /* hard coding, shud get sumhow later */ info.active_pcm_streams = sst_drv_ctx->stream_cnt - sst_drv_ctx->encoded_cnt; info.active_enc_streams = sst_drv_ctx->encoded_cnt; info.max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM; info.max_enc_streams = MAX_ENC_STREAM; info.buf_per_stream = sst_drv_ctx->mmap_len; if (copy_to_user((void __user *)arg, &info, sizeof(info))) retval = -EFAULT; break; } case _IOC_NR(SNDRV_SST_STREAM_DECODE): { struct snd_sst_dbufs param; struct snd_sst_dbufs dbufs_local; struct snd_sst_buffs ibufs, obufs; struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp; char __user *dest; pr_debug("SNDRV_SST_STREAM_DECODE received\n"); if (minor != STREAM_MODULE) { retval = -EBADRQC; break; } if (copy_from_user(&param, (void __user *)arg, sizeof(param))) { retval = -EFAULT; break; } dbufs_local.input_bytes_consumed = param.input_bytes_consumed; dbufs_local.output_bytes_produced = param.output_bytes_produced; if (copy_from_user(&ibufs, (void __user *)param.ibufs, sizeof(ibufs))) { retval = -EFAULT; break; } if (copy_from_user(&obufs, (void __user *)param.obufs, sizeof(obufs))) { retval = -EFAULT; break; } ibuf_tmp = kcalloc(ibufs.entries, sizeof(*ibuf_tmp), GFP_KERNEL); obuf_tmp = kcalloc(obufs.entries, sizeof(*obuf_tmp), GFP_KERNEL); if (!ibuf_tmp || !obuf_tmp) { retval = -ENOMEM; goto free_iobufs; } if (copy_from_user(ibuf_tmp, (void __user *)ibufs.buff_entry, ibufs.entries * sizeof(*ibuf_tmp))) { retval = -EFAULT; goto free_iobufs; } ibufs.buff_entry = ibuf_tmp; dbufs_local.ibufs = &ibufs; if (copy_from_user(obuf_tmp, (void __user *)obufs.buff_entry, obufs.entries * sizeof(*obuf_tmp))) { retval = -EFAULT; goto free_iobufs; } obufs.buff_entry = obuf_tmp; dbufs_local.obufs = &obufs; retval = sst_decode(str_id, &dbufs_local); if (retval) { retval = -EAGAIN; goto free_iobufs; } dest = (char __user *)arg + 
offsetof(struct snd_sst_dbufs, input_bytes_consumed); if (copy_to_user(dest, &dbufs_local.input_bytes_consumed, sizeof(unsigned long long))) { retval = -EFAULT; goto free_iobufs; } dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed); if (copy_to_user(dest, &dbufs_local.output_bytes_produced, sizeof(unsigned long long))) { retval = -EFAULT; goto free_iobufs; } free_iobufs: kfree(ibuf_tmp); kfree(obuf_tmp); break; } case _IOC_NR(SNDRV_SST_STREAM_DRAIN): pr_debug("SNDRV_SST_STREAM_DRAIN received\n"); if (minor != STREAM_MODULE) { retval = -EINVAL; break; } retval = sst_drain_stream(str_id); break; case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): { unsigned long long __user *bytes = (unsigned long long __user *)arg; struct snd_sst_tstamp tstamp = {0}; pr_debug("STREAM_BYTES_DECODED received!\n"); if (minor != STREAM_MODULE) { retval = -EINVAL; break; } memcpy_fromio(&tstamp, sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp), sizeof(tstamp)); if (copy_to_user(bytes, &tstamp.bytes_processed, sizeof(*bytes))) retval = -EFAULT; break; } case _IOC_NR(SNDRV_SST_FW_INFO): { struct snd_sst_fw_info *fw_info; pr_debug("SNDRV_SST_FW_INFO received\n"); fw_info = kzalloc(sizeof(*fw_info), GFP_ATOMIC); if (!fw_info) { retval = -ENOMEM; break; } retval = sst_get_fw_info(fw_info); if (retval) { retval = -EIO; kfree(fw_info); break; } if (copy_to_user((struct snd_sst_dbufs __user *)arg, fw_info, sizeof(*fw_info))) { kfree(fw_info); retval = -EFAULT; break; } /*sst_print_fw_info(fw_info);*/ kfree(fw_info); break; } case _IOC_NR(SNDRV_SST_GET_ALGO): case _IOC_NR(SNDRV_SST_SET_ALGO): if (minor != AM_MODULE) { retval = -EBADRQC; break; } retval = intel_sst_ioctl_dsp(cmd, arg); break; case _IOC_NR(SNDRV_SST_TUNING_PARAMS): if (minor != AM_MODULE) { retval = -EBADRQC; break; } retval = sst_ioctl_tuning_params(arg); break; default: retval = -EINVAL; } pr_debug("intel_sst_ioctl:complete ret code = %d\n", retval); return retval; }
gpl-2.0
wbdub/kernel_asus_tf300t
drivers/staging/comedi/drivers/das1800.c
561
50176
/* comedi/drivers/das1800.c Driver for Keitley das1700/das1800 series boards Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ************************************************************************ */ /* Driver: das1800 Description: Keithley Metrabyte DAS1800 (& compatibles) Author: Frank Mori Hess <fmhess@users.sourceforge.net> Devices: [Keithley Metrabyte] DAS-1701ST (das-1701st), DAS-1701ST-DA (das-1701st-da), DAS-1701/AO (das-1701ao), DAS-1702ST (das-1702st), DAS-1702ST-DA (das-1702st-da), DAS-1702HR (das-1702hr), DAS-1702HR-DA (das-1702hr-da), DAS-1702/AO (das-1702ao), DAS-1801ST (das-1801st), DAS-1801ST-DA (das-1801st-da), DAS-1801HC (das-1801hc), DAS-1801AO (das-1801ao), DAS-1802ST (das-1802st), DAS-1802ST-DA (das-1802st-da), DAS-1802HR (das-1802hr), DAS-1802HR-DA (das-1802hr-da), DAS-1802HC (das-1802hc), DAS-1802AO (das-1802ao) Status: works The waveform analog output on the 'ao' cards is not supported. If you need it, send me (Frank Hess) an email. 
Configuration options: [0] - I/O port base address [1] - IRQ (optional, required for timed or externally triggered conversions) [2] - DMA0 (optional, requires irq) [3] - DMA1 (optional, requires irq and dma0) */ /* This driver supports the following Keithley boards: das-1701st das-1701st-da das-1701ao das-1702st das-1702st-da das-1702hr das-1702hr-da das-1702ao das-1801st das-1801st-da das-1801hc das-1801ao das-1802st das-1802st-da das-1802hr das-1802hr-da das-1802hc das-1802ao Options: [0] - base io address [1] - irq (optional, required for timed or externally triggered conversions) [2] - dma0 (optional, requires irq) [3] - dma1 (optional, requires irq and dma0) irq can be omitted, although the cmd interface will not work without it. analog input cmd triggers supported: start_src: TRIG_NOW | TRIG_EXT scan_begin_src: TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT scan_end_src: TRIG_COUNT convert_src: TRIG_TIMER | TRIG_EXT (TRIG_EXT requires scan_begin_src == TRIG_FOLLOW) stop_src: TRIG_COUNT | TRIG_EXT | TRIG_NONE scan_begin_src triggers TRIG_TIMER and TRIG_EXT use the card's 'burst mode' which limits the valid conversion time to 64 microseconds (convert_arg <= 64000). This limitation does not apply if scan_begin_src is TRIG_FOLLOW. NOTES: Only the DAS-1801ST has been tested by me. Unipolar and bipolar ranges cannot be mixed in the channel/gain list. TODO: Make it automatically allocate irq and dma channels if they are not specified Add support for analog out on 'ao' cards read insn for analog out */ #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/io.h> #include "../comedidev.h" #include <linux/ioport.h> #include <asm/dma.h> #include "8253.h" #include "comedi_fc.h" /* misc. 
defines */ #define DAS1800_SIZE 16 /* uses 16 io addresses */ #define FIFO_SIZE 1024 /* 1024 sample fifo */ #define TIMER_BASE 200 /* 5 Mhz master clock */ #define UNIPOLAR 0x4 /* bit that determines whether input range is uni/bipolar */ #define DMA_BUF_SIZE 0x1ff00 /* size in bytes of dma buffers */ /* Registers for the das1800 */ #define DAS1800_FIFO 0x0 #define DAS1800_QRAM 0x0 #define DAS1800_DAC 0x0 #define DAS1800_SELECT 0x2 #define ADC 0x0 #define QRAM 0x1 #define DAC(a) (0x2 + a) #define DAS1800_DIGITAL 0x3 #define DAS1800_CONTROL_A 0x4 #define FFEN 0x1 #define CGEN 0x4 #define CGSL 0x8 #define TGEN 0x10 #define TGSL 0x20 #define ATEN 0x80 #define DAS1800_CONTROL_B 0x5 #define DMA_CH5 0x1 #define DMA_CH6 0x2 #define DMA_CH7 0x3 #define DMA_CH5_CH6 0x5 #define DMA_CH6_CH7 0x6 #define DMA_CH7_CH5 0x7 #define DMA_ENABLED 0x3 /* mask used to determine if dma is enabled */ #define DMA_DUAL 0x4 #define IRQ3 0x8 #define IRQ5 0x10 #define IRQ7 0x18 #define IRQ10 0x28 #define IRQ11 0x30 #define IRQ15 0x38 #define FIMD 0x40 #define DAS1800_CONTROL_C 0X6 #define IPCLK 0x1 #define XPCLK 0x3 #define BMDE 0x4 #define CMEN 0x8 #define UQEN 0x10 #define SD 0x40 #define UB 0x80 #define DAS1800_STATUS 0x7 /* bits that prevent interrupt status bits (and CVEN) from being cleared on write */ #define CLEAR_INTR_MASK (CVEN_MASK | 0x1f) #define INT 0x1 #define DMATC 0x2 #define CT0TC 0x8 #define OVF 0x10 #define FHF 0x20 #define FNE 0x40 #define CVEN_MASK 0x40 /* masks CVEN on write */ #define CVEN 0x80 #define DAS1800_BURST_LENGTH 0x8 #define DAS1800_BURST_RATE 0x9 #define DAS1800_QRAM_ADDRESS 0xa #define DAS1800_COUNTER 0xc #define IOBASE2 0x400 /* offset of additional ioports used on 'ao' cards */ enum { das1701st, das1701st_da, das1702st, das1702st_da, das1702hr, das1702hr_da, das1701ao, das1702ao, das1801st, das1801st_da, das1802st, das1802st_da, das1802hr, das1802hr_da, das1801hc, das1802hc, das1801ao, das1802ao }; static int das1800_attach(struct comedi_device *dev, struct 
comedi_devconfig *it); static int das1800_detach(struct comedi_device *dev); static int das1800_probe(struct comedi_device *dev); static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static irqreturn_t das1800_interrupt(int irq, void *d); static int das1800_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s); static void das1800_ai_handler(struct comedi_device *dev); static void das1800_handle_dma(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int status); static void das1800_flush_dma(struct comedi_device *dev, struct comedi_subdevice *s); static void das1800_flush_dma_channel(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int channel, uint16_t *buffer); static void das1800_handle_fifo_half_full(struct comedi_device *dev, struct comedi_subdevice *s); static void das1800_handle_fifo_not_empty(struct comedi_device *dev, struct comedi_subdevice *s); static int das1800_ai_do_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int das1800_ai_do_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int das1800_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das1800_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das1800_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das1800_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int das1800_set_frequency(struct comedi_device *dev); static unsigned int burst_convert_arg(unsigned int convert_arg, int round_mode); static unsigned int suggest_transfer_size(struct comedi_cmd *cmd); /* analog input ranges */ static const struct comedi_lrange range_ai_das1801 = { 8, { RANGE(-5, 5), RANGE(-1, 1), RANGE(-0.1, 0.1), 
RANGE(-0.02, 0.02), RANGE(0, 5), RANGE(0, 1), RANGE(0, 0.1), RANGE(0, 0.02), } }; static const struct comedi_lrange range_ai_das1802 = { 8, { RANGE(-10, 10), RANGE(-5, 5), RANGE(-2.5, 2.5), RANGE(-1.25, 1.25), RANGE(0, 10), RANGE(0, 5), RANGE(0, 2.5), RANGE(0, 1.25), } }; struct das1800_board { const char *name; int ai_speed; /* max conversion period in nanoseconds */ int resolution; /* bits of ai resolution */ int qram_len; /* length of card's channel / gain queue */ int common; /* supports AREF_COMMON flag */ int do_n_chan; /* number of digital output channels */ int ao_ability; /* 0 == no analog out, 1 == basic analog out, 2 == waveform analog out */ int ao_n_chan; /* number of analog out channels */ const struct comedi_lrange *range_ai; /* available input ranges */ }; /* Warning: the maximum conversion speeds listed below are * not always achievable depending on board setup (see * user manual.) */ static const struct das1800_board das1800_boards[] = { { .name = "das-1701st", .ai_speed = 6250, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 0, .range_ai = &range_ai_das1801, }, { .name = "das-1701st-da", .ai_speed = 6250, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 1, .ao_n_chan = 4, .range_ai = &range_ai_das1801, }, { .name = "das-1702st", .ai_speed = 6250, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 0, .range_ai = &range_ai_das1802, }, { .name = "das-1702st-da", .ai_speed = 6250, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 1, .ao_n_chan = 4, .range_ai = &range_ai_das1802, }, { .name = "das-1702hr", .ai_speed = 20000, .resolution = 16, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 0, .range_ai = &range_ai_das1802, }, { .name = "das-1702hr-da", .ai_speed = 20000, .resolution = 16, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 1, .ao_n_chan = 2, .range_ai = 
&range_ai_das1802, }, { .name = "das-1701ao", .ai_speed = 6250, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 2, .ao_n_chan = 2, .range_ai = &range_ai_das1801, }, { .name = "das-1702ao", .ai_speed = 6250, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 2, .ao_n_chan = 2, .range_ai = &range_ai_das1802, }, { .name = "das-1801st", .ai_speed = 3000, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 0, .range_ai = &range_ai_das1801, }, { .name = "das-1801st-da", .ai_speed = 3000, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 4, .range_ai = &range_ai_das1801, }, { .name = "das-1802st", .ai_speed = 3000, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 0, .range_ai = &range_ai_das1802, }, { .name = "das-1802st-da", .ai_speed = 3000, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 1, .ao_n_chan = 4, .range_ai = &range_ai_das1802, }, { .name = "das-1802hr", .ai_speed = 10000, .resolution = 16, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 0, .ao_n_chan = 0, .range_ai = &range_ai_das1802, }, { .name = "das-1802hr-da", .ai_speed = 10000, .resolution = 16, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 1, .ao_n_chan = 2, .range_ai = &range_ai_das1802, }, { .name = "das-1801hc", .ai_speed = 3000, .resolution = 12, .qram_len = 64, .common = 0, .do_n_chan = 8, .ao_ability = 1, .ao_n_chan = 2, .range_ai = &range_ai_das1801, }, { .name = "das-1802hc", .ai_speed = 3000, .resolution = 12, .qram_len = 64, .common = 0, .do_n_chan = 8, .ao_ability = 1, .ao_n_chan = 2, .range_ai = &range_ai_das1802, }, { .name = "das-1801ao", .ai_speed = 3000, .resolution = 12, .qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 2, .ao_n_chan = 2, .range_ai = &range_ai_das1801, }, { .name = "das-1802ao", .ai_speed = 3000, .resolution = 12, 
.qram_len = 256, .common = 1, .do_n_chan = 4, .ao_ability = 2, .ao_n_chan = 2, .range_ai = &range_ai_das1802, }, }; /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct das1800_board *)dev->board_ptr) struct das1800_private { volatile unsigned int count; /* number of data points left to be taken */ unsigned int divisor1; /* value to load into board's counter 1 for timed conversions */ unsigned int divisor2; /* value to load into board's counter 2 for timed conversions */ int do_bits; /* digital output bits */ int irq_dma_bits; /* bits for control register b */ /* dma bits for control register b, stored so that dma can be * turned on and off */ int dma_bits; unsigned int dma0; /* dma channels used */ unsigned int dma1; volatile unsigned int dma_current; /* dma channel currently in use */ uint16_t *ai_buf0; /* pointers to dma buffers */ uint16_t *ai_buf1; uint16_t *dma_current_buf; /* pointer to dma buffer currently being used */ unsigned int dma_transfer_size; /* size of transfer currently used, in bytes */ unsigned long iobase2; /* secondary io address used for analog out on 'ao' boards */ short ao_update_bits; /* remembers the last write to the 'update' dac */ }; #define devpriv ((struct das1800_private *)dev->private) /* analog out range for boards with basic analog out */ static const struct comedi_lrange range_ao_1 = { 1, { RANGE(-10, 10), } }; /* analog out range for 'ao' boards */ /* static const struct comedi_lrange range_ao_2 = { 2, { RANGE(-10, 10), RANGE(-5, 5), } }; */ static struct comedi_driver driver_das1800 = { .driver_name = "das1800", .module = THIS_MODULE, .attach = das1800_attach, .detach = das1800_detach, .num_names = ARRAY_SIZE(das1800_boards), .board_name = &das1800_boards[0].name, .offset = sizeof(struct das1800_board), }; /* * A convenient macro that defines init_module() and cleanup_module(), * as necessary. 
 */
/* Register the driver with the comedi core at module load. */
static int __init driver_das1800_init_module(void)
{
	return comedi_driver_register(&driver_das1800);
}

/* Unregister the driver at module unload. */
static void __exit driver_das1800_cleanup_module(void)
{
	comedi_driver_unregister(&driver_das1800);
}

module_init(driver_das1800_init_module);
module_exit(driver_das1800_cleanup_module);

/*
 * das1800_init_dma() - claim the ISA dma channel(s) requested in the board
 * configuration and allocate the dma bounce buffer(s) used by the analog
 * input command code.
 *
 * dma0/dma1 come from the comedi config options.  The hardware only routes
 * dma on channels 5, 6 and 7, and dual-channel operation is only wired for
 * the pairs 5/6, 6/7 and 7/5; anything else is rejected.  Returns 0 on
 * success or a negative errno.  On failure, resources already claimed here
 * are released later by das1800_detach().
 */
static int das1800_init_dma(struct comedi_device *dev, unsigned int dma0,
			    unsigned int dma1)
{
	unsigned long flags;

	/* need an irq to do dma */
	if (dev->irq && dma0) {
		/* encode dma0 and dma1 into 2 digit hexadecimal for switch */
		switch ((dma0 & 0x7) | (dma1 << 4)) {
		case 0x5:	/* dma0 == 5 */
			devpriv->dma_bits |= DMA_CH5;
			break;
		case 0x6:	/* dma0 == 6 */
			devpriv->dma_bits |= DMA_CH6;
			break;
		case 0x7:	/* dma0 == 7 */
			devpriv->dma_bits |= DMA_CH7;
			break;
		case 0x65:	/* dma0 == 5, dma1 == 6 */
			devpriv->dma_bits |= DMA_CH5_CH6;
			break;
		case 0x76:	/* dma0 == 6, dma1 == 7 */
			devpriv->dma_bits |= DMA_CH6_CH7;
			break;
		case 0x57:	/* dma0 == 7, dma1 == 5 */
			devpriv->dma_bits |= DMA_CH7_CH5;
			break;
		default:
			printk(" only supports dma channels 5 through 7\n"
			       " Dual dma only allows the following combinations:\n"
			       " dma 5,6 / 6,7 / or 7,5\n");
			return -EINVAL;
			break;
		}
		if (request_dma(dma0, driver_das1800.driver_name)) {
			printk(" failed to allocate dma channel %i\n", dma0);
			return -EINVAL;
		}
		devpriv->dma0 = dma0;
		devpriv->dma_current = dma0;
		if (dma1) {
			if (request_dma(dma1, driver_das1800.driver_name)) {
				printk(" failed to allocate dma channel %i\n",
				       dma1);
				return -EINVAL;
			}
			devpriv->dma1 = dma1;
		}
		/* GFP_DMA: ISA dma can only reach the low 16MB of memory */
		devpriv->ai_buf0 = kmalloc(DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA);
		if (devpriv->ai_buf0 == NULL)
			return -ENOMEM;
		devpriv->dma_current_buf = devpriv->ai_buf0;
		if (dma1) {
			/* second buffer for ping-pong dual-channel dma */
			devpriv->ai_buf1 =
			    kmalloc(DMA_BUF_SIZE, GFP_KERNEL | GFP_DMA);
			if (devpriv->ai_buf1 == NULL)
				return -ENOMEM;
		}
		/* program both channels for device-to-memory transfers */
		flags = claim_dma_lock();
		disable_dma(devpriv->dma0);
		set_dma_mode(devpriv->dma0, DMA_MODE_READ);
		if (dma1) {
			disable_dma(devpriv->dma1);
			set_dma_mode(devpriv->dma1, DMA_MODE_READ);
		}
		release_dma_lock(flags);
	}
	return 0;
}
static int das1800_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; unsigned long iobase = it->options[0]; unsigned int irq = it->options[1]; unsigned int dma0 = it->options[2]; unsigned int dma1 = it->options[3]; unsigned long iobase2; int board; int retval; /* allocate and initialize dev->private */ if (alloc_private(dev, sizeof(struct das1800_private)) < 0) return -ENOMEM; printk("comedi%d: %s: io 0x%lx", dev->minor, driver_das1800.driver_name, iobase); if (irq) { printk(", irq %u", irq); if (dma0) { printk(", dma %u", dma0); if (dma1) printk(" and %u", dma1); } } printk("\n"); if (iobase == 0) { printk(" io base address required\n"); return -EINVAL; } /* check if io addresses are available */ if (!request_region(iobase, DAS1800_SIZE, driver_das1800.driver_name)) { printk (" I/O port conflict: failed to allocate ports 0x%lx to 0x%lx\n", iobase, iobase + DAS1800_SIZE - 1); return -EIO; } dev->iobase = iobase; board = das1800_probe(dev); if (board < 0) { printk(" unable to determine board type\n"); return -ENODEV; } dev->board_ptr = das1800_boards + board; dev->board_name = thisboard->name; /* if it is an 'ao' board with fancy analog out then we need extra io ports */ if (thisboard->ao_ability == 2) { iobase2 = iobase + IOBASE2; if (!request_region(iobase2, DAS1800_SIZE, driver_das1800.driver_name)) { printk (" I/O port conflict: failed to allocate ports 0x%lx to 0x%lx\n", iobase2, iobase2 + DAS1800_SIZE - 1); return -EIO; } devpriv->iobase2 = iobase2; } /* grab our IRQ */ if (irq) { if (request_irq(irq, das1800_interrupt, 0, driver_das1800.driver_name, dev)) { printk(" unable to allocate irq %u\n", irq); return -EINVAL; } } dev->irq = irq; /* set bits that tell card which irq to use */ switch (irq) { case 0: break; case 3: devpriv->irq_dma_bits |= 0x8; break; case 5: devpriv->irq_dma_bits |= 0x10; break; case 7: devpriv->irq_dma_bits |= 0x18; break; case 10: devpriv->irq_dma_bits |= 0x28; break; case 11: 
devpriv->irq_dma_bits |= 0x30; break; case 15: devpriv->irq_dma_bits |= 0x38; break; default: printk(" irq out of range\n"); return -EINVAL; break; } retval = das1800_init_dma(dev, dma0, dma1); if (retval < 0) return retval; if (devpriv->ai_buf0 == NULL) { devpriv->ai_buf0 = kmalloc(FIFO_SIZE * sizeof(uint16_t), GFP_KERNEL); if (devpriv->ai_buf0 == NULL) return -ENOMEM; } if (alloc_subdevices(dev, 4) < 0) return -ENOMEM; /* analog input subdevice */ s = dev->subdevices + 0; dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND | SDF_CMD_READ; if (thisboard->common) s->subdev_flags |= SDF_COMMON; s->n_chan = thisboard->qram_len; s->len_chanlist = thisboard->qram_len; s->maxdata = (1 << thisboard->resolution) - 1; s->range_table = thisboard->range_ai; s->do_cmd = das1800_ai_do_cmd; s->do_cmdtest = das1800_ai_do_cmdtest; s->insn_read = das1800_ai_rinsn; s->poll = das1800_ai_poll; s->cancel = das1800_cancel; /* analog out */ s = dev->subdevices + 1; if (thisboard->ao_ability == 1) { s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE; s->n_chan = thisboard->ao_n_chan; s->maxdata = (1 << thisboard->resolution) - 1; s->range_table = &range_ao_1; s->insn_write = das1800_ao_winsn; } else { s->type = COMEDI_SUBD_UNUSED; } /* di */ s = dev->subdevices + 2; s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE; s->n_chan = 4; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = das1800_di_rbits; /* do */ s = dev->subdevices + 3; s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_WRITABLE | SDF_READABLE; s->n_chan = thisboard->do_n_chan; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = das1800_do_wbits; das1800_cancel(dev, dev->read_subdev); /* initialize digital out channels */ outb(devpriv->do_bits, dev->iobase + DAS1800_DIGITAL); /* initialize analog out channels */ if (thisboard->ao_ability == 1) { /* select 'update' dac channel for baseAddress + 0x0 */ outb(DAC(thisboard->ao_n_chan - 1), 
dev->iobase + DAS1800_SELECT); outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC); } return 0; }; static int das1800_detach(struct comedi_device *dev) { /* only free stuff if it has been allocated by _attach */ if (dev->iobase) release_region(dev->iobase, DAS1800_SIZE); if (dev->irq) free_irq(dev->irq, dev); if (dev->private) { if (devpriv->iobase2) release_region(devpriv->iobase2, DAS1800_SIZE); if (devpriv->dma0) free_dma(devpriv->dma0); if (devpriv->dma1) free_dma(devpriv->dma1); kfree(devpriv->ai_buf0); kfree(devpriv->ai_buf1); } printk("comedi%d: %s: remove\n", dev->minor, driver_das1800.driver_name); return 0; }; /* probes and checks das-1800 series board type */ static int das1800_probe(struct comedi_device *dev) { int id; int board; id = (inb(dev->iobase + DAS1800_DIGITAL) >> 4) & 0xf; /* get id bits */ board = ((struct das1800_board *)dev->board_ptr) - das1800_boards; switch (id) { case 0x3: if (board == das1801st_da || board == das1802st_da || board == das1701st_da || board == das1702st_da) { printk(" Board model: %s\n", das1800_boards[board].name); return board; } printk (" Board model (probed, not recommended): das-1800st-da series\n"); return das1801st; break; case 0x4: if (board == das1802hr_da || board == das1702hr_da) { printk(" Board model: %s\n", das1800_boards[board].name); return board; } printk (" Board model (probed, not recommended): das-1802hr-da\n"); return das1802hr; break; case 0x5: if (board == das1801ao || board == das1802ao || board == das1701ao || board == das1702ao) { printk(" Board model: %s\n", das1800_boards[board].name); return board; } printk (" Board model (probed, not recommended): das-1800ao series\n"); return das1801ao; break; case 0x6: if (board == das1802hr || board == das1702hr) { printk(" Board model: %s\n", das1800_boards[board].name); return board; } printk(" Board model (probed, not recommended): das-1802hr\n"); return das1802hr; break; case 0x7: if (board == das1801st || board == das1802st || board == das1701st 
|| board == das1702st) { printk(" Board model: %s\n", das1800_boards[board].name); return board; } printk (" Board model (probed, not recommended): das-1800st series\n"); return das1801st; break; case 0x8: if (board == das1801hc || board == das1802hc) { printk(" Board model: %s\n", das1800_boards[board].name); return board; } printk (" Board model (probed, not recommended): das-1800hc series\n"); return das1801hc; break; default: printk (" Board model: probe returned 0x%x (unknown, please report)\n", id); return board; break; } return -1; } static int das1800_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long flags; /* prevent race with interrupt handler */ spin_lock_irqsave(&dev->spinlock, flags); das1800_ai_handler(dev); spin_unlock_irqrestore(&dev->spinlock, flags); return s->async->buf_write_count - s->async->buf_read_count; } static irqreturn_t das1800_interrupt(int irq, void *d) { struct comedi_device *dev = d; unsigned int status; if (dev->attached == 0) { comedi_error(dev, "premature interrupt"); return IRQ_HANDLED; } /* Prevent race with das1800_ai_poll() on multi processor systems. 
* Also protects indirect addressing in das1800_ai_handler */ spin_lock(&dev->spinlock); status = inb(dev->iobase + DAS1800_STATUS); /* if interrupt was not caused by das-1800 */ if (!(status & INT)) { spin_unlock(&dev->spinlock); return IRQ_NONE; } /* clear the interrupt status bit INT */ outb(CLEAR_INTR_MASK & ~INT, dev->iobase + DAS1800_STATUS); /* handle interrupt */ das1800_ai_handler(dev); spin_unlock(&dev->spinlock); return IRQ_HANDLED; } /* the guts of the interrupt handler, that is shared with das1800_ai_poll */ static void das1800_ai_handler(struct comedi_device *dev) { struct comedi_subdevice *s = dev->subdevices + 0; /* analog input subdevice */ struct comedi_async *async = s->async; struct comedi_cmd *cmd = &async->cmd; unsigned int status = inb(dev->iobase + DAS1800_STATUS); async->events = 0; /* select adc for base address + 0 */ outb(ADC, dev->iobase + DAS1800_SELECT); /* dma buffer full */ if (devpriv->irq_dma_bits & DMA_ENABLED) { /* look for data from dma transfer even if dma terminal count hasn't happened yet */ das1800_handle_dma(dev, s, status); } else if (status & FHF) { /* if fifo half full */ das1800_handle_fifo_half_full(dev, s); } else if (status & FNE) { /* if fifo not empty */ das1800_handle_fifo_not_empty(dev, s); } async->events |= COMEDI_CB_BLOCK; /* if the card's fifo has overflowed */ if (status & OVF) { /* clear OVF interrupt bit */ outb(CLEAR_INTR_MASK & ~OVF, dev->iobase + DAS1800_STATUS); comedi_error(dev, "DAS1800 FIFO overflow"); das1800_cancel(dev, s); async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA; comedi_event(dev, s); return; } /* stop taking data if appropriate */ /* stop_src TRIG_EXT */ if (status & CT0TC) { /* clear CT0TC interrupt bit */ outb(CLEAR_INTR_MASK & ~CT0TC, dev->iobase + DAS1800_STATUS); /* make sure we get all remaining data from board before quitting */ if (devpriv->irq_dma_bits & DMA_ENABLED) das1800_flush_dma(dev, s); else das1800_handle_fifo_not_empty(dev, s); das1800_cancel(dev, s); /* disable 
hardware conversions */ async->events |= COMEDI_CB_EOA; } else if (cmd->stop_src == TRIG_COUNT && devpriv->count == 0) { /* stop_src TRIG_COUNT */ das1800_cancel(dev, s); /* disable hardware conversions */ async->events |= COMEDI_CB_EOA; } comedi_event(dev, s); return; } static void das1800_handle_dma(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int status) { unsigned long flags; const int dual_dma = devpriv->irq_dma_bits & DMA_DUAL; flags = claim_dma_lock(); das1800_flush_dma_channel(dev, s, devpriv->dma_current, devpriv->dma_current_buf); /* re-enable dma channel */ set_dma_addr(devpriv->dma_current, virt_to_bus(devpriv->dma_current_buf)); set_dma_count(devpriv->dma_current, devpriv->dma_transfer_size); enable_dma(devpriv->dma_current); release_dma_lock(flags); if (status & DMATC) { /* clear DMATC interrupt bit */ outb(CLEAR_INTR_MASK & ~DMATC, dev->iobase + DAS1800_STATUS); /* switch dma channels for next time, if appropriate */ if (dual_dma) { /* read data from the other channel next time */ if (devpriv->dma_current == devpriv->dma0) { devpriv->dma_current = devpriv->dma1; devpriv->dma_current_buf = devpriv->ai_buf1; } else { devpriv->dma_current = devpriv->dma0; devpriv->dma_current_buf = devpriv->ai_buf0; } } } return; } static inline uint16_t munge_bipolar_sample(const struct comedi_device *dev, uint16_t sample) { sample += 1 << (thisboard->resolution - 1); return sample; } static void munge_data(struct comedi_device *dev, uint16_t * array, unsigned int num_elements) { unsigned int i; int unipolar; /* see if card is using a unipolar or bipolar range so we can munge data correctly */ unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB; /* convert to unsigned type if we are in a bipolar mode */ if (!unipolar) { for (i = 0; i < num_elements; i++) array[i] = munge_bipolar_sample(dev, array[i]); } } /* Utility function used by das1800_flush_dma() and das1800_handle_dma(). 
* Assumes dma lock is held */ static void das1800_flush_dma_channel(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int channel, uint16_t *buffer) { unsigned int num_bytes, num_samples; struct comedi_cmd *cmd = &s->async->cmd; disable_dma(channel); /* clear flip-flop to make sure 2-byte registers * get set correctly */ clear_dma_ff(channel); /* figure out how many points to read */ num_bytes = devpriv->dma_transfer_size - get_dma_residue(channel); num_samples = num_bytes / sizeof(short); /* if we only need some of the points */ if (cmd->stop_src == TRIG_COUNT && devpriv->count < num_samples) num_samples = devpriv->count; munge_data(dev, buffer, num_samples); cfc_write_array_to_buffer(s, buffer, num_bytes); if (s->async->cmd.stop_src == TRIG_COUNT) devpriv->count -= num_samples; return; } /* flushes remaining data from board when external trigger has stopped acquisition * and we are using dma transfers */ static void das1800_flush_dma(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long flags; const int dual_dma = devpriv->irq_dma_bits & DMA_DUAL; flags = claim_dma_lock(); das1800_flush_dma_channel(dev, s, devpriv->dma_current, devpriv->dma_current_buf); if (dual_dma) { /* switch to other channel and flush it */ if (devpriv->dma_current == devpriv->dma0) { devpriv->dma_current = devpriv->dma1; devpriv->dma_current_buf = devpriv->ai_buf1; } else { devpriv->dma_current = devpriv->dma0; devpriv->dma_current_buf = devpriv->ai_buf0; } das1800_flush_dma_channel(dev, s, devpriv->dma_current, devpriv->dma_current_buf); } release_dma_lock(flags); /* get any remaining samples in fifo */ das1800_handle_fifo_not_empty(dev, s); return; } static void das1800_handle_fifo_half_full(struct comedi_device *dev, struct comedi_subdevice *s) { int numPoints = 0; /* number of points to read */ struct comedi_cmd *cmd = &s->async->cmd; numPoints = FIFO_SIZE / 2; /* if we only need some of the points */ if (cmd->stop_src == TRIG_COUNT && devpriv->count < 
numPoints) numPoints = devpriv->count; insw(dev->iobase + DAS1800_FIFO, devpriv->ai_buf0, numPoints); munge_data(dev, devpriv->ai_buf0, numPoints); cfc_write_array_to_buffer(s, devpriv->ai_buf0, numPoints * sizeof(devpriv->ai_buf0[0])); if (cmd->stop_src == TRIG_COUNT) devpriv->count -= numPoints; return; } static void das1800_handle_fifo_not_empty(struct comedi_device *dev, struct comedi_subdevice *s) { short dpnt; int unipolar; struct comedi_cmd *cmd = &s->async->cmd; unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB; while (inb(dev->iobase + DAS1800_STATUS) & FNE) { if (cmd->stop_src == TRIG_COUNT && devpriv->count == 0) break; dpnt = inw(dev->iobase + DAS1800_FIFO); /* convert to unsigned type if we are in a bipolar mode */ if (!unipolar) ; dpnt = munge_bipolar_sample(dev, dpnt); cfc_write_to_buffer(s, dpnt); if (cmd->stop_src == TRIG_COUNT) devpriv->count--; } return; } static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { outb(0x0, dev->iobase + DAS1800_STATUS); /* disable conversions */ outb(0x0, dev->iobase + DAS1800_CONTROL_B); /* disable interrupts and dma */ outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* disable and clear fifo and stop triggering */ if (devpriv->dma0) disable_dma(devpriv->dma0); if (devpriv->dma1) disable_dma(devpriv->dma1); return 0; } /* test analog input cmd */ static int das1800_ai_do_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; unsigned int tmp_arg; int i; int unipolar; /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW | TRIG_EXT; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_TIMER | TRIG_EXT; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; 
cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_EXT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually compatible */ /* uniqueness check */ if (cmd->start_src != TRIG_NOW && cmd->start_src != TRIG_EXT) err++; if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->scan_begin_src != TRIG_TIMER && cmd->scan_begin_src != TRIG_EXT) err++; if (cmd->convert_src != TRIG_TIMER && cmd->convert_src != TRIG_EXT) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_EXT) err++; /* compatibility check */ if (cmd->scan_begin_src != TRIG_FOLLOW && cmd->convert_src != TRIG_TIMER) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } if (cmd->convert_src == TRIG_TIMER) { if (cmd->convert_arg < thisboard->ai_speed) { cmd->convert_arg = thisboard->ai_speed; err++; } } if (!cmd->chanlist_len) { cmd->chanlist_len = 1; err++; } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } switch (cmd->stop_src) { case TRIG_COUNT: if (!cmd->stop_arg) { cmd->stop_arg = 1; err++; } break; case TRIG_NONE: if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } break; default: break; } if (err) return 3; /* step 4: fix up any arguments */ if (cmd->convert_src == TRIG_TIMER) { /* if we are not in burst mode */ if (cmd->scan_begin_src == TRIG_FOLLOW) { tmp_arg = cmd->convert_arg; /* calculate counter values that give desired timing */ i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1), &(devpriv->divisor2), &(cmd->convert_arg), cmd-> flags & TRIG_ROUND_MASK); if (tmp_arg != cmd->convert_arg) err++; } /* if we are in burst mode */ else { /* check that convert_arg is compatible */ tmp_arg = cmd->convert_arg; cmd->convert_arg = 
burst_convert_arg(cmd->convert_arg, cmd->flags & TRIG_ROUND_MASK); if (tmp_arg != cmd->convert_arg) err++; if (cmd->scan_begin_src == TRIG_TIMER) { /* if scans are timed faster than conversion rate allows */ if (cmd->convert_arg * cmd->chanlist_len > cmd->scan_begin_arg) { cmd->scan_begin_arg = cmd->convert_arg * cmd->chanlist_len; err++; } tmp_arg = cmd->scan_begin_arg; /* calculate counter values that give desired timing */ i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv-> divisor1), &(devpriv-> divisor2), &(cmd-> scan_begin_arg), cmd-> flags & TRIG_ROUND_MASK); if (tmp_arg != cmd->scan_begin_arg) err++; } } } if (err) return 4; /* make sure user is not trying to mix unipolar and bipolar ranges */ if (cmd->chanlist) { unipolar = CR_RANGE(cmd->chanlist[0]) & UNIPOLAR; for (i = 1; i < cmd->chanlist_len; i++) { if (unipolar != (CR_RANGE(cmd->chanlist[i]) & UNIPOLAR)) { comedi_error(dev, "unipolar and bipolar ranges cannot be mixed in the chanlist"); err++; break; } } } if (err) return 5; return 0; } /* analog input cmd interface */ /* first, some utility functions used in the main ai_do_cmd() */ /* returns appropriate bits for control register a, depending on command */ static int control_a_bits(struct comedi_cmd cmd) { int control_a; control_a = FFEN; /* enable fifo */ if (cmd.stop_src == TRIG_EXT) control_a |= ATEN; switch (cmd.start_src) { case TRIG_EXT: control_a |= TGEN | CGSL; break; case TRIG_NOW: control_a |= CGEN; break; default: break; } return control_a; } /* returns appropriate bits for control register c, depending on command */ static int control_c_bits(struct comedi_cmd cmd) { int control_c; int aref; /* set clock source to internal or external, select analog reference, * select unipolar / bipolar */ aref = CR_AREF(cmd.chanlist[0]); control_c = UQEN; /* enable upper qram addresses */ if (aref != AREF_DIFF) control_c |= SD; if (aref == AREF_COMMON) control_c |= CMEN; /* if a unipolar range was selected */ if (CR_RANGE(cmd.chanlist[0]) & UNIPOLAR) 
control_c |= UB; switch (cmd.scan_begin_src) { case TRIG_FOLLOW: /* not in burst mode */ switch (cmd.convert_src) { case TRIG_TIMER: /* trig on cascaded counters */ control_c |= IPCLK; break; case TRIG_EXT: /* trig on falling edge of external trigger */ control_c |= XPCLK; break; default: break; } break; case TRIG_TIMER: /* burst mode with internal pacer clock */ control_c |= BMDE | IPCLK; break; case TRIG_EXT: /* burst mode with external trigger */ control_c |= BMDE | XPCLK; break; default: break; } return control_c; } /* sets up counters */ static int setup_counters(struct comedi_device *dev, struct comedi_cmd cmd) { /* setup cascaded counters for conversion/scan frequency */ switch (cmd.scan_begin_src) { case TRIG_FOLLOW: /* not in burst mode */ if (cmd.convert_src == TRIG_TIMER) { /* set conversion frequency */ i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1), &(devpriv->divisor2), &(cmd.convert_arg), cmd. flags & TRIG_ROUND_MASK); if (das1800_set_frequency(dev) < 0) return -1; } break; case TRIG_TIMER: /* in burst mode */ /* set scan frequency */ i8253_cascade_ns_to_timer_2div(TIMER_BASE, &(devpriv->divisor1), &(devpriv->divisor2), &(cmd.scan_begin_arg), cmd.flags & TRIG_ROUND_MASK); if (das1800_set_frequency(dev) < 0) return -1; break; default: break; } /* setup counter 0 for 'about triggering' */ if (cmd.stop_src == TRIG_EXT) { /* load counter 0 in mode 0 */ i8254_load(dev->iobase + DAS1800_COUNTER, 0, 0, 1, 0); } return 0; } /* sets up dma */ static void setup_dma(struct comedi_device *dev, struct comedi_cmd cmd) { unsigned long lock_flags; const int dual_dma = devpriv->irq_dma_bits & DMA_DUAL; if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0) return; /* determine a reasonable dma transfer size */ devpriv->dma_transfer_size = suggest_transfer_size(&cmd); lock_flags = claim_dma_lock(); disable_dma(devpriv->dma0); /* clear flip-flop to make sure 2-byte registers for * count and address get set correctly */ clear_dma_ff(devpriv->dma0); 
set_dma_addr(devpriv->dma0, virt_to_bus(devpriv->ai_buf0)); /* set appropriate size of transfer */ set_dma_count(devpriv->dma0, devpriv->dma_transfer_size); devpriv->dma_current = devpriv->dma0; devpriv->dma_current_buf = devpriv->ai_buf0; enable_dma(devpriv->dma0); /* set up dual dma if appropriate */ if (dual_dma) { disable_dma(devpriv->dma1); /* clear flip-flop to make sure 2-byte registers for * count and address get set correctly */ clear_dma_ff(devpriv->dma1); set_dma_addr(devpriv->dma1, virt_to_bus(devpriv->ai_buf1)); /* set appropriate size of transfer */ set_dma_count(devpriv->dma1, devpriv->dma_transfer_size); enable_dma(devpriv->dma1); } release_dma_lock(lock_flags); return; } /* programs channel/gain list into card */ static void program_chanlist(struct comedi_device *dev, struct comedi_cmd cmd) { int i, n, chan_range; unsigned long irq_flags; const int range_mask = 0x3; /* masks unipolar/bipolar bit off range */ const int range_bitshift = 8; n = cmd.chanlist_len; /* spinlock protects indirect addressing */ spin_lock_irqsave(&dev->spinlock, irq_flags); outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */ outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*set QRAM address start */ /* make channel / gain list */ for (i = 0; i < n; i++) { chan_range = CR_CHAN(cmd. 
chanlist[i]) | ((CR_RANGE(cmd.chanlist[i]) & range_mask) << range_bitshift); outw(chan_range, dev->iobase + DAS1800_QRAM); } outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */ spin_unlock_irqrestore(&dev->spinlock, irq_flags); return; } /* analog input do_cmd */ static int das1800_ai_do_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { int ret; int control_a, control_c; struct comedi_async *async = s->async; struct comedi_cmd cmd = async->cmd; if (!dev->irq) { comedi_error(dev, "no irq assigned for das-1800, cannot do hardware conversions"); return -1; } /* disable dma on TRIG_WAKE_EOS, or TRIG_RT * (because dma in handler is unsafe at hard real-time priority) */ if (cmd.flags & (TRIG_WAKE_EOS | TRIG_RT)) devpriv->irq_dma_bits &= ~DMA_ENABLED; else devpriv->irq_dma_bits |= devpriv->dma_bits; /* interrupt on end of conversion for TRIG_WAKE_EOS */ if (cmd.flags & TRIG_WAKE_EOS) { /* interrupt fifo not empty */ devpriv->irq_dma_bits &= ~FIMD; } else { /* interrupt fifo half full */ devpriv->irq_dma_bits |= FIMD; } /* determine how many conversions we need */ if (cmd.stop_src == TRIG_COUNT) devpriv->count = cmd.stop_arg * cmd.chanlist_len; das1800_cancel(dev, s); /* determine proper bits for control registers */ control_a = control_a_bits(cmd); control_c = control_c_bits(cmd); /* setup card and start */ program_chanlist(dev, cmd); ret = setup_counters(dev, cmd); if (ret < 0) { comedi_error(dev, "Error setting up counters"); return ret; } setup_dma(dev, cmd); outb(control_c, dev->iobase + DAS1800_CONTROL_C); /* set conversion rate and length for burst mode */ if (control_c & BMDE) { /* program conversion period with number of microseconds minus 1 */ outb(cmd.convert_arg / 1000 - 1, dev->iobase + DAS1800_BURST_RATE); outb(cmd.chanlist_len - 1, dev->iobase + DAS1800_BURST_LENGTH); } outb(devpriv->irq_dma_bits, dev->iobase + DAS1800_CONTROL_B); /* enable irq/dma */ outb(control_a, dev->iobase + DAS1800_CONTROL_A); /* enable fifo and 
triggering */ outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */ return 0; } /* read analog input */ static int das1800_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i, n; int chan, range, aref, chan_range; int timeout = 1000; short dpnt; int conv_flags = 0; unsigned long irq_flags; /* set up analog reference and unipolar / bipolar mode */ aref = CR_AREF(insn->chanspec); conv_flags |= UQEN; if (aref != AREF_DIFF) conv_flags |= SD; if (aref == AREF_COMMON) conv_flags |= CMEN; /* if a unipolar range was selected */ if (CR_RANGE(insn->chanspec) & UNIPOLAR) conv_flags |= UB; outb(conv_flags, dev->iobase + DAS1800_CONTROL_C); /* software conversion enabled */ outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */ outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* reset fifo */ outb(FFEN, dev->iobase + DAS1800_CONTROL_A); chan = CR_CHAN(insn->chanspec); /* mask of unipolar/bipolar bit from range */ range = CR_RANGE(insn->chanspec) & 0x3; chan_range = chan | (range << 8); spin_lock_irqsave(&dev->spinlock, irq_flags); outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */ outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /* set QRAM address start */ outw(chan_range, dev->iobase + DAS1800_QRAM); outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */ outb(ADC, dev->iobase + DAS1800_SELECT); /* select ADC for baseAddress + 0x0 */ for (n = 0; n < insn->n; n++) { /* trigger conversion */ outb(0, dev->iobase + DAS1800_FIFO); for (i = 0; i < timeout; i++) { if (inb(dev->iobase + DAS1800_STATUS) & FNE) break; } if (i == timeout) { comedi_error(dev, "timeout"); n = -ETIME; goto exit; } dpnt = inw(dev->iobase + DAS1800_FIFO); /* shift data to offset binary for bipolar ranges */ if ((conv_flags & UB) == 0) dpnt += 1 << (thisboard->resolution - 1); data[n] = dpnt; } exit: spin_unlock_irqrestore(&dev->spinlock, irq_flags); return n; } /* writes to an 
analog output channel */ static int das1800_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int chan = CR_CHAN(insn->chanspec); /* int range = CR_RANGE(insn->chanspec); */ int update_chan = thisboard->ao_n_chan - 1; short output; unsigned long irq_flags; /* card expects two's complement data */ output = data[0] - (1 << (thisboard->resolution - 1)); /* if the write is to the 'update' channel, we need to remember its value */ if (chan == update_chan) devpriv->ao_update_bits = output; /* write to channel */ spin_lock_irqsave(&dev->spinlock, irq_flags); outb(DAC(chan), dev->iobase + DAS1800_SELECT); /* select dac channel for baseAddress + 0x0 */ outw(output, dev->iobase + DAS1800_DAC); /* now we need to write to 'update' channel to update all dac channels */ if (chan != update_chan) { outb(DAC(update_chan), dev->iobase + DAS1800_SELECT); /* select 'update' channel for baseAddress + 0x0 */ outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC); } spin_unlock_irqrestore(&dev->spinlock, irq_flags); return 1; } /* reads from digital input channels */ static int das1800_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { data[1] = inb(dev->iobase + DAS1800_DIGITAL) & 0xf; data[0] = 0; return 2; } /* writes to digital output channels */ static int das1800_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int wbits; /* only set bits that have been masked */ data[0] &= (1 << s->n_chan) - 1; wbits = devpriv->do_bits; wbits &= ~data[0]; wbits |= data[0] & data[1]; devpriv->do_bits = wbits; outb(devpriv->do_bits, dev->iobase + DAS1800_DIGITAL); data[1] = devpriv->do_bits; return 2; } /* loads counters with divisor1, divisor2 from private structure */ static int das1800_set_frequency(struct comedi_device *dev) { int err = 0; /* counter 1, mode 2 */ if (i8254_load(dev->iobase + 
DAS1800_COUNTER, 0, 1, devpriv->divisor1, 2)) err++; /* counter 2, mode 2 */ if (i8254_load(dev->iobase + DAS1800_COUNTER, 0, 2, devpriv->divisor2, 2)) err++; if (err) return -1; return 0; } /* converts requested conversion timing to timing compatible with * hardware, used only when card is in 'burst mode' */ static unsigned int burst_convert_arg(unsigned int convert_arg, int round_mode) { unsigned int micro_sec; /* in burst mode, the maximum conversion time is 64 microseconds */ if (convert_arg > 64000) convert_arg = 64000; /* the conversion time must be an integral number of microseconds */ switch (round_mode) { case TRIG_ROUND_NEAREST: default: micro_sec = (convert_arg + 500) / 1000; break; case TRIG_ROUND_DOWN: micro_sec = convert_arg / 1000; break; case TRIG_ROUND_UP: micro_sec = (convert_arg - 1) / 1000 + 1; break; } /* return number of nanoseconds */ return micro_sec * 1000; } /* utility function that suggests a dma transfer size based on the conversion period 'ns' */ static unsigned int suggest_transfer_size(struct comedi_cmd *cmd) { unsigned int size = DMA_BUF_SIZE; static const int sample_size = 2; /* size in bytes of one sample from board */ unsigned int fill_time = 300000000; /* target time in nanoseconds for filling dma buffer */ unsigned int max_size; /* maximum size we will allow for a transfer */ /* make dma buffer fill in 0.3 seconds for timed modes */ switch (cmd->scan_begin_src) { case TRIG_FOLLOW: /* not in burst mode */ if (cmd->convert_src == TRIG_TIMER) size = (fill_time / cmd->convert_arg) * sample_size; break; case TRIG_TIMER: size = (fill_time / (cmd->scan_begin_arg * cmd->chanlist_len)) * sample_size; break; default: size = DMA_BUF_SIZE; break; } /* set a minimum and maximum size allowed */ max_size = DMA_BUF_SIZE; /* if we are taking limited number of conversions, limit transfer size to that */ if (cmd->stop_src == TRIG_COUNT && cmd->stop_arg * cmd->chanlist_len * sample_size < max_size) max_size = cmd->stop_arg * cmd->chanlist_len * 
sample_size; if (size > max_size) size = max_size; if (size < sample_size) size = sample_size; return size; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
virtuous/kernel-vivow-gingerbread-v2
scripts/lib/genalloc.c
1073
5221
/* * Basic general purpose allocator for managing special purpose memory * not managed by the regular kmalloc/kfree interface. * Uses for this includes on-device special memory, uncached memory * etc. * * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org> * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <linux/slab.h> #include <linux/module.h> #include <linux/bitmap.h> #include <linux/genalloc.h> /** * gen_pool_create - create a new special memory pool * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents * @nid: node id of the node the pool structure should be allocated on, or -1 * * Create a new special memory pool that can be used to manage special purpose * memory not managed by the regular kmalloc/kfree interface. */ struct gen_pool *gen_pool_create(int min_alloc_order, int nid) { struct gen_pool *pool; pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid); if (pool != NULL) { rwlock_init(&pool->lock); INIT_LIST_HEAD(&pool->chunks); pool->min_alloc_order = min_alloc_order; } return pool; } EXPORT_SYMBOL(gen_pool_create); /** * gen_pool_add - add a new chunk of special memory to the pool * @pool: pool to add new memory chunk to * @addr: starting address of memory chunk to add to pool * @size: size in bytes of the memory chunk to add to pool * @nid: node id of the node the chunk structure and bitmap should be * allocated on, or -1 * * Add a new chunk of special memory to the specified pool. 
*/ int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size, int nid) { struct gen_pool_chunk *chunk; int nbits = size >> pool->min_alloc_order; int nbytes = sizeof(struct gen_pool_chunk) + (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE; chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid); if (unlikely(chunk == NULL)) return -1; spin_lock_init(&chunk->lock); chunk->start_addr = addr; chunk->end_addr = addr + size; write_lock(&pool->lock); list_add(&chunk->next_chunk, &pool->chunks); write_unlock(&pool->lock); return 0; } EXPORT_SYMBOL(gen_pool_add); /** * gen_pool_destroy - destroy a special memory pool * @pool: pool to destroy * * Destroy the specified special memory pool. Verifies that there are no * outstanding allocations. */ void gen_pool_destroy(struct gen_pool *pool) { struct list_head *_chunk, *_next_chunk; struct gen_pool_chunk *chunk; int order = pool->min_alloc_order; int bit, end_bit; list_for_each_safe(_chunk, _next_chunk, &pool->chunks) { chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); list_del(&chunk->next_chunk); end_bit = (chunk->end_addr - chunk->start_addr) >> order; bit = find_next_bit(chunk->bits, end_bit, 0); BUG_ON(bit < end_bit); kfree(chunk); } kfree(pool); return; } EXPORT_SYMBOL(gen_pool_destroy); /** * gen_pool_alloc - allocate special memory from the pool * @pool: pool to allocate from * @size: number of bytes to allocate from the pool * * Allocate the requested number of bytes from the specified pool. * Uses a first-fit algorithm. 
*/ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size) { struct list_head *_chunk; struct gen_pool_chunk *chunk; unsigned long addr, flags; int order = pool->min_alloc_order; int nbits, start_bit, end_bit; if (size == 0) return 0; nbits = (size + (1UL << order) - 1) >> order; read_lock(&pool->lock); list_for_each(_chunk, &pool->chunks) { chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); end_bit = (chunk->end_addr - chunk->start_addr) >> order; spin_lock_irqsave(&chunk->lock, flags); start_bit = bitmap_find_next_zero_area(chunk->bits, end_bit, 0, nbits, 0); if (start_bit >= end_bit) { spin_unlock_irqrestore(&chunk->lock, flags); continue; } addr = chunk->start_addr + ((unsigned long)start_bit << order); bitmap_set(chunk->bits, start_bit, nbits); spin_unlock_irqrestore(&chunk->lock, flags); read_unlock(&pool->lock); return addr; } read_unlock(&pool->lock); return 0; } EXPORT_SYMBOL(gen_pool_alloc); /** * gen_pool_free - free allocated special memory back to the pool * @pool: pool to free to * @addr: starting address of memory to free back to pool * @size: size in bytes of memory to free * * Free previously allocated special memory back to the specified pool. */ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) { struct list_head *_chunk; struct gen_pool_chunk *chunk; unsigned long flags; int order = pool->min_alloc_order; int bit, nbits; nbits = (size + (1UL << order) - 1) >> order; read_lock(&pool->lock); list_for_each(_chunk, &pool->chunks) { chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk); if (addr >= chunk->start_addr && addr < chunk->end_addr) { BUG_ON(addr + size > chunk->end_addr); spin_lock_irqsave(&chunk->lock, flags); bit = (addr - chunk->start_addr) >> order; while (nbits--) __clear_bit(bit++, chunk->bits); spin_unlock_irqrestore(&chunk->lock, flags); break; } } BUG_ON(nbits > 0); read_unlock(&pool->lock); } EXPORT_SYMBOL(gen_pool_free);
gpl-2.0
ffolkes/plasmakernel_note4_tw_lp511
drivers/i2c/busses/scx200_acb.c
2609
14021
/* Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> National Semiconductor SCx200 ACCESS.bus support Also supports the AMD CS5535 and AMD CS5536 Based on i2c-keywest.c which is: Copyright (c) 2001 Benjamin Herrenschmidt <benh@kernel.crashing.org> Copyright (c) 2000 Philip Edelbrock <phil@stimpy.netroedge.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/scx200.h> MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); MODULE_DESCRIPTION("NatSemi SCx200 ACCESS.bus Driver"); MODULE_ALIAS("platform:cs5535-smb"); MODULE_LICENSE("GPL"); #define MAX_DEVICES 4 static int base[MAX_DEVICES] = { 0x820, 0x840 }; module_param_array(base, int, NULL, 0); MODULE_PARM_DESC(base, "Base addresses for the ACCESS.bus controllers"); #define POLL_TIMEOUT (HZ/5) enum scx200_acb_state { state_idle, state_address, state_command, state_repeat_start, state_quick, state_read, state_write, }; static const char *scx200_acb_state_name[] = { "idle", "address", "command", "repeat_start", "quick", "read", "write", }; /* Physical interface */ 
struct scx200_acb_iface { struct scx200_acb_iface *next; struct i2c_adapter adapter; unsigned base; struct mutex mutex; /* State machine data */ enum scx200_acb_state state; int result; u8 address_byte; u8 command; u8 *ptr; char needs_reset; unsigned len; }; /* Register Definitions */ #define ACBSDA (iface->base + 0) #define ACBST (iface->base + 1) #define ACBST_SDAST 0x40 /* SDA Status */ #define ACBST_BER 0x20 #define ACBST_NEGACK 0x10 /* Negative Acknowledge */ #define ACBST_STASTR 0x08 /* Stall After Start */ #define ACBST_MASTER 0x02 #define ACBCST (iface->base + 2) #define ACBCST_BB 0x02 #define ACBCTL1 (iface->base + 3) #define ACBCTL1_STASTRE 0x80 #define ACBCTL1_NMINTE 0x40 #define ACBCTL1_ACK 0x10 #define ACBCTL1_STOP 0x02 #define ACBCTL1_START 0x01 #define ACBADDR (iface->base + 4) #define ACBCTL2 (iface->base + 5) #define ACBCTL2_ENABLE 0x01 /************************************************************************/ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status) { const char *errmsg; dev_dbg(&iface->adapter.dev, "state %s, status = 0x%02x\n", scx200_acb_state_name[iface->state], status); if (status & ACBST_BER) { errmsg = "bus error"; goto error; } if (!(status & ACBST_MASTER)) { errmsg = "not master"; goto error; } if (status & ACBST_NEGACK) { dev_dbg(&iface->adapter.dev, "negative ack in state %s\n", scx200_acb_state_name[iface->state]); iface->state = state_idle; iface->result = -ENXIO; outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); outb(ACBST_STASTR | ACBST_NEGACK, ACBST); /* Reset the status register */ outb(0, ACBST); return; } switch (iface->state) { case state_idle: dev_warn(&iface->adapter.dev, "interrupt in idle state\n"); break; case state_address: /* Do a pointer write first */ outb(iface->address_byte & ~1, ACBSDA); iface->state = state_command; break; case state_command: outb(iface->command, ACBSDA); if (iface->address_byte & 1) iface->state = state_repeat_start; else iface->state = state_write; break; case 
state_repeat_start: outb(inb(ACBCTL1) | ACBCTL1_START, ACBCTL1); /* fallthrough */ case state_quick: if (iface->address_byte & 1) { if (iface->len == 1) outb(inb(ACBCTL1) | ACBCTL1_ACK, ACBCTL1); else outb(inb(ACBCTL1) & ~ACBCTL1_ACK, ACBCTL1); outb(iface->address_byte, ACBSDA); iface->state = state_read; } else { outb(iface->address_byte, ACBSDA); iface->state = state_write; } break; case state_read: /* Set ACK if _next_ byte will be the last one */ if (iface->len == 2) outb(inb(ACBCTL1) | ACBCTL1_ACK, ACBCTL1); else outb(inb(ACBCTL1) & ~ACBCTL1_ACK, ACBCTL1); if (iface->len == 1) { iface->result = 0; iface->state = state_idle; outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); } *iface->ptr++ = inb(ACBSDA); --iface->len; break; case state_write: if (iface->len == 0) { iface->result = 0; iface->state = state_idle; outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); break; } outb(*iface->ptr++, ACBSDA); --iface->len; break; } return; error: dev_err(&iface->adapter.dev, "%s in state %s (addr=0x%02x, len=%d, status=0x%02x)\n", errmsg, scx200_acb_state_name[iface->state], iface->address_byte, iface->len, status); iface->state = state_idle; iface->result = -EIO; iface->needs_reset = 1; } static void scx200_acb_poll(struct scx200_acb_iface *iface) { u8 status; unsigned long timeout; timeout = jiffies + POLL_TIMEOUT; while (1) { status = inb(ACBST); /* Reset the status register to avoid the hang */ outb(0, ACBST); if ((status & (ACBST_SDAST|ACBST_BER|ACBST_NEGACK)) != 0) { scx200_acb_machine(iface, status); return; } if (time_after(jiffies, timeout)) break; cpu_relax(); cond_resched(); } dev_err(&iface->adapter.dev, "timeout in state %s\n", scx200_acb_state_name[iface->state]); iface->state = state_idle; iface->result = -EIO; iface->needs_reset = 1; } static void scx200_acb_reset(struct scx200_acb_iface *iface) { /* Disable the ACCESS.bus device and Configure the SCL frequency: 16 clock cycles */ outb(0x70, ACBCTL2); /* Polling mode */ outb(0, ACBCTL1); /* Disable slave address */ 
outb(0, ACBADDR); /* Enable the ACCESS.bus device */ outb(inb(ACBCTL2) | ACBCTL2_ENABLE, ACBCTL2); /* Free STALL after START */ outb(inb(ACBCTL1) & ~(ACBCTL1_STASTRE | ACBCTL1_NMINTE), ACBCTL1); /* Send a STOP */ outb(inb(ACBCTL1) | ACBCTL1_STOP, ACBCTL1); /* Clear BER, NEGACK and STASTR bits */ outb(ACBST_BER | ACBST_NEGACK | ACBST_STASTR, ACBST); /* Clear BB bit */ outb(inb(ACBCST) | ACBCST_BB, ACBCST); } static s32 scx200_acb_smbus_xfer(struct i2c_adapter *adapter, u16 address, unsigned short flags, char rw, u8 command, int size, union i2c_smbus_data *data) { struct scx200_acb_iface *iface = i2c_get_adapdata(adapter); int len; u8 *buffer; u16 cur_word; int rc; switch (size) { case I2C_SMBUS_QUICK: len = 0; buffer = NULL; break; case I2C_SMBUS_BYTE: len = 1; buffer = rw ? &data->byte : &command; break; case I2C_SMBUS_BYTE_DATA: len = 1; buffer = &data->byte; break; case I2C_SMBUS_WORD_DATA: len = 2; cur_word = cpu_to_le16(data->word); buffer = (u8 *)&cur_word; break; case I2C_SMBUS_I2C_BLOCK_DATA: len = data->block[0]; if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) return -EINVAL; buffer = &data->block[1]; break; default: return -EINVAL; } dev_dbg(&adapter->dev, "size=%d, address=0x%x, command=0x%x, len=%d, read=%d\n", size, address, command, len, rw); if (!len && rw == I2C_SMBUS_READ) { dev_dbg(&adapter->dev, "zero length read\n"); return -EINVAL; } mutex_lock(&iface->mutex); iface->address_byte = (address << 1) | rw; iface->command = command; iface->ptr = buffer; iface->len = len; iface->result = -EINVAL; iface->needs_reset = 0; outb(inb(ACBCTL1) | ACBCTL1_START, ACBCTL1); if (size == I2C_SMBUS_QUICK || size == I2C_SMBUS_BYTE) iface->state = state_quick; else iface->state = state_address; while (iface->state != state_idle) scx200_acb_poll(iface); if (iface->needs_reset) scx200_acb_reset(iface); rc = iface->result; mutex_unlock(&iface->mutex); if (rc == 0 && size == I2C_SMBUS_WORD_DATA && rw == I2C_SMBUS_READ) data->word = le16_to_cpu(cur_word); #ifdef DEBUG 
dev_dbg(&adapter->dev, "transfer done, result: %d", rc); if (buffer) { int i; printk(" data:"); for (i = 0; i < len; ++i) printk(" %02x", buffer[i]); } printk("\n"); #endif return rc; } static u32 scx200_acb_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_I2C_BLOCK; } /* For now, we only handle combined mode (smbus) */ static const struct i2c_algorithm scx200_acb_algorithm = { .smbus_xfer = scx200_acb_smbus_xfer, .functionality = scx200_acb_func, }; static struct scx200_acb_iface *scx200_acb_list; static DEFINE_MUTEX(scx200_acb_list_mutex); static int scx200_acb_probe(struct scx200_acb_iface *iface) { u8 val; /* Disable the ACCESS.bus device and Configure the SCL frequency: 16 clock cycles */ outb(0x70, ACBCTL2); if (inb(ACBCTL2) != 0x70) { pr_debug("ACBCTL2 readback failed\n"); return -ENXIO; } outb(inb(ACBCTL1) | ACBCTL1_NMINTE, ACBCTL1); val = inb(ACBCTL1); if (val) { pr_debug("disabled, but ACBCTL1=0x%02x\n", val); return -ENXIO; } outb(inb(ACBCTL2) | ACBCTL2_ENABLE, ACBCTL2); outb(inb(ACBCTL1) | ACBCTL1_NMINTE, ACBCTL1); val = inb(ACBCTL1); if ((val & ACBCTL1_NMINTE) != ACBCTL1_NMINTE) { pr_debug("enabled, but NMINTE won't be set, ACBCTL1=0x%02x\n", val); return -ENXIO; } return 0; } static struct scx200_acb_iface *scx200_create_iface(const char *text, struct device *dev, int index) { struct scx200_acb_iface *iface; struct i2c_adapter *adapter; iface = kzalloc(sizeof(*iface), GFP_KERNEL); if (!iface) { pr_err("can't allocate memory\n"); return NULL; } adapter = &iface->adapter; i2c_set_adapdata(adapter, iface); snprintf(adapter->name, sizeof(adapter->name), "%s ACB%d", text, index); adapter->owner = THIS_MODULE; adapter->algo = &scx200_acb_algorithm; adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adapter->dev.parent = dev; mutex_init(&iface->mutex); return iface; } static int scx200_acb_create(struct scx200_acb_iface *iface) { struct i2c_adapter 
*adapter; int rc; adapter = &iface->adapter; rc = scx200_acb_probe(iface); if (rc) { pr_warn("probe failed\n"); return rc; } scx200_acb_reset(iface); if (i2c_add_adapter(adapter) < 0) { pr_err("failed to register\n"); return -ENODEV; } if (!adapter->dev.parent) { /* If there's no dev, we're tracking (ISA) ifaces manually */ mutex_lock(&scx200_acb_list_mutex); iface->next = scx200_acb_list; scx200_acb_list = iface; mutex_unlock(&scx200_acb_list_mutex); } return 0; } static struct scx200_acb_iface *scx200_create_dev(const char *text, unsigned long base, int index, struct device *dev) { struct scx200_acb_iface *iface; int rc; iface = scx200_create_iface(text, dev, index); if (iface == NULL) return NULL; if (!request_region(base, 8, iface->adapter.name)) { pr_err("can't allocate io 0x%lx-0x%lx\n", base, base + 8 - 1); goto errout_free; } iface->base = base; rc = scx200_acb_create(iface); if (rc == 0) return iface; release_region(base, 8); errout_free: kfree(iface); return NULL; } static int scx200_probe(struct platform_device *pdev) { struct scx200_acb_iface *iface; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res) { dev_err(&pdev->dev, "can't fetch device resource info\n"); return -ENODEV; } iface = scx200_create_dev("CS5535", res->start, 0, &pdev->dev); if (!iface) return -EIO; dev_info(&pdev->dev, "SCx200 device '%s' registered\n", iface->adapter.name); platform_set_drvdata(pdev, iface); return 0; } static void scx200_cleanup_iface(struct scx200_acb_iface *iface) { i2c_del_adapter(&iface->adapter); release_region(iface->base, 8); kfree(iface); } static int scx200_remove(struct platform_device *pdev) { struct scx200_acb_iface *iface; iface = platform_get_drvdata(pdev); scx200_cleanup_iface(iface); return 0; } static struct platform_driver scx200_pci_driver = { .driver = { .name = "cs5535-smb", .owner = THIS_MODULE, }, .probe = scx200_probe, .remove = scx200_remove, }; static DEFINE_PCI_DEVICE_TABLE(scx200_isa) = { { 
PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) }, { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) }, { 0, } }; static __init void scx200_scan_isa(void) { int i; if (!pci_dev_present(scx200_isa)) return; for (i = 0; i < MAX_DEVICES; ++i) { if (base[i] == 0) continue; /* XXX: should we care about failures? */ scx200_create_dev("SCx200", base[i], i, NULL); } } static int __init scx200_acb_init(void) { pr_debug("NatSemi SCx200 ACCESS.bus Driver\n"); /* First scan for ISA-based devices */ scx200_scan_isa(); /* XXX: should we care about errors? */ /* If at least one bus was created, init must succeed */ if (scx200_acb_list) return 0; /* No ISA devices; register the platform driver for PCI-based devices */ return platform_driver_register(&scx200_pci_driver); } static void __exit scx200_acb_cleanup(void) { struct scx200_acb_iface *iface; platform_driver_unregister(&scx200_pci_driver); mutex_lock(&scx200_acb_list_mutex); while ((iface = scx200_acb_list) != NULL) { scx200_acb_list = iface->next; mutex_unlock(&scx200_acb_list_mutex); scx200_cleanup_iface(iface); mutex_lock(&scx200_acb_list_mutex); } mutex_unlock(&scx200_acb_list_mutex); } module_init(scx200_acb_init); module_exit(scx200_acb_cleanup);
gpl-2.0
dnkn/rk3188_tablet
sound/pci/ens1370.c
2609
80877
/* * Driver for Ensoniq ES1370/ES1371 AudioPCI soundcard * Copyright (c) by Jaroslav Kysela <perex@perex.cz>, * Thomas Sailer <sailer@ife.ee.ethz.ch> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* Power-Management-Code ( CONFIG_PM ) * for ens1371 only ( FIXME ) * derived from cs4281.c, atiixp.c and via82xx.c * using http://www.alsa-project.org/~tiwai/writing-an-alsa-driver/ * by Kurt J. 
Bosch */ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/gameport.h> #include <linux/moduleparam.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/rawmidi.h> #ifdef CHIP1371 #include <sound/ac97_codec.h> #else #include <sound/ak4531_codec.h> #endif #include <sound/initval.h> #include <sound/asoundef.h> #ifndef CHIP1371 #undef CHIP1370 #define CHIP1370 #endif #ifdef CHIP1370 #define DRIVER_NAME "ENS1370" #else #define DRIVER_NAME "ENS1371" #endif MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Thomas Sailer <sailer@ife.ee.ethz.ch>"); MODULE_LICENSE("GPL"); #ifdef CHIP1370 MODULE_DESCRIPTION("Ensoniq AudioPCI ES1370"); MODULE_SUPPORTED_DEVICE("{{Ensoniq,AudioPCI-97 ES1370}," "{Creative Labs,SB PCI64/128 (ES1370)}}"); #endif #ifdef CHIP1371 MODULE_DESCRIPTION("Ensoniq/Creative AudioPCI ES1371+"); MODULE_SUPPORTED_DEVICE("{{Ensoniq,AudioPCI ES1371/73}," "{Ensoniq,AudioPCI ES1373}," "{Creative Labs,Ectiva EV1938}," "{Creative Labs,SB PCI64/128 (ES1371/73)}," "{Creative Labs,Vibra PCI128}," "{Ectiva,EV1938}}"); #endif #if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) #define SUPPORT_JOYSTICK #endif static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable switches */ #ifdef SUPPORT_JOYSTICK #ifdef CHIP1371 static int joystick_port[SNDRV_CARDS]; #else static int joystick[SNDRV_CARDS]; #endif #endif #ifdef CHIP1371 static int spdif[SNDRV_CARDS]; static int lineio[SNDRV_CARDS]; #endif module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Ensoniq AudioPCI soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Ensoniq AudioPCI soundcard."); 
module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Ensoniq AudioPCI soundcard."); #ifdef SUPPORT_JOYSTICK #ifdef CHIP1371 module_param_array(joystick_port, int, NULL, 0444); MODULE_PARM_DESC(joystick_port, "Joystick port address."); #else module_param_array(joystick, bool, NULL, 0444); MODULE_PARM_DESC(joystick, "Enable joystick."); #endif #endif /* SUPPORT_JOYSTICK */ #ifdef CHIP1371 module_param_array(spdif, int, NULL, 0444); MODULE_PARM_DESC(spdif, "S/PDIF output (-1 = none, 0 = auto, 1 = force)."); module_param_array(lineio, int, NULL, 0444); MODULE_PARM_DESC(lineio, "Line In to Rear Out (0 = auto, 1 = force)."); #endif /* ES1371 chip ID */ /* This is a little confusing because all ES1371 compatible chips have the same DEVICE_ID, the only thing differentiating them is the REV_ID field. This is only significant if you want to enable features on the later parts. Yes, I know it's stupid and why didn't we use the sub IDs? */ #define ES1371REV_ES1373_A 0x04 #define ES1371REV_ES1373_B 0x06 #define ES1371REV_CT5880_A 0x07 #define CT5880REV_CT5880_C 0x02 #define CT5880REV_CT5880_D 0x03 /* ??? 
-jk */ #define CT5880REV_CT5880_E 0x04 /* mw */ #define ES1371REV_ES1371_B 0x09 #define EV1938REV_EV1938_A 0x00 #define ES1371REV_ES1373_8 0x08 /* * Direct registers */ #define ES_REG(ensoniq, x) ((ensoniq)->port + ES_REG_##x) #define ES_REG_CONTROL 0x00 /* R/W: Interrupt/Chip select control register */ #define ES_1370_ADC_STOP (1<<31) /* disable capture buffer transfers */ #define ES_1370_XCTL1 (1<<30) /* general purpose output bit */ #define ES_1373_BYPASS_P1 (1<<31) /* bypass SRC for PB1 */ #define ES_1373_BYPASS_P2 (1<<30) /* bypass SRC for PB2 */ #define ES_1373_BYPASS_R (1<<29) /* bypass SRC for REC */ #define ES_1373_TEST_BIT (1<<28) /* should be set to 0 for normal operation */ #define ES_1373_RECEN_B (1<<27) /* mix record with playback for I2S/SPDIF out */ #define ES_1373_SPDIF_THRU (1<<26) /* 0 = SPDIF thru mode, 1 = SPDIF == dig out */ #define ES_1371_JOY_ASEL(o) (((o)&0x03)<<24)/* joystick port mapping */ #define ES_1371_JOY_ASELM (0x03<<24) /* mask for above */ #define ES_1371_JOY_ASELI(i) (((i)>>24)&0x03) #define ES_1371_GPIO_IN(i) (((i)>>20)&0x0f)/* GPIO in [3:0] pins - R/O */ #define ES_1370_PCLKDIVO(o) (((o)&0x1fff)<<16)/* clock divide ratio for DAC2 */ #define ES_1370_PCLKDIVM ((0x1fff)<<16) /* mask for above */ #define ES_1370_PCLKDIVI(i) (((i)>>16)&0x1fff)/* clock divide ratio for DAC2 */ #define ES_1371_GPIO_OUT(o) (((o)&0x0f)<<16)/* GPIO out [3:0] pins - W/R */ #define ES_1371_GPIO_OUTM (0x0f<<16) /* mask for above */ #define ES_MSFMTSEL (1<<15) /* MPEG serial data format; 0 = SONY, 1 = I2S */ #define ES_1370_M_SBB (1<<14) /* clock source for DAC - 0 = clock generator; 1 = MPEG clocks */ #define ES_1371_SYNC_RES (1<<14) /* Warm AC97 reset */ #define ES_1370_WTSRSEL(o) (((o)&0x03)<<12)/* fixed frequency clock for DAC1 */ #define ES_1370_WTSRSELM (0x03<<12) /* mask for above */ #define ES_1371_ADC_STOP (1<<13) /* disable CCB transfer capture information */ #define ES_1371_PWR_INTRM (1<<12) /* power level change interrupts enable */ #define 
ES_1370_DAC_SYNC (1<<11) /* DAC's are synchronous */ #define ES_1371_M_CB (1<<11) /* capture clock source; 0 = AC'97 ADC; 1 = I2S */ #define ES_CCB_INTRM (1<<10) /* CCB voice interrupts enable */ #define ES_1370_M_CB (1<<9) /* capture clock source; 0 = ADC; 1 = MPEG */ #define ES_1370_XCTL0 (1<<8) /* generap purpose output bit */ #define ES_1371_PDLEV(o) (((o)&0x03)<<8) /* current power down level */ #define ES_1371_PDLEVM (0x03<<8) /* mask for above */ #define ES_BREQ (1<<7) /* memory bus request enable */ #define ES_DAC1_EN (1<<6) /* DAC1 playback channel enable */ #define ES_DAC2_EN (1<<5) /* DAC2 playback channel enable */ #define ES_ADC_EN (1<<4) /* ADC capture channel enable */ #define ES_UART_EN (1<<3) /* UART enable */ #define ES_JYSTK_EN (1<<2) /* Joystick module enable */ #define ES_1370_CDC_EN (1<<1) /* Codec interface enable */ #define ES_1371_XTALCKDIS (1<<1) /* Xtal clock disable */ #define ES_1370_SERR_DISABLE (1<<0) /* PCI serr signal disable */ #define ES_1371_PCICLKDIS (1<<0) /* PCI clock disable */ #define ES_REG_STATUS 0x04 /* R/O: Interrupt/Chip select status register */ #define ES_INTR (1<<31) /* Interrupt is pending */ #define ES_1371_ST_AC97_RST (1<<29) /* CT5880 AC'97 Reset bit */ #define ES_1373_REAR_BIT27 (1<<27) /* rear bits: 000 - front, 010 - mirror, 101 - separate */ #define ES_1373_REAR_BIT26 (1<<26) #define ES_1373_REAR_BIT24 (1<<24) #define ES_1373_GPIO_INT_EN(o)(((o)&0x0f)<<20)/* GPIO [3:0] pins - interrupt enable */ #define ES_1373_SPDIF_EN (1<<18) /* SPDIF enable */ #define ES_1373_SPDIF_TEST (1<<17) /* SPDIF test */ #define ES_1371_TEST (1<<16) /* test ASIC */ #define ES_1373_GPIO_INT(i) (((i)&0x0f)>>12)/* GPIO [3:0] pins - interrupt pending */ #define ES_1370_CSTAT (1<<10) /* CODEC is busy or register write in progress */ #define ES_1370_CBUSY (1<<9) /* CODEC is busy */ #define ES_1370_CWRIP (1<<8) /* CODEC register write in progress */ #define ES_1371_SYNC_ERR (1<<8) /* CODEC synchronization error occurred */ #define 
ES_1371_VC(i)	(((i)>>6)&0x03)	/* voice code from CCB module */
#define   ES_1370_VC(i)		(((i)>>5)&0x03)	/* voice code from CCB module */
#define   ES_1371_MPWR		(1<<5)	/* power level interrupt pending */
#define   ES_MCCB		(1<<4)	/* CCB interrupt pending */
#define   ES_UART		(1<<3)	/* UART interrupt pending */
#define   ES_DAC1		(1<<2)	/* DAC1 channel interrupt pending */
#define   ES_DAC2		(1<<1)	/* DAC2 channel interrupt pending */
#define   ES_ADC		(1<<0)	/* ADC channel interrupt pending */
#define ES_REG_UART_DATA	0x08	/* R/W: UART data register */
#define ES_REG_UART_STATUS	0x09	/* R/O: UART status register */
#define   ES_RXINT		(1<<7)	/* RX interrupt occurred */
#define   ES_TXINT		(1<<2)	/* TX interrupt occurred */
#define   ES_TXRDY		(1<<1)	/* transmitter ready */
#define   ES_RXRDY		(1<<0)	/* receiver ready */
#define ES_REG_UART_CONTROL	0x09	/* W/O: UART control register */
#define   ES_RXINTEN		(1<<7)	/* RX interrupt enable */
#define   ES_TXINTENO(o)	(((o)&0x03)<<5)	/* TX interrupt enable */
#define   ES_TXINTENM		(0x03<<5)	/* mask for above */
#define   ES_TXINTENI(i)	(((i)>>5)&0x03)
#define   ES_CNTRL(o)		(((o)&0x03)<<0)	/* control */
#define   ES_CNTRLM		(0x03<<0)	/* mask for above */
#define ES_REG_UART_RES		0x0a	/* R/W: UART reserver register */
#define   ES_TEST_MODE		(1<<0)	/* test mode enabled */
#define ES_REG_MEM_PAGE		0x0c	/* R/W: Memory page register */
#define   ES_MEM_PAGEO(o)	(((o)&0x0f)<<0)	/* memory page select - out */
#define   ES_MEM_PAGEM		(0x0f<<0)	/* mask for above */
#define   ES_MEM_PAGEI(i)	(((i)>>0)&0x0f)	/* memory page select - in */
#define ES_REG_1370_CODEC	0x10	/* W/O: Codec write register address */
#define   ES_1370_CODEC_WRITE(a,d)	((((a)&0xff)<<8)|(((d)&0xff)<<0))
#define ES_REG_1371_CODEC	0x14	/* W/R: Codec Read/Write register address */
#define   ES_1371_CODEC_RDY	(1<<31)	/* codec ready */
#define   ES_1371_CODEC_WIP	(1<<30)	/* codec register access in progress */
#define   EV_1938_CODEC_MAGIC	(1<<26)
#define   ES_1371_CODEC_PIRD	(1<<23)	/* codec read/write select register */
#define   ES_1371_CODEC_WRITE(a,d)	((((a)&0x7f)<<16)|(((d)&0xffff)<<0))
#define   ES_1371_CODEC_READS(a)	((((a)&0x7f)<<16)|ES_1371_CODEC_PIRD)
#define   ES_1371_CODEC_READ(i)		(((i)>>0)&0xffff)

#define ES_REG_1371_SMPRATE	0x10	/* W/R: Codec rate converter interface register */
#define   ES_1371_SRC_RAM_ADDRO(o) (((o)&0x7f)<<25)/* address of the sample rate converter */
#define   ES_1371_SRC_RAM_ADDRM	   (0x7f<<25)	/* mask for above */
#define   ES_1371_SRC_RAM_ADDRI(i) (((i)>>25)&0x7f)/* address of the sample rate converter */
#define   ES_1371_SRC_RAM_WE	   (1<<24)	/* R/W: read/write control for sample rate converter */
#define   ES_1371_SRC_RAM_BUSY     (1<<23)	/* R/O: sample rate memory is busy */
#define   ES_1371_SRC_DISABLE      (1<<22)	/* sample rate converter disable */
#define   ES_1371_DIS_P1	   (1<<21)	/* playback channel 1 accumulator update disable */
#define   ES_1371_DIS_P2	   (1<<20)	/* playback channel 1 accumulator update disable */
#define   ES_1371_DIS_R1	   (1<<19)	/* capture channel accumulator update disable */
#define   ES_1371_SRC_RAM_DATAO(o) (((o)&0xffff)<<0)/* current value of the sample rate converter */
#define   ES_1371_SRC_RAM_DATAM	   (0xffff<<0)	/* mask for above */
#define   ES_1371_SRC_RAM_DATAI(i) (((i)>>0)&0xffff)/* current value of the sample rate converter */

#define ES_REG_1371_LEGACY	0x18	/* W/R: Legacy control/status register */
#define   ES_1371_JFAST		(1<<31)	/* fast joystick timing */
#define   ES_1371_HIB		(1<<30)	/* host interrupt blocking enable */
#define   ES_1371_VSB		(1<<29)	/* SB; 0 = addr 0x220xH, 1 = 0x22FxH */
#define   ES_1371_VMPUO(o)	(((o)&0x03)<<27)/* base register address; 0 = 0x320xH; 1 = 0x330xH; 2 = 0x340xH; 3 = 0x350xH */
#define   ES_1371_VMPUM		(0x03<<27)	/* mask for above */
#define   ES_1371_VMPUI(i)	(((i)>>27)&0x03)/* base register address */
#define   ES_1371_VCDCO(o)	(((o)&0x03)<<25)/* CODEC; 0 = 0x530xH; 1 = undefined; 2 = 0xe80xH; 3 = 0xF40xH */
#define   ES_1371_VCDCM		(0x03<<25)	/* mask for above */
#define   ES_1371_VCDCI(i)	(((i)>>25)&0x03)/* CODEC address */
#define   ES_1371_FIRQ		(1<<24)	/* force an interrupt */
#define   ES_1371_SDMACAP	(1<<23)	/* enable event capture for slave DMA controller */
#define   ES_1371_SPICAP	(1<<22)	/* enable event capture for slave IRQ controller */
#define   ES_1371_MDMACAP	(1<<21)	/* enable event capture for master DMA controller */
#define   ES_1371_MPICAP	(1<<20)	/* enable event capture for master IRQ controller */
#define   ES_1371_ADCAP		(1<<19)	/* enable event capture for ADLIB register; 0x388xH */
#define   ES_1371_SVCAP		(1<<18)	/* enable event capture for SB registers */
#define   ES_1371_CDCCAP	(1<<17)	/* enable event capture for CODEC registers */
#define   ES_1371_BACAP		(1<<16)	/* enable event capture for SoundScape base address */
#define   ES_1371_EXI(i)	(((i)>>8)&0x07)	/* event number */
#define   ES_1371_AI(i)		(((i)>>3)&0x1f)	/* event significant I/O address */
#define   ES_1371_WR		(1<<2)	/* event capture; 0 = read; 1 = write */
#define   ES_1371_LEGINT	(1<<0)	/* interrupt for legacy events; 0 = interrupt did occur */

#define ES_REG_CHANNEL_STATUS 0x1c	/* R/W: first 32-bits from S/PDIF channel status block, es1373 */

#define ES_REG_SERIAL	0x20	/* R/W: Serial interface control register */
#define   ES_1371_DAC_TEST	(1<<22)	/* DAC test mode enable */
#define   ES_P2_END_INCO(o)	(((o)&0x07)<<19)/* binary offset value to increment / loop end */
#define   ES_P2_END_INCM	(0x07<<19)	/* mask for above */
#define   ES_P2_END_INCI(i)	(((i)>>16)&0x07)/* binary offset value to increment / loop end */
#define   ES_P2_ST_INCO(o)	(((o)&0x07)<<16)/* binary offset value to increment / start */
#define   ES_P2_ST_INCM		(0x07<<16)	/* mask for above */
/* NOTE(review): shift direction below (<<16) looks inconsistent with the other
 * *_INCI extractors (which use >>); macro appears unused here -- confirm
 * against the ES1371 datasheet before changing. */
#define   ES_P2_ST_INCI(i)	(((i)<<16)&0x07)/* binary offset value to increment / start */
#define   ES_R1_LOOP_SEL	(1<<15)	/* ADC; 0 - loop mode; 1 = stop mode */
#define   ES_P2_LOOP_SEL	(1<<14)	/* DAC2; 0 - loop mode; 1 = stop mode */
#define   ES_P1_LOOP_SEL	(1<<13)	/* DAC1; 0 - loop mode; 1 = stop mode */
#define   ES_P2_PAUSE		(1<<12)	/* DAC2; 0 - play mode; 1 = pause mode */
#define   ES_P1_PAUSE		(1<<11)	/* DAC1; 0 - play mode; 1 = pause mode */
#define   ES_R1_INT_EN		(1<<10)	/* ADC interrupt enable */
#define   ES_P2_INT_EN		(1<<9)	/* DAC2 interrupt enable */
#define   ES_P1_INT_EN		(1<<8)	/* DAC1 interrupt enable */
#define   ES_P1_SCT_RLD		(1<<7)	/* force sample counter reload for DAC1 */
#define   ES_P2_DAC_SEN		(1<<6)	/* when stop mode: 0 - DAC2 play back zeros; 1 = DAC2 play back last sample */
#define   ES_R1_MODEO(o)	(((o)&0x03)<<4)	/* ADC mode; 0 = 8-bit mono; 1 = 8-bit stereo; 2 = 16-bit mono; 3 = 16-bit stereo */
#define   ES_R1_MODEM		(0x03<<4)	/* mask for above */
#define   ES_R1_MODEI(i)	(((i)>>4)&0x03)
#define   ES_P2_MODEO(o)	(((o)&0x03)<<2)	/* DAC2 mode; -- '' -- */
#define   ES_P2_MODEM		(0x03<<2)	/* mask for above */
#define   ES_P2_MODEI(i)	(((i)>>2)&0x03)
#define   ES_P1_MODEO(o)	(((o)&0x03)<<0)	/* DAC1 mode; -- '' -- */
#define   ES_P1_MODEM		(0x03<<0)	/* mask for above */
#define   ES_P1_MODEI(i)	(((i)>>0)&0x03)

#define ES_REG_DAC1_COUNT 0x24	/* R/W: DAC1 sample count register */
#define ES_REG_DAC2_COUNT 0x28	/* R/W: DAC2 sample count register */
#define ES_REG_ADC_COUNT  0x2c	/* R/W: ADC sample count register */
#define   ES_REG_CURR_COUNT(i)  (((i)>>16)&0xffff)
#define   ES_REG_COUNTO(o)	(((o)&0xffff)<<0)
#define   ES_REG_COUNTM		(0xffff<<0)
#define   ES_REG_COUNTI(i)	(((i)>>0)&0xffff)

#define ES_REG_DAC1_FRAME 0x30	/* R/W: PAGE 0x0c; DAC1 frame address */
#define ES_REG_DAC1_SIZE  0x34	/* R/W: PAGE 0x0c; DAC1 frame size */
#define ES_REG_DAC2_FRAME 0x38	/* R/W: PAGE 0x0c; DAC2 frame address */
#define ES_REG_DAC2_SIZE  0x3c	/* R/W: PAGE 0x0c; DAC2 frame size */
#define ES_REG_ADC_FRAME  0x30	/* R/W: PAGE 0x0d; ADC frame address */
#define ES_REG_ADC_SIZE	  0x34	/* R/W: PAGE 0x0d; ADC frame size */
#define   ES_REG_FCURR_COUNTO(o) (((o)&0xffff)<<16)
#define   ES_REG_FCURR_COUNTM    (0xffff<<16)
#define   ES_REG_FCURR_COUNTI(i) (((i)>>14)&0x3fffc)
#define   ES_REG_FSIZEO(o)	 (((o)&0xffff)<<0)
#define   ES_REG_FSIZEM		 (0xffff<<0)
#define   ES_REG_FSIZEI(i)	 (((i)>>0)&0xffff)
#define ES_REG_PHANTOM_FRAME 0x38 /* R/W: PAGE 0x0d: phantom frame address */
#define ES_REG_PHANTOM_COUNT 0x3c /* R/W: PAGE 0x0d: phantom frame count */

#define ES_REG_UART_FIFO  0x30	/* R/W: PAGE 0x0e; UART FIFO register */
#define   ES_REG_UF_VALID	 (1<<8)
#define   ES_REG_UF_BYTEO(o)	 (((o)&0xff)<<0)
#define   ES_REG_UF_BYTEM	 (0xff<<0)
#define   ES_REG_UF_BYTEI(i)	 (((i)>>0)&0xff)

/*
 *  Pages
 */

#define ES_PAGE_DAC	0x0c
#define ES_PAGE_ADC	0x0d
#define ES_PAGE_UART	0x0e
#define ES_PAGE_UART1	0x0f

/*
 *  Sample rate converter addresses
 */

#define ES_SMPREG_DAC1		0x70
#define ES_SMPREG_DAC2		0x74
#define ES_SMPREG_ADC		0x78
#define ES_SMPREG_VOL_ADC	0x6c
#define ES_SMPREG_VOL_DAC1	0x7c
#define ES_SMPREG_VOL_DAC2	0x7e
#define ES_SMPREG_TRUNC_N	0x00
#define ES_SMPREG_INT_REGS	0x01
#define ES_SMPREG_ACCUM_FRAC	0x02
#define ES_SMPREG_VFREQ_FRAC	0x03

/*
 *  Some contants
 */

#define ES_1370_SRCLOCK	   1411200
#define ES_1370_SRTODIV(x) (ES_1370_SRCLOCK/(x)-2)

/*
 *  Open modes
 */

#define ES_MODE_PLAY1	0x0001
#define ES_MODE_PLAY2	0x0002
#define ES_MODE_CAPTURE	0x0004

#define ES_MODE_OUTPUT	0x0001	/* for MIDI */
#define ES_MODE_INPUT	0x0002	/* for MIDI */

/*
 *  Per-card state.
 */

struct ensoniq {
	spinlock_t reg_lock;		/* guards ctrl/sctrl register mirrors */
	struct mutex src_mutex;		/* serializes SRC RAM / codec access */

	int irq;

	unsigned long playback1size;
	unsigned long playback2size;
	unsigned long capture3size;

	unsigned long port;		/* I/O port base for ES_REG() */
	unsigned int mode;		/* ES_MODE_* bits of open streams */
	unsigned int uartm;		/* UART mode */

	unsigned int ctrl;		/* control register */
	unsigned int sctrl;		/* serial control register */
	unsigned int cssr;		/* control status register */
	unsigned int uartc;		/* uart control register */
	unsigned int rev;		/* chip revision */

	union {
#ifdef CHIP1371
		struct {
			struct snd_ac97 *ac97;
		} es1371;
#else
		struct {
			/* which stream (PLAY2/CAPTURE) owns the shared clock divider */
			int pclkdiv_lock;
			struct snd_ak4531 *ak4531;
		} es1370;
#endif
	} u;

	struct pci_dev *pci;
	struct snd_card *card;
	struct snd_pcm *pcm1;		/* DAC1/ADC PCM */
	struct snd_pcm *pcm2;		/* DAC2 PCM */
	struct snd_pcm_substream *playback1_substream;
	struct snd_pcm_substream *playback2_substream;
	struct snd_pcm_substream *capture_substream;
	unsigned int p1_dma_size;
	unsigned int p2_dma_size;
	unsigned int c_dma_size;
	unsigned int p1_period_size;
	unsigned int p2_period_size;
	unsigned int c_period_size;
	struct snd_rawmidi *rmidi;
	struct snd_rawmidi_substream *midi_input;
	struct snd_rawmidi_substream *midi_output;

	unsigned int spdif;
	unsigned int spdif_default;
	unsigned int spdif_stream;

#ifdef CHIP1370
	struct snd_dma_buffer dma_bug;
#endif

#ifdef SUPPORT_JOYSTICK
	struct gameport *gameport;
#endif
};

static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id);

static DEFINE_PCI_DEVICE_TABLE(snd_audiopci_ids) = {
#ifdef CHIP1370
	{ PCI_VDEVICE(ENSONIQ, 0x5000), 0, },	/* ES1370 */
#endif
#ifdef CHIP1371
	{ PCI_VDEVICE(ENSONIQ, 0x1371), 0, },	/* ES1371 */
	{ PCI_VDEVICE(ENSONIQ, 0x5880), 0, },	/* ES1373 - CT5880 */
	{ PCI_VDEVICE(ECTIVA, 0x8938), 0, },	/* Ectiva EV1938 */
#endif
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, snd_audiopci_ids);

/*
 *  constants
 */

#define POLL_COUNT	0xa000

#ifdef CHIP1370
static unsigned int snd_es1370_fixed_rates[] = {5512, 11025, 22050, 44100};
static struct snd_pcm_hw_constraint_list snd_es1370_hw_constraints_rates = {
	.count = 4,
	.list = snd_es1370_fixed_rates,
	.mask = 0,
};
static struct snd_ratnum es1370_clock = {
	.num = ES_1370_SRCLOCK,
	.den_min = 29,
	.den_max = 353,
	.den_step = 1,
};
static struct snd_pcm_hw_constraint_ratnums snd_es1370_hw_constraints_clock = {
	.nrats = 1,
	.rats = &es1370_clock,
};
#else
static struct snd_ratden es1371_dac_clock = {
	.num_min = 3000 * (1 << 15),
	.num_max = 48000 * (1 << 15),
	.num_step = 3000,
	.den = 1 << 15,
};
static struct snd_pcm_hw_constraint_ratdens snd_es1371_hw_constraints_dac_clock = {
	.nrats = 1,
	.rats = &es1371_dac_clock,
};
static struct snd_ratnum es1371_adc_clock = {
	.num = 48000 << 15,
	.den_min = 32768,
	.den_max = 393216,
	.den_step = 1,
};
static struct snd_pcm_hw_constraint_ratnums snd_es1371_hw_constraints_adc_clock = {
	.nrats = 1,
	.rats = &es1371_adc_clock,
};
#endif
static const unsigned int
snd_ensoniq_sample_shift[] = {0, 1, 1, 2};

/*
 *  common I/O routines
 */

#ifdef CHIP1371

/* Poll until the SRC interface is no longer busy; returns the last register
 * value read, or 0 on timeout (after logging an error). */
static unsigned int snd_es1371_wait_src_ready(struct ensoniq * ensoniq)
{
	unsigned int t, r = 0;

	for (t = 0; t < POLL_COUNT; t++) {
		r = inl(ES_REG(ensoniq, 1371_SMPRATE));
		if ((r & ES_1371_SRC_RAM_BUSY) == 0)
			return r;
		cond_resched();
	}
	snd_printk(KERN_ERR "wait src ready timeout 0x%lx [0x%x]\n",
		   ES_REG(ensoniq, 1371_SMPRATE), r);
	return 0;
}

/* Read one word from the sample-rate-converter RAM at address 'reg',
 * preserving the SRC disable bits around the access. */
static unsigned int snd_es1371_src_read(struct ensoniq * ensoniq, unsigned short reg)
{
	unsigned int temp, i, orig, r;

	/* wait for ready */
	temp = orig = snd_es1371_wait_src_ready(ensoniq);

	/* expose the SRC state bits */
	r = temp & (ES_1371_SRC_DISABLE | ES_1371_DIS_P1 |
		    ES_1371_DIS_P2 | ES_1371_DIS_R1);
	r |= ES_1371_SRC_RAM_ADDRO(reg) | 0x10000;
	outl(r, ES_REG(ensoniq, 1371_SMPRATE));

	/* now, wait for busy and the correct time to read */
	temp = snd_es1371_wait_src_ready(ensoniq);

	if ((temp & 0x00870000) != 0x00010000) {
		/* wait for the right state */
		for (i = 0; i < POLL_COUNT; i++) {
			temp = inl(ES_REG(ensoniq, 1371_SMPRATE));
			if ((temp & 0x00870000) == 0x00010000)
				break;
		}
	}

	/* hide the state bits */
	r = orig & (ES_1371_SRC_DISABLE | ES_1371_DIS_P1 |
		    ES_1371_DIS_P2 | ES_1371_DIS_R1);
	r |= ES_1371_SRC_RAM_ADDRO(reg);
	outl(r, ES_REG(ensoniq, 1371_SMPRATE));

	return temp;
}

/* Write one word to the sample-rate-converter RAM at address 'reg'. */
static void snd_es1371_src_write(struct ensoniq * ensoniq,
				 unsigned short reg, unsigned short data)
{
	unsigned int r;

	r = snd_es1371_wait_src_ready(ensoniq) &
	    (ES_1371_SRC_DISABLE | ES_1371_DIS_P1 |
	     ES_1371_DIS_P2 | ES_1371_DIS_R1);
	r |= ES_1371_SRC_RAM_ADDRO(reg) | ES_1371_SRC_RAM_DATAO(data);
	outl(r | ES_1371_SRC_RAM_WE, ES_REG(ensoniq, 1371_SMPRATE));
}

#endif /* CHIP1371 */

#ifdef CHIP1370
/* AK4531 codec register write for the ES1370; polls the chip-busy status bit
 * for up to HZ/10 jiffies before giving up with an error message. */
static void snd_es1370_codec_write(struct snd_ak4531 *ak4531,
				   unsigned short reg, unsigned short val)
{
	struct ensoniq *ensoniq = ak4531->private_data;
	unsigned long end_time = jiffies + HZ / 10;

#if 0
	printk(KERN_DEBUG
	       "CODEC WRITE: reg = 0x%x, val = 0x%x (0x%x), creg = 0x%x\n",
	       reg, val, ES_1370_CODEC_WRITE(reg, val), ES_REG(ensoniq, 1370_CODEC));
#endif
	do {
		if (!(inl(ES_REG(ensoniq, STATUS)) & ES_1370_CSTAT)) {
			outw(ES_1370_CODEC_WRITE(reg, val), ES_REG(ensoniq, 1370_CODEC));
			return;
		}
		schedule_timeout_uninterruptible(1);
	} while (time_after(end_time, jiffies));
	snd_printk(KERN_ERR "codec write timeout, status = 0x%x\n",
		   inl(ES_REG(ensoniq, STATUS)));
}
#endif /* CHIP1370 */

#ifdef CHIP1371

/* The Ectiva EV1938 clone needs an extra magic bit on codec accesses. */
static inline bool is_ev1938(struct ensoniq *ensoniq)
{
	return ensoniq->pci->device == 0x8938;
}

/* AC'97 codec register write for the ES1371: waits for WIP to clear, parks
 * the SRC in a safe state, performs the write, then restores the SRC. */
static void snd_es1371_codec_write(struct snd_ac97 *ac97,
				   unsigned short reg, unsigned short val)
{
	struct ensoniq *ensoniq = ac97->private_data;
	unsigned int t, x, flag;

	flag = is_ev1938(ensoniq) ? EV_1938_CODEC_MAGIC : 0;
	mutex_lock(&ensoniq->src_mutex);
	for (t = 0; t < POLL_COUNT; t++) {
		if (!(inl(ES_REG(ensoniq, 1371_CODEC)) & ES_1371_CODEC_WIP)) {
			/* save the current state for latter */
			x = snd_es1371_wait_src_ready(ensoniq);
			outl((x & (ES_1371_SRC_DISABLE | ES_1371_DIS_P1 |
			           ES_1371_DIS_P2 | ES_1371_DIS_R1)) | 0x00010000,
			     ES_REG(ensoniq, 1371_SMPRATE));
			/* wait for not busy (state 0) first to avoid
			   transition states */
			for (t = 0; t < POLL_COUNT; t++) {
				if ((inl(ES_REG(ensoniq, 1371_SMPRATE)) & 0x00870000) ==
				    0x00000000)
					break;
			}
			/* wait for a SAFE time to write addr/data and then do it, dammit */
			for (t = 0; t < POLL_COUNT; t++) {
				if ((inl(ES_REG(ensoniq, 1371_SMPRATE)) & 0x00870000) ==
				    0x00010000)
					break;
			}
			outl(ES_1371_CODEC_WRITE(reg, val) | flag,
			     ES_REG(ensoniq, 1371_CODEC));
			/* restore SRC reg */
			snd_es1371_wait_src_ready(ensoniq);
			outl(x, ES_REG(ensoniq, 1371_SMPRATE));
			mutex_unlock(&ensoniq->src_mutex);
			return;
		}
	}
	mutex_unlock(&ensoniq->src_mutex);
	snd_printk(KERN_ERR "codec write timeout at 0x%lx [0x%x]\n",
		   ES_REG(ensoniq, 1371_CODEC), inl(ES_REG(ensoniq, 1371_CODEC)));
}

/* AC'97 codec register read for the ES1371; same SRC parking dance as the
 * write path, plus a wait for RDY and up to 10 whole-sequence retries. */
static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97,
					    unsigned short reg)
{
	struct ensoniq *ensoniq = ac97->private_data;
	unsigned int t, x, flag, fail = 0;

	flag = is_ev1938(ensoniq) ? EV_1938_CODEC_MAGIC : 0;
      __again:
	mutex_lock(&ensoniq->src_mutex);
	for (t = 0; t < POLL_COUNT; t++) {
		if (!(inl(ES_REG(ensoniq, 1371_CODEC)) & ES_1371_CODEC_WIP)) {
			/* save the current state for latter */
			x = snd_es1371_wait_src_ready(ensoniq);
			outl((x & (ES_1371_SRC_DISABLE | ES_1371_DIS_P1 |
			           ES_1371_DIS_P2 | ES_1371_DIS_R1)) | 0x00010000,
			     ES_REG(ensoniq, 1371_SMPRATE));
			/* wait for not busy (state 0) first to avoid
			   transition states */
			for (t = 0; t < POLL_COUNT; t++) {
				if ((inl(ES_REG(ensoniq, 1371_SMPRATE)) & 0x00870000) ==
				    0x00000000)
					break;
			}
			/* wait for a SAFE time to write addr/data and then do it, dammit */
			for (t = 0; t < POLL_COUNT; t++) {
				if ((inl(ES_REG(ensoniq, 1371_SMPRATE)) & 0x00870000) ==
				    0x00010000)
					break;
			}
			outl(ES_1371_CODEC_READS(reg) | flag,
			     ES_REG(ensoniq, 1371_CODEC));
			/* restore SRC reg */
			snd_es1371_wait_src_ready(ensoniq);
			outl(x, ES_REG(ensoniq, 1371_SMPRATE));
			/* wait for WIP again */
			for (t = 0; t < POLL_COUNT; t++) {
				if (!(inl(ES_REG(ensoniq, 1371_CODEC)) & ES_1371_CODEC_WIP))
					break;
			}
			/* now wait for the stinkin' data (RDY) */
			for (t = 0; t < POLL_COUNT; t++) {
				if ((x = inl(ES_REG(ensoniq, 1371_CODEC))) & ES_1371_CODEC_RDY) {
					if (is_ev1938(ensoniq)) {
						/* EV1938 quirk: burn bus cycles, then re-read */
						for (t = 0; t < 100; t++)
							inl(ES_REG(ensoniq, CONTROL));
						x = inl(ES_REG(ensoniq, 1371_CODEC));
					}
					mutex_unlock(&ensoniq->src_mutex);
					return ES_1371_CODEC_READ(x);
				}
			}
			mutex_unlock(&ensoniq->src_mutex);
			if (++fail > 10) {
				snd_printk(KERN_ERR "codec read timeout (final) "
					   "at 0x%lx, reg = 0x%x [0x%x]\n",
					   ES_REG(ensoniq, 1371_CODEC), reg,
					   inl(ES_REG(ensoniq, 1371_CODEC)));
				return 0;
			}
			goto __again;
		}
	}
	mutex_unlock(&ensoniq->src_mutex);
	snd_printk(KERN_ERR "es1371: codec read timeout at 0x%lx [0x%x]\n",
		   ES_REG(ensoniq, 1371_CODEC), inl(ES_REG(ensoniq, 1371_CODEC)));
	return 0;
}

/* Give the AC'97 codec time to come out of reset, then prime it with a few
 * reads (values are discarded). */
static void snd_es1371_codec_wait(struct snd_ac97 *ac97)
{
	msleep(750);
	snd_es1371_codec_read(ac97, AC97_RESET);
	snd_es1371_codec_read(ac97, AC97_VENDOR_ID1);
	snd_es1371_codec_read(ac97, AC97_VENDOR_ID2);
	msleep(50);
}

/* Program the SRC for the ADC (capture) path at 'rate' Hz. */
static void snd_es1371_adc_rate(struct ensoniq * ensoniq, unsigned int rate)
{
	unsigned int n, truncm, freq, result;

	mutex_lock(&ensoniq->src_mutex);
	n = rate / 3000;
	if ((1 << n) & ((1 << 15) | (1 << 13) | (1 << 11) | (1 << 9)))
		n--;
	truncm = (21 * n - 1) | 1;
	freq = ((48000UL << 15) / rate) * n;
	result = (48000UL << 15) / (freq / n);
	if (rate >= 24000) {
		if (truncm > 239)
			truncm = 239;
		snd_es1371_src_write(ensoniq, ES_SMPREG_ADC + ES_SMPREG_TRUNC_N,
				     (((239 - truncm) >> 1) << 9) | (n << 4));
	} else {
		if (truncm > 119)
			truncm = 119;
		snd_es1371_src_write(ensoniq, ES_SMPREG_ADC + ES_SMPREG_TRUNC_N,
				     0x8000 | (((119 - truncm) >> 1) << 9) | (n << 4));
	}
	snd_es1371_src_write(ensoniq, ES_SMPREG_ADC + ES_SMPREG_INT_REGS,
			     (snd_es1371_src_read(ensoniq, ES_SMPREG_ADC +
						  ES_SMPREG_INT_REGS) & 0x00ff) |
			     ((freq >> 5) & 0xfc00));
	snd_es1371_src_write(ensoniq, ES_SMPREG_ADC + ES_SMPREG_VFREQ_FRAC, freq & 0x7fff);
	snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_ADC, n << 8);
	snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_ADC + 1, n << 8);
	mutex_unlock(&ensoniq->src_mutex);
}

/* Program the SRC for the DAC1 playback path at 'rate' Hz; the channel is
 * disabled (ES_1371_DIS_P1) around the update. */
static void snd_es1371_dac1_rate(struct ensoniq * ensoniq, unsigned int rate)
{
	unsigned int freq, r;

	mutex_lock(&ensoniq->src_mutex);
	freq = ((rate << 15) + 1500) / 3000;
	r = (snd_es1371_wait_src_ready(ensoniq) &
	     (ES_1371_SRC_DISABLE | ES_1371_DIS_P2 |
	      ES_1371_DIS_R1)) | ES_1371_DIS_P1;
	outl(r, ES_REG(ensoniq, 1371_SMPRATE));
	snd_es1371_src_write(ensoniq, ES_SMPREG_DAC1 + ES_SMPREG_INT_REGS,
			     (snd_es1371_src_read(ensoniq, ES_SMPREG_DAC1 +
						  ES_SMPREG_INT_REGS) & 0x00ff) |
			     ((freq >> 5) & 0xfc00));
	snd_es1371_src_write(ensoniq, ES_SMPREG_DAC1 + ES_SMPREG_VFREQ_FRAC, freq & 0x7fff);
	r = (snd_es1371_wait_src_ready(ensoniq) &
	     (ES_1371_SRC_DISABLE | ES_1371_DIS_P2 | ES_1371_DIS_R1));
	outl(r, ES_REG(ensoniq, 1371_SMPRATE));
	mutex_unlock(&ensoniq->src_mutex);
}

/* Program the SRC for the DAC2 playback path at 'rate' Hz. */
static void snd_es1371_dac2_rate(struct ensoniq * ensoniq, unsigned int rate)
{
	unsigned int freq, r;

	mutex_lock(&ensoniq->src_mutex);
	freq = ((rate << 15) + 1500) / 3000;
	r = (snd_es1371_wait_src_ready(ensoniq) &
	     (ES_1371_SRC_DISABLE | ES_1371_DIS_P1 |
	      ES_1371_DIS_R1)) | ES_1371_DIS_P2;
	outl(r, ES_REG(ensoniq, 1371_SMPRATE));
	snd_es1371_src_write(ensoniq, ES_SMPREG_DAC2 + ES_SMPREG_INT_REGS,
			     (snd_es1371_src_read(ensoniq, ES_SMPREG_DAC2 +
						  ES_SMPREG_INT_REGS) & 0x00ff) |
			     ((freq >> 5) & 0xfc00));
	snd_es1371_src_write(ensoniq, ES_SMPREG_DAC2 + ES_SMPREG_VFREQ_FRAC,
			     freq & 0x7fff);
	r = (snd_es1371_wait_src_ready(ensoniq) &
	     (ES_1371_SRC_DISABLE | ES_1371_DIS_P1 | ES_1371_DIS_R1));
	outl(r, ES_REG(ensoniq, 1371_SMPRATE));
	mutex_unlock(&ensoniq->src_mutex);
}

#endif /* CHIP1371 */

/* PCM trigger callback shared by all three substreams.  Pause/release toggles
 * the per-channel PAUSE bits in the serial control register; start/stop
 * toggles the channel-enable bits in the control register.  Linked substreams
 * are handled together via snd_pcm_group_for_each_entry(). */
static int snd_ensoniq_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	{
		unsigned int what = 0;
		struct snd_pcm_substream *s;
		snd_pcm_group_for_each_entry(s, substream) {
			if (s == ensoniq->playback1_substream) {
				what |= ES_P1_PAUSE;
				snd_pcm_trigger_done(s, substream);
			} else if (s == ensoniq->playback2_substream) {
				what |= ES_P2_PAUSE;
				snd_pcm_trigger_done(s, substream);
			} else if (s == ensoniq->capture_substream)
				/* capture has no pause bit on this hardware */
				return -EINVAL;
		}
		spin_lock(&ensoniq->reg_lock);
		if (cmd == SNDRV_PCM_TRIGGER_PAUSE_PUSH)
			ensoniq->sctrl |= what;
		else
			ensoniq->sctrl &= ~what;
		outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL));
		spin_unlock(&ensoniq->reg_lock);
		break;
	}
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_STOP:
	{
		unsigned int what = 0;
		struct snd_pcm_substream *s;
		snd_pcm_group_for_each_entry(s, substream) {
			if (s == ensoniq->playback1_substream) {
				what |= ES_DAC1_EN;
				snd_pcm_trigger_done(s, substream);
			} else if (s == ensoniq->playback2_substream) {
				what |= ES_DAC2_EN;
				snd_pcm_trigger_done(s, substream);
			} else if (s == ensoniq->capture_substream) {
				what |= ES_ADC_EN;
				snd_pcm_trigger_done(s, substream);
			}
		}
		spin_lock(&ensoniq->reg_lock);
		if (cmd == SNDRV_PCM_TRIGGER_START)
			ensoniq->ctrl |= what;
		else
			ensoniq->ctrl &= ~what;
		outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
		spin_unlock(&ensoniq->reg_lock);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 *  PCM part
 */

static int snd_ensoniq_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *hw_params)
{
	return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
}

static int snd_ensoniq_hw_free(struct snd_pcm_substream *substream)
{
	return snd_pcm_lib_free_pages(substream);
}

/* Prepare DAC1: program frame address/size, serial mode bits, the period
 * counter and (per chip) either the fixed-rate selector (ES1370) or the SRC
 * rate (ES1371). */
static int snd_ensoniq_playback1_prepare(struct snd_pcm_substream *substream)
{
	struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int mode = 0;

	ensoniq->p1_dma_size = snd_pcm_lib_buffer_bytes(substream);
	ensoniq->p1_period_size = snd_pcm_lib_period_bytes(substream);
	if (snd_pcm_format_width(runtime->format) == 16)
		mode |= 0x02;
	if (runtime->channels > 1)
		mode |= 0x01;
	spin_lock_irq(&ensoniq->reg_lock);
	ensoniq->ctrl &= ~ES_DAC1_EN;
#ifdef CHIP1371
	/* 48k doesn't need SRC (it breaks AC3-passthru) */
	if (runtime->rate == 48000)
		ensoniq->ctrl |= ES_1373_BYPASS_P1;
	else
		ensoniq->ctrl &= ~ES_1373_BYPASS_P1;
#endif
	outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
	outl(ES_MEM_PAGEO(ES_PAGE_DAC), ES_REG(ensoniq, MEM_PAGE));
	outl(runtime->dma_addr, ES_REG(ensoniq, DAC1_FRAME));
	outl((ensoniq->p1_dma_size >> 2) - 1, ES_REG(ensoniq, DAC1_SIZE));
	ensoniq->sctrl &= ~(ES_P1_LOOP_SEL | ES_P1_PAUSE | ES_P1_SCT_RLD | ES_P1_MODEM);
	ensoniq->sctrl |= ES_P1_INT_EN | ES_P1_MODEO(mode);
	outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL));
	outl((ensoniq->p1_period_size >> snd_ensoniq_sample_shift[mode]) - 1,
	     ES_REG(ensoniq, DAC1_COUNT));
#ifdef CHIP1370
	ensoniq->ctrl &= ~ES_1370_WTSRSELM;
	switch (runtime->rate) {
	case 5512:
		ensoniq->ctrl |= ES_1370_WTSRSEL(0);
		break;
	case 11025:
		ensoniq->ctrl |= ES_1370_WTSRSEL(1);
		break;
	case 22050:
		ensoniq->ctrl |= ES_1370_WTSRSEL(2);
		break;
	case 44100:
		ensoniq->ctrl |= ES_1370_WTSRSEL(3);
		break;
	default:
		snd_BUG();
	}
#endif
	outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
	spin_unlock_irq(&ensoniq->reg_lock);
#ifndef CHIP1370
	snd_es1371_dac1_rate(ensoniq, runtime->rate);
#endif
	return 0;
}

/* Prepare DAC2: as above; on ES1370 the clock divider is shared with capture,
 * so it is only reprogrammed when capture does not already own it. */
static int snd_ensoniq_playback2_prepare(struct snd_pcm_substream *substream)
{
	struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int mode = 0;

	ensoniq->p2_dma_size = snd_pcm_lib_buffer_bytes(substream);
	ensoniq->p2_period_size = snd_pcm_lib_period_bytes(substream);
	if (snd_pcm_format_width(runtime->format) == 16)
		mode |= 0x02;
	if (runtime->channels > 1)
		mode |= 0x01;
	spin_lock_irq(&ensoniq->reg_lock);
	ensoniq->ctrl &= ~ES_DAC2_EN;
	outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
	outl(ES_MEM_PAGEO(ES_PAGE_DAC), ES_REG(ensoniq, MEM_PAGE));
	outl(runtime->dma_addr, ES_REG(ensoniq, DAC2_FRAME));
	outl((ensoniq->p2_dma_size >> 2) - 1, ES_REG(ensoniq, DAC2_SIZE));
	ensoniq->sctrl &= ~(ES_P2_LOOP_SEL | ES_P2_PAUSE | ES_P2_DAC_SEN |
			    ES_P2_END_INCM | ES_P2_ST_INCM | ES_P2_MODEM);
	ensoniq->sctrl |= ES_P2_INT_EN | ES_P2_MODEO(mode) |
			  ES_P2_END_INCO(mode & 2 ? 2 : 1) | ES_P2_ST_INCO(0);
	outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL));
	outl((ensoniq->p2_period_size >> snd_ensoniq_sample_shift[mode]) - 1,
	     ES_REG(ensoniq, DAC2_COUNT));
#ifdef CHIP1370
	if (!(ensoniq->u.es1370.pclkdiv_lock & ES_MODE_CAPTURE)) {
		ensoniq->ctrl &= ~ES_1370_PCLKDIVM;
		ensoniq->ctrl |= ES_1370_PCLKDIVO(ES_1370_SRTODIV(runtime->rate));
		ensoniq->u.es1370.pclkdiv_lock |= ES_MODE_PLAY2;
	}
#endif
	outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
	spin_unlock_irq(&ensoniq->reg_lock);
#ifndef CHIP1370
	snd_es1371_dac2_rate(ensoniq, runtime->rate);
#endif
	return 0;
}

/* Prepare the ADC (capture) channel; mirror of the DAC2 logic for the shared
 * ES1370 clock divider. */
static int snd_ensoniq_capture_prepare(struct snd_pcm_substream *substream)
{
	struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int mode = 0;

	ensoniq->c_dma_size = snd_pcm_lib_buffer_bytes(substream);
	ensoniq->c_period_size = snd_pcm_lib_period_bytes(substream);
	if (snd_pcm_format_width(runtime->format) == 16)
		mode |= 0x02;
	if (runtime->channels > 1)
		mode |= 0x01;
	spin_lock_irq(&ensoniq->reg_lock);
	ensoniq->ctrl &= ~ES_ADC_EN;
	outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
	outl(ES_MEM_PAGEO(ES_PAGE_ADC), ES_REG(ensoniq, MEM_PAGE));
	outl(runtime->dma_addr, ES_REG(ensoniq, ADC_FRAME));
	outl((ensoniq->c_dma_size >> 2) - 1, ES_REG(ensoniq, ADC_SIZE));
	ensoniq->sctrl &= ~(ES_R1_LOOP_SEL | ES_R1_MODEM);
	ensoniq->sctrl |= ES_R1_INT_EN | ES_R1_MODEO(mode);
	outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL));
	outl((ensoniq->c_period_size >> snd_ensoniq_sample_shift[mode]) - 1,
	     ES_REG(ensoniq, ADC_COUNT));
#ifdef CHIP1370
	if (!(ensoniq->u.es1370.pclkdiv_lock & ES_MODE_PLAY2)) {
		ensoniq->ctrl &= ~ES_1370_PCLKDIVM;
		ensoniq->ctrl |= ES_1370_PCLKDIVO(ES_1370_SRTODIV(runtime->rate));
		ensoniq->u.es1370.pclkdiv_lock |= ES_MODE_CAPTURE;
	}
#endif
	outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL));
	spin_unlock_irq(&ensoniq->reg_lock);
#ifndef CHIP1370
	snd_es1371_adc_rate(ensoniq, runtime->rate);
#endif
	return 0;
}

static snd_pcm_uframes_t
snd_ensoniq_playback1_pointer(struct snd_pcm_substream *substream)
{
	struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
	size_t ptr;

	spin_lock(&ensoniq->reg_lock);
	if (inl(ES_REG(ensoniq, CONTROL)) & ES_DAC1_EN) {
		/* frame-count registers live on the DAC memory page */
		outl(ES_MEM_PAGEO(ES_PAGE_DAC), ES_REG(ensoniq, MEM_PAGE));
		ptr = ES_REG_FCURR_COUNTI(inl(ES_REG(ensoniq, DAC1_SIZE)));
		ptr = bytes_to_frames(substream->runtime, ptr);
	} else {
		ptr = 0;
	}
	spin_unlock(&ensoniq->reg_lock);
	return ptr;
}

static snd_pcm_uframes_t
snd_ensoniq_playback2_pointer(struct snd_pcm_substream *substream)
{
	struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
	size_t ptr;

	spin_lock(&ensoniq->reg_lock);
	if (inl(ES_REG(ensoniq, CONTROL)) & ES_DAC2_EN) {
		outl(ES_MEM_PAGEO(ES_PAGE_DAC), ES_REG(ensoniq, MEM_PAGE));
		ptr = ES_REG_FCURR_COUNTI(inl(ES_REG(ensoniq, DAC2_SIZE)));
		ptr = bytes_to_frames(substream->runtime, ptr);
	} else {
		ptr = 0;
	}
	spin_unlock(&ensoniq->reg_lock);
	return ptr;
}

static snd_pcm_uframes_t
snd_ensoniq_capture_pointer(struct snd_pcm_substream *substream)
{
	struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
	size_t ptr;

	spin_lock(&ensoniq->reg_lock);
	if (inl(ES_REG(ensoniq, CONTROL)) & ES_ADC_EN) {
		outl(ES_MEM_PAGEO(ES_PAGE_ADC), ES_REG(ensoniq, MEM_PAGE));
		ptr = ES_REG_FCURR_COUNTI(inl(ES_REG(ensoniq, ADC_SIZE)));
		ptr = bytes_to_frames(substream->runtime, ptr);
	} else {
		ptr = 0;
	}
	spin_unlock(&ensoniq->reg_lock);
	return ptr;
}

static struct snd_pcm_hardware snd_ensoniq_playback1 =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_SYNC_START),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =
#ifndef CHIP1370
				SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
#else
				(SNDRV_PCM_RATE_KNOT | 	/* 5512Hz rate */
				 SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_22050 |
				 SNDRV_PCM_RATE_44100),
#endif
	.rate_min =		4000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};

static struct snd_pcm_hardware snd_ensoniq_playback2 =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_PAUSE |
				 SNDRV_PCM_INFO_SYNC_START),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		4000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};

static struct snd_pcm_hardware snd_ensoniq_capture =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_SYNC_START),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		4000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(128*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(128*1024),
	.periods_min =		1,
	.periods_max =		1024,
	.fifo_size =		0,
};

static int snd_ensoniq_playback1_open(struct snd_pcm_substream *substream)
{
	struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;

	ensoniq->mode |= ES_MODE_PLAY1;
	ensoniq->playback1_substream = substream;
	runtime->hw = snd_ensoniq_playback1;
	snd_pcm_set_sync(substream);
	spin_lock_irq(&ensoniq->reg_lock);
	/* first opener resets the S/PDIF stream status to the default */
	if (ensoniq->spdif && ensoniq->playback2_substream == NULL)
		ensoniq->spdif_stream = ensoniq->spdif_default;
	spin_unlock_irq(&ensoniq->reg_lock);
#ifdef CHIP1370
	snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				   &snd_es1370_hw_constraints_rates);
#else
	snd_pcm_hw_constraint_ratdens(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				      &snd_es1371_hw_constraints_dac_clock);
#endif
	return 0;
}

static int snd_ensoniq_playback2_open(struct snd_pcm_substream *substream)
{
	struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;

	ensoniq->mode |= ES_MODE_PLAY2;
	ensoniq->playback2_substream = substream;
	runtime->hw = snd_ensoniq_playback2;
	snd_pcm_set_sync(substream);
	spin_lock_irq(&ensoniq->reg_lock);
	if (ensoniq->spdif && ensoniq->playback1_substream == NULL)
		ensoniq->spdif_stream = ensoniq->spdif_default;
	spin_unlock_irq(&ensoniq->reg_lock);
#ifdef CHIP1370
	snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				      &snd_es1370_hw_constraints_clock);
#else
	snd_pcm_hw_constraint_ratdens(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				      &snd_es1371_hw_constraints_dac_clock);
#endif
	return 0;
}

static int snd_ensoniq_capture_open(struct snd_pcm_substream *substream)
{
	struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;

	ensoniq->mode |= ES_MODE_CAPTURE;
	ensoniq->capture_substream = substream;
	runtime->hw = snd_ensoniq_capture;
	snd_pcm_set_sync(substream);
#ifdef CHIP1370
	snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				      &snd_es1370_hw_constraints_clock);
#else
	snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
				      &snd_es1371_hw_constraints_adc_clock);
#endif
	return 0;
}

static int snd_ensoniq_playback1_close(struct snd_pcm_substream *substream)
{
	struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);

	ensoniq->playback1_substream = NULL;
	ensoniq->mode &= ~ES_MODE_PLAY1;
	return 0;
}

static int snd_ensoniq_playback2_close(struct snd_pcm_substream *substream)
{
	struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);

	ensoniq->playback2_substream = NULL;
	spin_lock_irq(&ensoniq->reg_lock);
#ifdef CHIP1370
	/* release the shared clock divider so capture may reprogram it */
	ensoniq->u.es1370.pclkdiv_lock &= ~ES_MODE_PLAY2;
#endif
	ensoniq->mode &= ~ES_MODE_PLAY2;
	spin_unlock_irq(&ensoniq->reg_lock);
	return 0;
}

static int
snd_ensoniq_capture_close(struct snd_pcm_substream *substream)
{
	struct ensoniq *ensoniq = snd_pcm_substream_chip(substream);

	ensoniq->capture_substream = NULL;
	spin_lock_irq(&ensoniq->reg_lock);
#ifdef CHIP1370
	/* release the shared clock divider so playback2 may reprogram it */
	ensoniq->u.es1370.pclkdiv_lock &= ~ES_MODE_CAPTURE;
#endif
	ensoniq->mode &= ~ES_MODE_CAPTURE;
	spin_unlock_irq(&ensoniq->reg_lock);
	return 0;
}

static struct snd_pcm_ops snd_ensoniq_playback1_ops = {
	.open =		snd_ensoniq_playback1_open,
	.close =	snd_ensoniq_playback1_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_ensoniq_hw_params,
	.hw_free =	snd_ensoniq_hw_free,
	.prepare =	snd_ensoniq_playback1_prepare,
	.trigger =	snd_ensoniq_trigger,
	.pointer =	snd_ensoniq_playback1_pointer,
};

static struct snd_pcm_ops snd_ensoniq_playback2_ops = {
	.open =		snd_ensoniq_playback2_open,
	.close =	snd_ensoniq_playback2_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_ensoniq_hw_params,
	.hw_free =	snd_ensoniq_hw_free,
	.prepare =	snd_ensoniq_playback2_prepare,
	.trigger =	snd_ensoniq_trigger,
	.pointer =	snd_ensoniq_playback2_pointer,
};

static struct snd_pcm_ops snd_ensoniq_capture_ops = {
	.open =		snd_ensoniq_capture_open,
	.close =	snd_ensoniq_capture_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_ensoniq_hw_params,
	.hw_free =	snd_ensoniq_hw_free,
	.prepare =	snd_ensoniq_capture_prepare,
	.trigger =	snd_ensoniq_trigger,
	.pointer =	snd_ensoniq_capture_pointer,
};

/* Create the first PCM device (DAC2/ADC on ES1370, DAC1/ADC on ES1371)
 * with one playback and one capture substream; preallocates DMA pages. */
static int __devinit snd_ensoniq_pcm(struct ensoniq * ensoniq, int device,
				     struct snd_pcm ** rpcm)
{
	struct snd_pcm *pcm;
	int err;

	if (rpcm)
		*rpcm = NULL;
#ifdef CHIP1370
	err = snd_pcm_new(ensoniq->card, "ES1370/1", device, 1, 1, &pcm);
#else
	err = snd_pcm_new(ensoniq->card, "ES1371/1", device, 1, 1, &pcm);
#endif
	if (err < 0)
		return err;

#ifdef CHIP1370
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ensoniq_playback2_ops);
#else
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ensoniq_playback1_ops);
#endif
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ensoniq_capture_ops);

	pcm->private_data = ensoniq;
	pcm->info_flags = 0;
#ifdef CHIP1370
	strcpy(pcm->name, "ES1370 DAC2/ADC");
#else
	strcpy(pcm->name, "ES1371 DAC2/ADC");
#endif
	ensoniq->pcm1 = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(ensoniq->pci),
					      64*1024, 128*1024);

	if (rpcm)
		*rpcm = pcm;
	return 0;
}

/* Create the second, playback-only PCM device (the other DAC). */
static int __devinit snd_ensoniq_pcm2(struct ensoniq * ensoniq, int device,
				      struct snd_pcm ** rpcm)
{
	struct snd_pcm *pcm;
	int err;

	if (rpcm)
		*rpcm = NULL;
#ifdef CHIP1370
	err = snd_pcm_new(ensoniq->card, "ES1370/2", device, 1, 0, &pcm);
#else
	err = snd_pcm_new(ensoniq->card, "ES1371/2", device, 1, 0, &pcm);
#endif
	if (err < 0)
		return err;

#ifdef CHIP1370
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ensoniq_playback1_ops);
#else
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ensoniq_playback2_ops);
#endif
	pcm->private_data = ensoniq;
	pcm->info_flags = 0;
#ifdef CHIP1370
	strcpy(pcm->name, "ES1370 DAC1");
#else
	strcpy(pcm->name, "ES1371 DAC1");
#endif
	ensoniq->pcm2 = pcm;

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
					      snd_dma_pci_data(ensoniq->pci),
					      64*1024, 128*1024);

	if (rpcm)
		*rpcm = pcm;
	return 0;
}

/*
 *  Mixer section
 */

/*
 * ENS1371 mixer (including SPDIF interface)
 */
#ifdef CHIP1371
static int snd_ens1373_spdif_info(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}

/* Report the cached default IEC958 channel-status bytes. */
static int snd_ens1373_spdif_default_get(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol)
{
	struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
	spin_lock_irq(&ensoniq->reg_lock);
	ucontrol->value.iec958.status[0] = (ensoniq->spdif_default >> 0) & 0xff;
	ucontrol->value.iec958.status[1] = (ensoniq->spdif_default >> 8) & 0xff;
	ucontrol->value.iec958.status[2] = (ensoniq->spdif_default >> 16) & 0xff;
	ucontrol->value.iec958.status[3] = (ensoniq->spdif_default >> 24) & 0xff;
	spin_unlock_irq(&ensoniq->reg_lock);
	return 0;
}

/* Update the default channel status; written to hardware only while no
 * playback stream is open. */
static int snd_ens1373_spdif_default_put(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol)
{
	struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
	unsigned int val;
	int change;

	val = ((u32)ucontrol->value.iec958.status[0] << 0) |
	      ((u32)ucontrol->value.iec958.status[1] << 8) |
	      ((u32)ucontrol->value.iec958.status[2] << 16) |
	      ((u32)ucontrol->value.iec958.status[3] << 24);
	spin_lock_irq(&ensoniq->reg_lock);
	change = ensoniq->spdif_default != val;
	ensoniq->spdif_default = val;
	if (change && ensoniq->playback1_substream == NULL &&
	    ensoniq->playback2_substream == NULL)
		outl(val, ES_REG(ensoniq, CHANNEL_STATUS));
	spin_unlock_irq(&ensoniq->reg_lock);
	return change;
}

static int snd_ens1373_spdif_mask_get(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.iec958.status[0] = 0xff;
	ucontrol->value.iec958.status[1] = 0xff;
	ucontrol->value.iec958.status[2] = 0xff;
	ucontrol->value.iec958.status[3] = 0xff;
	return 0;
}

/* Report the per-stream IEC958 channel-status bytes. */
static int snd_ens1373_spdif_stream_get(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
	spin_lock_irq(&ensoniq->reg_lock);
	ucontrol->value.iec958.status[0] = (ensoniq->spdif_stream >> 0) & 0xff;
	ucontrol->value.iec958.status[1] = (ensoniq->spdif_stream >> 8) & 0xff;
	ucontrol->value.iec958.status[2] = (ensoniq->spdif_stream >> 16) & 0xff;
	ucontrol->value.iec958.status[3] = (ensoniq->spdif_stream >> 24) & 0xff;
	spin_unlock_irq(&ensoniq->reg_lock);
	return 0;
}

static int snd_ens1373_spdif_stream_put(struct snd_kcontrol *kcontrol,
					struct snd_ctl_elem_value *ucontrol)
{
	struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol);
	unsigned int val;
	int change;

	val = ((u32)ucontrol->value.iec958.status[0] << 0) |
	      ((u32)ucontrol->value.iec958.status[1] << 8) |
	      ((u32)ucontrol->value.iec958.status[2] << 16) |
	      ((u32)ucontrol->value.iec958.status[3] << 24);
	spin_lock_irq(&ensoniq->reg_lock);
	change = ensoniq->spdif_stream != val;
	ensoniq->spdif_stream = val;
	if
(change && (ensoniq->playback1_substream != NULL || ensoniq->playback2_substream != NULL)) outl(val, ES_REG(ensoniq, CHANNEL_STATUS)); spin_unlock_irq(&ensoniq->reg_lock); return change; } #define ES1371_SPDIF(xname) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .info = snd_es1371_spdif_info, \ .get = snd_es1371_spdif_get, .put = snd_es1371_spdif_put } #define snd_es1371_spdif_info snd_ctl_boolean_mono_info static int snd_es1371_spdif_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol); spin_lock_irq(&ensoniq->reg_lock); ucontrol->value.integer.value[0] = ensoniq->ctrl & ES_1373_SPDIF_THRU ? 1 : 0; spin_unlock_irq(&ensoniq->reg_lock); return 0; } static int snd_es1371_spdif_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol); unsigned int nval1, nval2; int change; nval1 = ucontrol->value.integer.value[0] ? ES_1373_SPDIF_THRU : 0; nval2 = ucontrol->value.integer.value[0] ? 
ES_1373_SPDIF_EN : 0; spin_lock_irq(&ensoniq->reg_lock); change = (ensoniq->ctrl & ES_1373_SPDIF_THRU) != nval1; ensoniq->ctrl &= ~ES_1373_SPDIF_THRU; ensoniq->ctrl |= nval1; ensoniq->cssr &= ~ES_1373_SPDIF_EN; ensoniq->cssr |= nval2; outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL)); outl(ensoniq->cssr, ES_REG(ensoniq, STATUS)); spin_unlock_irq(&ensoniq->reg_lock); return change; } /* spdif controls */ static struct snd_kcontrol_new snd_es1371_mixer_spdif[] __devinitdata = { ES1371_SPDIF(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH)), { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT), .info = snd_ens1373_spdif_info, .get = snd_ens1373_spdif_default_get, .put = snd_ens1373_spdif_default_put, }, { .access = SNDRV_CTL_ELEM_ACCESS_READ, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK), .info = snd_ens1373_spdif_info, .get = snd_ens1373_spdif_mask_get }, { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM), .info = snd_ens1373_spdif_info, .get = snd_ens1373_spdif_stream_get, .put = snd_ens1373_spdif_stream_put }, }; #define snd_es1373_rear_info snd_ctl_boolean_mono_info static int snd_es1373_rear_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol); int val = 0; spin_lock_irq(&ensoniq->reg_lock); if ((ensoniq->cssr & (ES_1373_REAR_BIT27|ES_1373_REAR_BIT26| ES_1373_REAR_BIT24)) == ES_1373_REAR_BIT26) val = 1; ucontrol->value.integer.value[0] = val; spin_unlock_irq(&ensoniq->reg_lock); return 0; } static int snd_es1373_rear_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol); unsigned int nval1; int change; nval1 = ucontrol->value.integer.value[0] ? 
ES_1373_REAR_BIT26 : (ES_1373_REAR_BIT27|ES_1373_REAR_BIT24); spin_lock_irq(&ensoniq->reg_lock); change = (ensoniq->cssr & (ES_1373_REAR_BIT27| ES_1373_REAR_BIT26|ES_1373_REAR_BIT24)) != nval1; ensoniq->cssr &= ~(ES_1373_REAR_BIT27|ES_1373_REAR_BIT26|ES_1373_REAR_BIT24); ensoniq->cssr |= nval1; outl(ensoniq->cssr, ES_REG(ensoniq, STATUS)); spin_unlock_irq(&ensoniq->reg_lock); return change; } static struct snd_kcontrol_new snd_ens1373_rear __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "AC97 2ch->4ch Copy Switch", .info = snd_es1373_rear_info, .get = snd_es1373_rear_get, .put = snd_es1373_rear_put, }; #define snd_es1373_line_info snd_ctl_boolean_mono_info static int snd_es1373_line_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol); int val = 0; spin_lock_irq(&ensoniq->reg_lock); if ((ensoniq->ctrl & ES_1371_GPIO_OUTM) >= 4) val = 1; ucontrol->value.integer.value[0] = val; spin_unlock_irq(&ensoniq->reg_lock); return 0; } static int snd_es1373_line_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol); int changed; unsigned int ctrl; spin_lock_irq(&ensoniq->reg_lock); ctrl = ensoniq->ctrl; if (ucontrol->value.integer.value[0]) ensoniq->ctrl |= ES_1371_GPIO_OUT(4); /* switch line-in -> rear out */ else ensoniq->ctrl &= ~ES_1371_GPIO_OUT(4); changed = (ctrl != ensoniq->ctrl); if (changed) outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL)); spin_unlock_irq(&ensoniq->reg_lock); return changed; } static struct snd_kcontrol_new snd_ens1373_line __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Line In->Rear Out Switch", .info = snd_es1373_line_info, .get = snd_es1373_line_get, .put = snd_es1373_line_put, }; static void snd_ensoniq_mixer_free_ac97(struct snd_ac97 *ac97) { struct ensoniq *ensoniq = ac97->private_data; ensoniq->u.es1371.ac97 = NULL; } struct es1371_quirk { unsigned short vid; /* vendor 
ID */ unsigned short did; /* device ID */ unsigned char rev; /* revision */ }; static int es1371_quirk_lookup(struct ensoniq *ensoniq, struct es1371_quirk *list) { while (list->vid != (unsigned short)PCI_ANY_ID) { if (ensoniq->pci->vendor == list->vid && ensoniq->pci->device == list->did && ensoniq->rev == list->rev) return 1; list++; } return 0; } static struct es1371_quirk es1371_spdif_present[] __devinitdata = { { .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_C }, { .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_D }, { .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_E }, { .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_ES1371, .rev = ES1371REV_CT5880_A }, { .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_ES1371, .rev = ES1371REV_ES1373_8 }, { .vid = PCI_ANY_ID, .did = PCI_ANY_ID } }; static struct snd_pci_quirk ens1373_line_quirk[] __devinitdata = { SND_PCI_QUIRK_ID(0x1274, 0x2000), /* GA-7DXR */ SND_PCI_QUIRK_ID(0x1458, 0xa000), /* GA-8IEXP */ { } /* end */ }; static int __devinit snd_ensoniq_1371_mixer(struct ensoniq *ensoniq, int has_spdif, int has_line) { struct snd_card *card = ensoniq->card; struct snd_ac97_bus *pbus; struct snd_ac97_template ac97; int err; static struct snd_ac97_bus_ops ops = { .write = snd_es1371_codec_write, .read = snd_es1371_codec_read, .wait = snd_es1371_codec_wait, }; if ((err = snd_ac97_bus(card, 0, &ops, NULL, &pbus)) < 0) return err; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = ensoniq; ac97.private_free = snd_ensoniq_mixer_free_ac97; ac97.pci = ensoniq->pci; ac97.scaps = AC97_SCAP_AUDIO; if ((err = snd_ac97_mixer(pbus, &ac97, &ensoniq->u.es1371.ac97)) < 0) return err; if (has_spdif > 0 || (!has_spdif && es1371_quirk_lookup(ensoniq, es1371_spdif_present))) { struct snd_kcontrol *kctl; int i, is_spdif = 0; ensoniq->spdif_default = ensoniq->spdif_stream = 
SNDRV_PCM_DEFAULT_CON_SPDIF; outl(ensoniq->spdif_default, ES_REG(ensoniq, CHANNEL_STATUS)); if (ensoniq->u.es1371.ac97->ext_id & AC97_EI_SPDIF) is_spdif++; for (i = 0; i < ARRAY_SIZE(snd_es1371_mixer_spdif); i++) { kctl = snd_ctl_new1(&snd_es1371_mixer_spdif[i], ensoniq); if (!kctl) return -ENOMEM; kctl->id.index = is_spdif; err = snd_ctl_add(card, kctl); if (err < 0) return err; } } if (ensoniq->u.es1371.ac97->ext_id & AC97_EI_SDAC) { /* mirror rear to front speakers */ ensoniq->cssr &= ~(ES_1373_REAR_BIT27|ES_1373_REAR_BIT24); ensoniq->cssr |= ES_1373_REAR_BIT26; err = snd_ctl_add(card, snd_ctl_new1(&snd_ens1373_rear, ensoniq)); if (err < 0) return err; } if (has_line > 0 || snd_pci_quirk_lookup(ensoniq->pci, ens1373_line_quirk)) { err = snd_ctl_add(card, snd_ctl_new1(&snd_ens1373_line, ensoniq)); if (err < 0) return err; } return 0; } #endif /* CHIP1371 */ /* generic control callbacks for ens1370 */ #ifdef CHIP1370 #define ENSONIQ_CONTROL(xname, mask) \ { .iface = SNDRV_CTL_ELEM_IFACE_CARD, .name = xname, .info = snd_ensoniq_control_info, \ .get = snd_ensoniq_control_get, .put = snd_ensoniq_control_put, \ .private_value = mask } #define snd_ensoniq_control_info snd_ctl_boolean_mono_info static int snd_ensoniq_control_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol); int mask = kcontrol->private_value; spin_lock_irq(&ensoniq->reg_lock); ucontrol->value.integer.value[0] = ensoniq->ctrl & mask ? 1 : 0; spin_unlock_irq(&ensoniq->reg_lock); return 0; } static int snd_ensoniq_control_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct ensoniq *ensoniq = snd_kcontrol_chip(kcontrol); int mask = kcontrol->private_value; unsigned int nval; int change; nval = ucontrol->value.integer.value[0] ? 
mask : 0; spin_lock_irq(&ensoniq->reg_lock); change = (ensoniq->ctrl & mask) != nval; ensoniq->ctrl &= ~mask; ensoniq->ctrl |= nval; outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL)); spin_unlock_irq(&ensoniq->reg_lock); return change; } /* * ENS1370 mixer */ static struct snd_kcontrol_new snd_es1370_controls[2] __devinitdata = { ENSONIQ_CONTROL("PCM 0 Output also on Line-In Jack", ES_1370_XCTL0), ENSONIQ_CONTROL("Mic +5V bias", ES_1370_XCTL1) }; #define ES1370_CONTROLS ARRAY_SIZE(snd_es1370_controls) static void snd_ensoniq_mixer_free_ak4531(struct snd_ak4531 *ak4531) { struct ensoniq *ensoniq = ak4531->private_data; ensoniq->u.es1370.ak4531 = NULL; } static int __devinit snd_ensoniq_1370_mixer(struct ensoniq * ensoniq) { struct snd_card *card = ensoniq->card; struct snd_ak4531 ak4531; unsigned int idx; int err; /* try reset AK4531 */ outw(ES_1370_CODEC_WRITE(AK4531_RESET, 0x02), ES_REG(ensoniq, 1370_CODEC)); inw(ES_REG(ensoniq, 1370_CODEC)); udelay(100); outw(ES_1370_CODEC_WRITE(AK4531_RESET, 0x03), ES_REG(ensoniq, 1370_CODEC)); inw(ES_REG(ensoniq, 1370_CODEC)); udelay(100); memset(&ak4531, 0, sizeof(ak4531)); ak4531.write = snd_es1370_codec_write; ak4531.private_data = ensoniq; ak4531.private_free = snd_ensoniq_mixer_free_ak4531; if ((err = snd_ak4531_mixer(card, &ak4531, &ensoniq->u.es1370.ak4531)) < 0) return err; for (idx = 0; idx < ES1370_CONTROLS; idx++) { err = snd_ctl_add(card, snd_ctl_new1(&snd_es1370_controls[idx], ensoniq)); if (err < 0) return err; } return 0; } #endif /* CHIP1370 */ #ifdef SUPPORT_JOYSTICK #ifdef CHIP1371 static int __devinit snd_ensoniq_get_joystick_port(int dev) { switch (joystick_port[dev]) { case 0: /* disabled */ case 1: /* auto-detect */ case 0x200: case 0x208: case 0x210: case 0x218: return joystick_port[dev]; default: printk(KERN_ERR "ens1371: invalid joystick port %#x", joystick_port[dev]); return 0; } } #else static inline int snd_ensoniq_get_joystick_port(int dev) { return joystick[dev] ? 
0x200 : 0; } #endif static int __devinit snd_ensoniq_create_gameport(struct ensoniq *ensoniq, int dev) { struct gameport *gp; int io_port; io_port = snd_ensoniq_get_joystick_port(dev); switch (io_port) { case 0: return -ENOSYS; case 1: /* auto_detect */ for (io_port = 0x200; io_port <= 0x218; io_port += 8) if (request_region(io_port, 8, "ens137x: gameport")) break; if (io_port > 0x218) { printk(KERN_WARNING "ens137x: no gameport ports available\n"); return -EBUSY; } break; default: if (!request_region(io_port, 8, "ens137x: gameport")) { printk(KERN_WARNING "ens137x: gameport io port 0x%#x in use\n", io_port); return -EBUSY; } break; } ensoniq->gameport = gp = gameport_allocate_port(); if (!gp) { printk(KERN_ERR "ens137x: cannot allocate memory for gameport\n"); release_region(io_port, 8); return -ENOMEM; } gameport_set_name(gp, "ES137x"); gameport_set_phys(gp, "pci%s/gameport0", pci_name(ensoniq->pci)); gameport_set_dev_parent(gp, &ensoniq->pci->dev); gp->io = io_port; ensoniq->ctrl |= ES_JYSTK_EN; #ifdef CHIP1371 ensoniq->ctrl &= ~ES_1371_JOY_ASELM; ensoniq->ctrl |= ES_1371_JOY_ASEL((io_port - 0x200) / 8); #endif outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL)); gameport_register_port(ensoniq->gameport); return 0; } static void snd_ensoniq_free_gameport(struct ensoniq *ensoniq) { if (ensoniq->gameport) { int port = ensoniq->gameport->io; gameport_unregister_port(ensoniq->gameport); ensoniq->gameport = NULL; ensoniq->ctrl &= ~ES_JYSTK_EN; outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL)); release_region(port, 8); } } #else static inline int snd_ensoniq_create_gameport(struct ensoniq *ensoniq, long port) { return -ENOSYS; } static inline void snd_ensoniq_free_gameport(struct ensoniq *ensoniq) { } #endif /* SUPPORT_JOYSTICK */ /* */ static void snd_ensoniq_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct ensoniq *ensoniq = entry->private_data; #ifdef CHIP1370 snd_iprintf(buffer, "Ensoniq AudioPCI ES1370\n\n"); #else snd_iprintf(buffer, 
"Ensoniq AudioPCI ES1371\n\n"); #endif snd_iprintf(buffer, "Joystick enable : %s\n", ensoniq->ctrl & ES_JYSTK_EN ? "on" : "off"); #ifdef CHIP1370 snd_iprintf(buffer, "MIC +5V bias : %s\n", ensoniq->ctrl & ES_1370_XCTL1 ? "on" : "off"); snd_iprintf(buffer, "Line In to AOUT : %s\n", ensoniq->ctrl & ES_1370_XCTL0 ? "on" : "off"); #else snd_iprintf(buffer, "Joystick port : 0x%x\n", (ES_1371_JOY_ASELI(ensoniq->ctrl) * 8) + 0x200); #endif } static void __devinit snd_ensoniq_proc_init(struct ensoniq * ensoniq) { struct snd_info_entry *entry; if (! snd_card_proc_new(ensoniq->card, "audiopci", &entry)) snd_info_set_text_ops(entry, ensoniq, snd_ensoniq_proc_read); } /* */ static int snd_ensoniq_free(struct ensoniq *ensoniq) { snd_ensoniq_free_gameport(ensoniq); if (ensoniq->irq < 0) goto __hw_end; #ifdef CHIP1370 outl(ES_1370_SERR_DISABLE, ES_REG(ensoniq, CONTROL)); /* switch everything off */ outl(0, ES_REG(ensoniq, SERIAL)); /* clear serial interface */ #else outl(0, ES_REG(ensoniq, CONTROL)); /* switch everything off */ outl(0, ES_REG(ensoniq, SERIAL)); /* clear serial interface */ #endif if (ensoniq->irq >= 0) synchronize_irq(ensoniq->irq); pci_set_power_state(ensoniq->pci, 3); __hw_end: #ifdef CHIP1370 if (ensoniq->dma_bug.area) snd_dma_free_pages(&ensoniq->dma_bug); #endif if (ensoniq->irq >= 0) free_irq(ensoniq->irq, ensoniq); pci_release_regions(ensoniq->pci); pci_disable_device(ensoniq->pci); kfree(ensoniq); return 0; } static int snd_ensoniq_dev_free(struct snd_device *device) { struct ensoniq *ensoniq = device->device_data; return snd_ensoniq_free(ensoniq); } #ifdef CHIP1371 static struct snd_pci_quirk es1371_amplifier_hack[] __devinitdata = { SND_PCI_QUIRK_ID(0x107b, 0x2150), /* Gateway Solo 2150 */ SND_PCI_QUIRK_ID(0x13bd, 0x100c), /* EV1938 on Mebius PC-MJ100V */ SND_PCI_QUIRK_ID(0x1102, 0x5938), /* Targa Xtender300 */ SND_PCI_QUIRK_ID(0x1102, 0x8938), /* IPC Topnote G notebook */ { } /* end */ }; static struct es1371_quirk es1371_ac97_reset_hack[] = { { .vid = 
PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_C }, { .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_D }, { .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_CT5880, .rev = CT5880REV_CT5880_E }, { .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_ES1371, .rev = ES1371REV_CT5880_A }, { .vid = PCI_VENDOR_ID_ENSONIQ, .did = PCI_DEVICE_ID_ENSONIQ_ES1371, .rev = ES1371REV_ES1373_8 }, { .vid = PCI_ANY_ID, .did = PCI_ANY_ID } }; #endif static void snd_ensoniq_chip_init(struct ensoniq *ensoniq) { #ifdef CHIP1371 int idx; #endif /* this code was part of snd_ensoniq_create before intruduction * of suspend/resume */ #ifdef CHIP1370 outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL)); outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL)); outl(ES_MEM_PAGEO(ES_PAGE_ADC), ES_REG(ensoniq, MEM_PAGE)); outl(ensoniq->dma_bug.addr, ES_REG(ensoniq, PHANTOM_FRAME)); outl(0, ES_REG(ensoniq, PHANTOM_COUNT)); #else outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL)); outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL)); outl(0, ES_REG(ensoniq, 1371_LEGACY)); if (es1371_quirk_lookup(ensoniq, es1371_ac97_reset_hack)) { outl(ensoniq->cssr, ES_REG(ensoniq, STATUS)); /* need to delay around 20ms(bleech) to give some CODECs enough time to wakeup */ msleep(20); } /* AC'97 warm reset to start the bitclk */ outl(ensoniq->ctrl | ES_1371_SYNC_RES, ES_REG(ensoniq, CONTROL)); inl(ES_REG(ensoniq, CONTROL)); udelay(20); outl(ensoniq->ctrl, ES_REG(ensoniq, CONTROL)); /* Init the sample rate converter */ snd_es1371_wait_src_ready(ensoniq); outl(ES_1371_SRC_DISABLE, ES_REG(ensoniq, 1371_SMPRATE)); for (idx = 0; idx < 0x80; idx++) snd_es1371_src_write(ensoniq, idx, 0); snd_es1371_src_write(ensoniq, ES_SMPREG_DAC1 + ES_SMPREG_TRUNC_N, 16 << 4); snd_es1371_src_write(ensoniq, ES_SMPREG_DAC1 + ES_SMPREG_INT_REGS, 16 << 10); snd_es1371_src_write(ensoniq, ES_SMPREG_DAC2 + ES_SMPREG_TRUNC_N, 16 << 4); snd_es1371_src_write(ensoniq, 
ES_SMPREG_DAC2 + ES_SMPREG_INT_REGS, 16 << 10); snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_ADC, 1 << 12); snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_ADC + 1, 1 << 12); snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_DAC1, 1 << 12); snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_DAC1 + 1, 1 << 12); snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_DAC2, 1 << 12); snd_es1371_src_write(ensoniq, ES_SMPREG_VOL_DAC2 + 1, 1 << 12); snd_es1371_adc_rate(ensoniq, 22050); snd_es1371_dac1_rate(ensoniq, 22050); snd_es1371_dac2_rate(ensoniq, 22050); /* WARNING: * enabling the sample rate converter without properly programming * its parameters causes the chip to lock up (the SRC busy bit will * be stuck high, and I've found no way to rectify this other than * power cycle) - Thomas Sailer */ snd_es1371_wait_src_ready(ensoniq); outl(0, ES_REG(ensoniq, 1371_SMPRATE)); /* try reset codec directly */ outl(ES_1371_CODEC_WRITE(0, 0), ES_REG(ensoniq, 1371_CODEC)); #endif outb(ensoniq->uartc = 0x00, ES_REG(ensoniq, UART_CONTROL)); outb(0x00, ES_REG(ensoniq, UART_RES)); outl(ensoniq->cssr, ES_REG(ensoniq, STATUS)); synchronize_irq(ensoniq->irq); } #ifdef CONFIG_PM static int snd_ensoniq_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct ensoniq *ensoniq = card->private_data; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(ensoniq->pcm1); snd_pcm_suspend_all(ensoniq->pcm2); #ifdef CHIP1371 snd_ac97_suspend(ensoniq->u.es1371.ac97); #else /* try to reset AK4531 */ outw(ES_1370_CODEC_WRITE(AK4531_RESET, 0x02), ES_REG(ensoniq, 1370_CODEC)); inw(ES_REG(ensoniq, 1370_CODEC)); udelay(100); outw(ES_1370_CODEC_WRITE(AK4531_RESET, 0x03), ES_REG(ensoniq, 1370_CODEC)); inw(ES_REG(ensoniq, 1370_CODEC)); udelay(100); snd_ak4531_suspend(ensoniq->u.es1370.ak4531); #endif pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int snd_ensoniq_resume(struct pci_dev *pci) { 
struct snd_card *card = pci_get_drvdata(pci); struct ensoniq *ensoniq = card->private_data; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR DRIVER_NAME ": pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); snd_ensoniq_chip_init(ensoniq); #ifdef CHIP1371 snd_ac97_resume(ensoniq->u.es1371.ac97); #else snd_ak4531_resume(ensoniq->u.es1370.ak4531); #endif snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM */ static int __devinit snd_ensoniq_create(struct snd_card *card, struct pci_dev *pci, struct ensoniq ** rensoniq) { struct ensoniq *ensoniq; int err; static struct snd_device_ops ops = { .dev_free = snd_ensoniq_dev_free, }; *rensoniq = NULL; if ((err = pci_enable_device(pci)) < 0) return err; ensoniq = kzalloc(sizeof(*ensoniq), GFP_KERNEL); if (ensoniq == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&ensoniq->reg_lock); mutex_init(&ensoniq->src_mutex); ensoniq->card = card; ensoniq->pci = pci; ensoniq->irq = -1; if ((err = pci_request_regions(pci, "Ensoniq AudioPCI")) < 0) { kfree(ensoniq); pci_disable_device(pci); return err; } ensoniq->port = pci_resource_start(pci, 0); if (request_irq(pci->irq, snd_audiopci_interrupt, IRQF_SHARED, "Ensoniq AudioPCI", ensoniq)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_ensoniq_free(ensoniq); return -EBUSY; } ensoniq->irq = pci->irq; #ifdef CHIP1370 if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), 16, &ensoniq->dma_bug) < 0) { snd_printk(KERN_ERR "unable to allocate space for phantom area - dma_bug\n"); snd_ensoniq_free(ensoniq); return -EBUSY; } #endif pci_set_master(pci); ensoniq->rev = pci->revision; #ifdef CHIP1370 #if 0 ensoniq->ctrl = ES_1370_CDC_EN | ES_1370_SERR_DISABLE | ES_1370_PCLKDIVO(ES_1370_SRTODIV(8000)); #else /* get microphone working */ ensoniq->ctrl = ES_1370_CDC_EN | 
ES_1370_PCLKDIVO(ES_1370_SRTODIV(8000)); #endif ensoniq->sctrl = 0; #else ensoniq->ctrl = 0; ensoniq->sctrl = 0; ensoniq->cssr = 0; if (snd_pci_quirk_lookup(pci, es1371_amplifier_hack)) ensoniq->ctrl |= ES_1371_GPIO_OUT(1); /* turn amplifier on */ if (es1371_quirk_lookup(ensoniq, es1371_ac97_reset_hack)) ensoniq->cssr |= ES_1371_ST_AC97_RST; #endif snd_ensoniq_chip_init(ensoniq); if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, ensoniq, &ops)) < 0) { snd_ensoniq_free(ensoniq); return err; } snd_ensoniq_proc_init(ensoniq); snd_card_set_dev(card, &pci->dev); *rensoniq = ensoniq; return 0; } /* * MIDI section */ static void snd_ensoniq_midi_interrupt(struct ensoniq * ensoniq) { struct snd_rawmidi *rmidi = ensoniq->rmidi; unsigned char status, mask, byte; if (rmidi == NULL) return; /* do Rx at first */ spin_lock(&ensoniq->reg_lock); mask = ensoniq->uartm & ES_MODE_INPUT ? ES_RXRDY : 0; while (mask) { status = inb(ES_REG(ensoniq, UART_STATUS)); if ((status & mask) == 0) break; byte = inb(ES_REG(ensoniq, UART_DATA)); snd_rawmidi_receive(ensoniq->midi_input, &byte, 1); } spin_unlock(&ensoniq->reg_lock); /* do Tx at second */ spin_lock(&ensoniq->reg_lock); mask = ensoniq->uartm & ES_MODE_OUTPUT ? 
ES_TXRDY : 0; while (mask) { status = inb(ES_REG(ensoniq, UART_STATUS)); if ((status & mask) == 0) break; if (snd_rawmidi_transmit(ensoniq->midi_output, &byte, 1) != 1) { ensoniq->uartc &= ~ES_TXINTENM; outb(ensoniq->uartc, ES_REG(ensoniq, UART_CONTROL)); mask &= ~ES_TXRDY; } else { outb(byte, ES_REG(ensoniq, UART_DATA)); } } spin_unlock(&ensoniq->reg_lock); } static int snd_ensoniq_midi_input_open(struct snd_rawmidi_substream *substream) { struct ensoniq *ensoniq = substream->rmidi->private_data; spin_lock_irq(&ensoniq->reg_lock); ensoniq->uartm |= ES_MODE_INPUT; ensoniq->midi_input = substream; if (!(ensoniq->uartm & ES_MODE_OUTPUT)) { outb(ES_CNTRL(3), ES_REG(ensoniq, UART_CONTROL)); outb(ensoniq->uartc = 0, ES_REG(ensoniq, UART_CONTROL)); outl(ensoniq->ctrl |= ES_UART_EN, ES_REG(ensoniq, CONTROL)); } spin_unlock_irq(&ensoniq->reg_lock); return 0; } static int snd_ensoniq_midi_input_close(struct snd_rawmidi_substream *substream) { struct ensoniq *ensoniq = substream->rmidi->private_data; spin_lock_irq(&ensoniq->reg_lock); if (!(ensoniq->uartm & ES_MODE_OUTPUT)) { outb(ensoniq->uartc = 0, ES_REG(ensoniq, UART_CONTROL)); outl(ensoniq->ctrl &= ~ES_UART_EN, ES_REG(ensoniq, CONTROL)); } else { outb(ensoniq->uartc &= ~ES_RXINTEN, ES_REG(ensoniq, UART_CONTROL)); } ensoniq->midi_input = NULL; ensoniq->uartm &= ~ES_MODE_INPUT; spin_unlock_irq(&ensoniq->reg_lock); return 0; } static int snd_ensoniq_midi_output_open(struct snd_rawmidi_substream *substream) { struct ensoniq *ensoniq = substream->rmidi->private_data; spin_lock_irq(&ensoniq->reg_lock); ensoniq->uartm |= ES_MODE_OUTPUT; ensoniq->midi_output = substream; if (!(ensoniq->uartm & ES_MODE_INPUT)) { outb(ES_CNTRL(3), ES_REG(ensoniq, UART_CONTROL)); outb(ensoniq->uartc = 0, ES_REG(ensoniq, UART_CONTROL)); outl(ensoniq->ctrl |= ES_UART_EN, ES_REG(ensoniq, CONTROL)); } spin_unlock_irq(&ensoniq->reg_lock); return 0; } static int snd_ensoniq_midi_output_close(struct snd_rawmidi_substream *substream) { struct ensoniq 
*ensoniq = substream->rmidi->private_data; spin_lock_irq(&ensoniq->reg_lock); if (!(ensoniq->uartm & ES_MODE_INPUT)) { outb(ensoniq->uartc = 0, ES_REG(ensoniq, UART_CONTROL)); outl(ensoniq->ctrl &= ~ES_UART_EN, ES_REG(ensoniq, CONTROL)); } else { outb(ensoniq->uartc &= ~ES_TXINTENM, ES_REG(ensoniq, UART_CONTROL)); } ensoniq->midi_output = NULL; ensoniq->uartm &= ~ES_MODE_OUTPUT; spin_unlock_irq(&ensoniq->reg_lock); return 0; } static void snd_ensoniq_midi_input_trigger(struct snd_rawmidi_substream *substream, int up) { unsigned long flags; struct ensoniq *ensoniq = substream->rmidi->private_data; int idx; spin_lock_irqsave(&ensoniq->reg_lock, flags); if (up) { if ((ensoniq->uartc & ES_RXINTEN) == 0) { /* empty input FIFO */ for (idx = 0; idx < 32; idx++) inb(ES_REG(ensoniq, UART_DATA)); ensoniq->uartc |= ES_RXINTEN; outb(ensoniq->uartc, ES_REG(ensoniq, UART_CONTROL)); } } else { if (ensoniq->uartc & ES_RXINTEN) { ensoniq->uartc &= ~ES_RXINTEN; outb(ensoniq->uartc, ES_REG(ensoniq, UART_CONTROL)); } } spin_unlock_irqrestore(&ensoniq->reg_lock, flags); } static void snd_ensoniq_midi_output_trigger(struct snd_rawmidi_substream *substream, int up) { unsigned long flags; struct ensoniq *ensoniq = substream->rmidi->private_data; unsigned char byte; spin_lock_irqsave(&ensoniq->reg_lock, flags); if (up) { if (ES_TXINTENI(ensoniq->uartc) == 0) { ensoniq->uartc |= ES_TXINTENO(1); /* fill UART FIFO buffer at first, and turn Tx interrupts only if necessary */ while (ES_TXINTENI(ensoniq->uartc) == 1 && (inb(ES_REG(ensoniq, UART_STATUS)) & ES_TXRDY)) { if (snd_rawmidi_transmit(substream, &byte, 1) != 1) { ensoniq->uartc &= ~ES_TXINTENM; } else { outb(byte, ES_REG(ensoniq, UART_DATA)); } } outb(ensoniq->uartc, ES_REG(ensoniq, UART_CONTROL)); } } else { if (ES_TXINTENI(ensoniq->uartc) == 1) { ensoniq->uartc &= ~ES_TXINTENM; outb(ensoniq->uartc, ES_REG(ensoniq, UART_CONTROL)); } } spin_unlock_irqrestore(&ensoniq->reg_lock, flags); } static struct snd_rawmidi_ops 
snd_ensoniq_midi_output = { .open = snd_ensoniq_midi_output_open, .close = snd_ensoniq_midi_output_close, .trigger = snd_ensoniq_midi_output_trigger, }; static struct snd_rawmidi_ops snd_ensoniq_midi_input = { .open = snd_ensoniq_midi_input_open, .close = snd_ensoniq_midi_input_close, .trigger = snd_ensoniq_midi_input_trigger, }; static int __devinit snd_ensoniq_midi(struct ensoniq * ensoniq, int device, struct snd_rawmidi **rrawmidi) { struct snd_rawmidi *rmidi; int err; if (rrawmidi) *rrawmidi = NULL; if ((err = snd_rawmidi_new(ensoniq->card, "ES1370/1", device, 1, 1, &rmidi)) < 0) return err; #ifdef CHIP1370 strcpy(rmidi->name, "ES1370"); #else strcpy(rmidi->name, "ES1371"); #endif snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_ensoniq_midi_output); snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_ensoniq_midi_input); rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX; rmidi->private_data = ensoniq; ensoniq->rmidi = rmidi; if (rrawmidi) *rrawmidi = rmidi; return 0; } /* * Interrupt handler */ static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id) { struct ensoniq *ensoniq = dev_id; unsigned int status, sctrl; if (ensoniq == NULL) return IRQ_NONE; status = inl(ES_REG(ensoniq, STATUS)); if (!(status & ES_INTR)) return IRQ_NONE; spin_lock(&ensoniq->reg_lock); sctrl = ensoniq->sctrl; if (status & ES_DAC1) sctrl &= ~ES_P1_INT_EN; if (status & ES_DAC2) sctrl &= ~ES_P2_INT_EN; if (status & ES_ADC) sctrl &= ~ES_R1_INT_EN; outl(sctrl, ES_REG(ensoniq, SERIAL)); outl(ensoniq->sctrl, ES_REG(ensoniq, SERIAL)); spin_unlock(&ensoniq->reg_lock); if (status & ES_UART) snd_ensoniq_midi_interrupt(ensoniq); if ((status & ES_DAC2) && ensoniq->playback2_substream) snd_pcm_period_elapsed(ensoniq->playback2_substream); if ((status & ES_ADC) && ensoniq->capture_substream) snd_pcm_period_elapsed(ensoniq->capture_substream); if ((status & ES_DAC1) && ensoniq->playback1_substream) 
snd_pcm_period_elapsed(ensoniq->playback1_substream); return IRQ_HANDLED; } static int __devinit snd_audiopci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct ensoniq *ensoniq; int err, pcm_devs[2]; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; if ((err = snd_ensoniq_create(card, pci, &ensoniq)) < 0) { snd_card_free(card); return err; } card->private_data = ensoniq; pcm_devs[0] = 0; pcm_devs[1] = 1; #ifdef CHIP1370 if ((err = snd_ensoniq_1370_mixer(ensoniq)) < 0) { snd_card_free(card); return err; } #endif #ifdef CHIP1371 if ((err = snd_ensoniq_1371_mixer(ensoniq, spdif[dev], lineio[dev])) < 0) { snd_card_free(card); return err; } #endif if ((err = snd_ensoniq_pcm(ensoniq, 0, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_ensoniq_pcm2(ensoniq, 1, NULL)) < 0) { snd_card_free(card); return err; } if ((err = snd_ensoniq_midi(ensoniq, 0, NULL)) < 0) { snd_card_free(card); return err; } snd_ensoniq_create_gameport(ensoniq, dev); strcpy(card->driver, DRIVER_NAME); strcpy(card->shortname, "Ensoniq AudioPCI"); sprintf(card->longname, "%s %s at 0x%lx, irq %i", card->shortname, card->driver, ensoniq->port, ensoniq->irq); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_audiopci_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = DRIVER_NAME, .id_table = snd_audiopci_ids, .probe = snd_audiopci_probe, .remove = __devexit_p(snd_audiopci_remove), #ifdef CONFIG_PM .suspend = snd_ensoniq_suspend, .resume = snd_ensoniq_resume, #endif }; static int __init alsa_card_ens137x_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_ens137x_exit(void) { 
pci_unregister_driver(&driver); } module_init(alsa_card_ens137x_init) module_exit(alsa_card_ens137x_exit)
gpl-2.0
TEAM-Gummy/android_kernel_lg_g2
arch/sh/kernel/process_64.c
4401
16045
/* * arch/sh/kernel/process_64.c * * This file handles the architecture-dependent parts of process handling.. * * Copyright (C) 2000, 2001 Paolo Alberelli * Copyright (C) 2003 - 2007 Paul Mundt * Copyright (C) 2003, 2004 Richard Curnow * * Started from SH3/4 version: * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima * * In turn started from i386 version: * Copyright (C) 1995 Linus Torvalds * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/mm.h> #include <linux/fs.h> #include <linux/ptrace.h> #include <linux/reboot.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/module.h> #include <linux/io.h> #include <asm/syscalls.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/mmu_context.h> #include <asm/fpu.h> #include <asm/switch_to.h> struct task_struct *last_task_used_math = NULL; void show_regs(struct pt_regs *regs) { unsigned long long ah, al, bh, bl, ch, cl; printk("\n"); ah = (regs->pc) >> 32; al = (regs->pc) & 0xffffffff; bh = (regs->regs[18]) >> 32; bl = (regs->regs[18]) & 0xffffffff; ch = (regs->regs[15]) >> 32; cl = (regs->regs[15]) & 0xffffffff; printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->sr) >> 32; al = (regs->sr) & 0xffffffff; asm volatile ("getcon " __TEA ", %0" : "=r" (bh)); asm volatile ("getcon " __TEA ", %0" : "=r" (bl)); bh = (bh) >> 32; bl = (bl) & 0xffffffff; asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch)); asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl)); ch = (ch) >> 32; cl = (cl) & 0xffffffff; printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[0]) >> 32; al = (regs->regs[0]) & 0xffffffff; bh = (regs->regs[1]) >> 32; bl = (regs->regs[1]) & 0xffffffff; ch = (regs->regs[2]) >> 32; cl = (regs->regs[2]) & 0xffffffff; printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : 
%08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[3]) >> 32; al = (regs->regs[3]) & 0xffffffff; bh = (regs->regs[4]) >> 32; bl = (regs->regs[4]) & 0xffffffff; ch = (regs->regs[5]) >> 32; cl = (regs->regs[5]) & 0xffffffff; printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[6]) >> 32; al = (regs->regs[6]) & 0xffffffff; bh = (regs->regs[7]) >> 32; bl = (regs->regs[7]) & 0xffffffff; ch = (regs->regs[8]) >> 32; cl = (regs->regs[8]) & 0xffffffff; printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[9]) >> 32; al = (regs->regs[9]) & 0xffffffff; bh = (regs->regs[10]) >> 32; bl = (regs->regs[10]) & 0xffffffff; ch = (regs->regs[11]) >> 32; cl = (regs->regs[11]) & 0xffffffff; printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[12]) >> 32; al = (regs->regs[12]) & 0xffffffff; bh = (regs->regs[13]) >> 32; bl = (regs->regs[13]) & 0xffffffff; ch = (regs->regs[14]) >> 32; cl = (regs->regs[14]) & 0xffffffff; printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[16]) >> 32; al = (regs->regs[16]) & 0xffffffff; bh = (regs->regs[17]) >> 32; bl = (regs->regs[17]) & 0xffffffff; ch = (regs->regs[19]) >> 32; cl = (regs->regs[19]) & 0xffffffff; printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[20]) >> 32; al = (regs->regs[20]) & 0xffffffff; bh = (regs->regs[21]) >> 32; bl = (regs->regs[21]) & 0xffffffff; ch = (regs->regs[22]) >> 32; cl = (regs->regs[22]) & 0xffffffff; printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[23]) >> 32; al = (regs->regs[23]) & 0xffffffff; bh = (regs->regs[24]) >> 32; bl = (regs->regs[24]) & 0xffffffff; ch = (regs->regs[25]) >> 32; cl = (regs->regs[25]) & 0xffffffff; printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); 
ah = (regs->regs[26]) >> 32; al = (regs->regs[26]) & 0xffffffff; bh = (regs->regs[27]) >> 32; bl = (regs->regs[27]) & 0xffffffff; ch = (regs->regs[28]) >> 32; cl = (regs->regs[28]) & 0xffffffff; printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[29]) >> 32; al = (regs->regs[29]) & 0xffffffff; bh = (regs->regs[30]) >> 32; bl = (regs->regs[30]) & 0xffffffff; ch = (regs->regs[31]) >> 32; cl = (regs->regs[31]) & 0xffffffff; printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[32]) >> 32; al = (regs->regs[32]) & 0xffffffff; bh = (regs->regs[33]) >> 32; bl = (regs->regs[33]) & 0xffffffff; ch = (regs->regs[34]) >> 32; cl = (regs->regs[34]) & 0xffffffff; printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[35]) >> 32; al = (regs->regs[35]) & 0xffffffff; bh = (regs->regs[36]) >> 32; bl = (regs->regs[36]) & 0xffffffff; ch = (regs->regs[37]) >> 32; cl = (regs->regs[37]) & 0xffffffff; printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[38]) >> 32; al = (regs->regs[38]) & 0xffffffff; bh = (regs->regs[39]) >> 32; bl = (regs->regs[39]) & 0xffffffff; ch = (regs->regs[40]) >> 32; cl = (regs->regs[40]) & 0xffffffff; printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[41]) >> 32; al = (regs->regs[41]) & 0xffffffff; bh = (regs->regs[42]) >> 32; bl = (regs->regs[42]) & 0xffffffff; ch = (regs->regs[43]) >> 32; cl = (regs->regs[43]) & 0xffffffff; printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[44]) >> 32; al = (regs->regs[44]) & 0xffffffff; bh = (regs->regs[45]) >> 32; bl = (regs->regs[45]) & 0xffffffff; ch = (regs->regs[46]) >> 32; cl = (regs->regs[46]) & 0xffffffff; printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = 
(regs->regs[47]) >> 32; al = (regs->regs[47]) & 0xffffffff; bh = (regs->regs[48]) >> 32; bl = (regs->regs[48]) & 0xffffffff; ch = (regs->regs[49]) >> 32; cl = (regs->regs[49]) & 0xffffffff; printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[50]) >> 32; al = (regs->regs[50]) & 0xffffffff; bh = (regs->regs[51]) >> 32; bl = (regs->regs[51]) & 0xffffffff; ch = (regs->regs[52]) >> 32; cl = (regs->regs[52]) & 0xffffffff; printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[53]) >> 32; al = (regs->regs[53]) & 0xffffffff; bh = (regs->regs[54]) >> 32; bl = (regs->regs[54]) & 0xffffffff; ch = (regs->regs[55]) >> 32; cl = (regs->regs[55]) & 0xffffffff; printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[56]) >> 32; al = (regs->regs[56]) & 0xffffffff; bh = (regs->regs[57]) >> 32; bl = (regs->regs[57]) & 0xffffffff; ch = (regs->regs[58]) >> 32; cl = (regs->regs[58]) & 0xffffffff; printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[59]) >> 32; al = (regs->regs[59]) & 0xffffffff; bh = (regs->regs[60]) >> 32; bl = (regs->regs[60]) & 0xffffffff; ch = (regs->regs[61]) >> 32; cl = (regs->regs[61]) & 0xffffffff; printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->regs[62]) >> 32; al = (regs->regs[62]) & 0xffffffff; bh = (regs->tregs[0]) >> 32; bl = (regs->tregs[0]) & 0xffffffff; ch = (regs->tregs[1]) >> 32; cl = (regs->tregs[1]) & 0xffffffff; printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->tregs[2]) >> 32; al = (regs->tregs[2]) & 0xffffffff; bh = (regs->tregs[3]) >> 32; bl = (regs->tregs[3]) & 0xffffffff; ch = (regs->tregs[4]) >> 32; cl = (regs->tregs[4]) & 0xffffffff; printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); ah = (regs->tregs[5]) >> 32; 
al = (regs->tregs[5]) & 0xffffffff; bh = (regs->tregs[6]) >> 32; bl = (regs->tregs[6]) & 0xffffffff; ch = (regs->tregs[7]) >> 32; cl = (regs->tregs[7]) & 0xffffffff; printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n", ah, al, bh, bl, ch, cl); /* * If we're in kernel mode, dump the stack too.. */ if (!user_mode(regs)) { void show_stack(struct task_struct *tsk, unsigned long *sp); unsigned long sp = regs->regs[15] & 0xffffffff; struct task_struct *tsk = get_current(); tsk->thread.kregs = regs; show_stack(tsk, (unsigned long *)sp); } } /* * Create a kernel thread */ __noreturn void kernel_thread_helper(void *arg, int (*fn)(void *)) { do_exit(fn(arg)); } /* * This is the mechanism for creating a new kernel thread. * * NOTE! Only a kernel-only process(ie the swapper or direct descendants * who haven't done an "execve()") should use this: it will work within * a system call from a "real" process, but the process memory space will * not be freed until both the parent and the child have exited. */ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) { struct pt_regs regs; memset(&regs, 0, sizeof(regs)); regs.regs[2] = (unsigned long)arg; regs.regs[3] = (unsigned long)fn; regs.pc = (unsigned long)kernel_thread_helper; regs.sr = (1 << 30); /* Ok, create the new process.. */ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); } EXPORT_SYMBOL(kernel_thread); /* * Free current thread data structures etc.. */ void exit_thread(void) { /* * See arch/sparc/kernel/process.c for the precedent for doing * this -- RPC. * * The SH-5 FPU save/restore approach relies on * last_task_used_math pointing to a live task_struct. When * another task tries to use the FPU for the 1st time, the FPUDIS * trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the * existing FPU state to the FP regs field within * last_task_used_math before re-loading the new task's FPU state * (or initialising it if the FPU has been used before). 
So if * last_task_used_math is stale, and its page has already been * re-allocated for another use, the consequences are rather * grim. Unless we null it here, there is no other path through * which it would get safely nulled. */ #ifdef CONFIG_SH_FPU if (last_task_used_math == current) { last_task_used_math = NULL; } #endif } void flush_thread(void) { /* Called by fs/exec.c (setup_new_exec) to remove traces of a * previously running executable. */ #ifdef CONFIG_SH_FPU if (last_task_used_math == current) { last_task_used_math = NULL; } /* Force FPU state to be reinitialised after exec */ clear_used_math(); #endif /* if we are a kernel thread, about to change to user thread, * update kreg */ if(current->thread.kregs==&fake_swapper_regs) { current->thread.kregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1); current->thread.uregs = current->thread.kregs; } } void release_thread(struct task_struct *dead_task) { /* do nothing */ } /* Fill in the fpu structure for a core dump.. */ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) { #ifdef CONFIG_SH_FPU int fpvalid; struct task_struct *tsk = current; fpvalid = !!tsk_used_math(tsk); if (fpvalid) { if (current == last_task_used_math) { enable_fpu(); save_fpu(tsk); disable_fpu(); last_task_used_math = 0; regs->sr |= SR_FD; } memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu)); } return fpvalid; #else return 0; /* Task didn't use the fpu at all. */ #endif } EXPORT_SYMBOL(dump_fpu); asmlinkage void ret_from_fork(void); int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct pt_regs *childregs; #ifdef CONFIG_SH_FPU if(last_task_used_math == current) { enable_fpu(); save_fpu(current); disable_fpu(); last_task_used_math = NULL; regs->sr |= SR_FD; } #endif /* Copy from sh version */ childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1; *childregs = *regs; /* * Sign extend the edited stack. 
* Note that thread.pc and thread.pc will stay * 32-bit wide and context switch must take care * of NEFF sign extension. */ if (user_mode(regs)) { childregs->regs[15] = neff_sign_extend(usp); p->thread.uregs = childregs; } else { childregs->regs[15] = neff_sign_extend((unsigned long)task_stack_page(p) + THREAD_SIZE); } childregs->regs[9] = 0; /* Set return value for child */ childregs->sr |= SR_FD; /* Invalidate FPU flag */ p->thread.sp = (unsigned long) childregs; p->thread.pc = (unsigned long) ret_from_fork; return 0; } asmlinkage int sys_fork(unsigned long r2, unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs *pregs) { return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0); } asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs *pregs) { if (!newsp) newsp = pregs->regs[15]; return do_fork(clone_flags, newsp, pregs, 0, 0, 0); } /* * This is trivial, and on the face of it looks like it * could equally well be done in user mode. * * Not so, for quite unobvious reasons - register pressure. * In user mode vfork() cannot have a stack frame, and if * done by calling the "clone()" system call directly, you * do not have enough call-clobbered registers to hold all * the information you need. */ asmlinkage int sys_vfork(unsigned long r2, unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs *pregs) { return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0); } /* * sys_execve() executes a new program. 
*/ asmlinkage int sys_execve(const char *ufilename, char **uargv, char **uenvp, unsigned long r5, unsigned long r6, unsigned long r7, struct pt_regs *pregs) { int error; char *filename; filename = getname((char __user *)ufilename); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; error = do_execve(filename, (const char __user *const __user *)uargv, (const char __user *const __user *)uenvp, pregs); putname(filename); out: return error; } #ifdef CONFIG_FRAME_POINTER static int in_sh64_switch_to(unsigned long pc) { extern char __sh64_switch_to_end; /* For a sleeping task, the PC is somewhere in the middle of the function, so we don't have to worry about masking the LSB off */ return (pc >= (unsigned long) sh64_switch_to) && (pc < (unsigned long) &__sh64_switch_to_end); } #endif unsigned long get_wchan(struct task_struct *p) { unsigned long pc; if (!p || p == current || p->state == TASK_RUNNING) return 0; /* * The same comment as on the Alpha applies here, too ... */ pc = thread_saved_pc(p); #ifdef CONFIG_FRAME_POINTER if (in_sh64_switch_to(pc)) { unsigned long schedule_fp; unsigned long sh64_switch_to_fp; unsigned long schedule_caller_pc; sh64_switch_to_fp = (long) p->thread.sp; /* r14 is saved at offset 4 in the sh64_switch_to frame */ schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4); /* and the caller of 'schedule' is (currently!) saved at offset 24 in the frame of schedule (from disasm) */ schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24); return schedule_caller_pc; } #endif return pc; }
gpl-2.0
syphyr/android_kernel_lge_awifi
security/tomoyo/mount.c
4913
6817
/* * security/tomoyo/mount.c * * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include <linux/slab.h> #include "common.h" /* String table for special mount operations. */ static const char * const tomoyo_mounts[TOMOYO_MAX_SPECIAL_MOUNT] = { [TOMOYO_MOUNT_BIND] = "--bind", [TOMOYO_MOUNT_MOVE] = "--move", [TOMOYO_MOUNT_REMOUNT] = "--remount", [TOMOYO_MOUNT_MAKE_UNBINDABLE] = "--make-unbindable", [TOMOYO_MOUNT_MAKE_PRIVATE] = "--make-private", [TOMOYO_MOUNT_MAKE_SLAVE] = "--make-slave", [TOMOYO_MOUNT_MAKE_SHARED] = "--make-shared", }; /** * tomoyo_audit_mount_log - Audit mount log. * * @r: Pointer to "struct tomoyo_request_info". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_audit_mount_log(struct tomoyo_request_info *r) { return tomoyo_supervisor(r, "file mount %s %s %s 0x%lX\n", r->param.mount.dev->name, r->param.mount.dir->name, r->param.mount.type->name, r->param.mount.flags); } /** * tomoyo_check_mount_acl - Check permission for path path path number operation. * * @r: Pointer to "struct tomoyo_request_info". * @ptr: Pointer to "struct tomoyo_acl_info". * * Returns true if granted, false otherwise. */ static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { const struct tomoyo_mount_acl *acl = container_of(ptr, typeof(*acl), head); return tomoyo_compare_number_union(r->param.mount.flags, &acl->flags) && tomoyo_compare_name_union(r->param.mount.type, &acl->fs_type) && tomoyo_compare_name_union(r->param.mount.dir, &acl->dir_name) && (!r->param.mount.need_dev || tomoyo_compare_name_union(r->param.mount.dev, &acl->dev_name)); } /** * tomoyo_mount_acl - Check permission for mount() operation. * * @r: Pointer to "struct tomoyo_request_info". * @dev_name: Name of device file. Maybe NULL. * @dir: Pointer to "struct path". * @type: Name of filesystem type. * @flags: Mount options. * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). 
*/ static int tomoyo_mount_acl(struct tomoyo_request_info *r, const char *dev_name, struct path *dir, const char *type, unsigned long flags) { struct tomoyo_obj_info obj = { }; struct path path; struct file_system_type *fstype = NULL; const char *requested_type = NULL; const char *requested_dir_name = NULL; const char *requested_dev_name = NULL; struct tomoyo_path_info rtype; struct tomoyo_path_info rdev; struct tomoyo_path_info rdir; int need_dev = 0; int error = -ENOMEM; r->obj = &obj; /* Get fstype. */ requested_type = tomoyo_encode(type); if (!requested_type) goto out; rtype.name = requested_type; tomoyo_fill_path_info(&rtype); /* Get mount point. */ obj.path2 = *dir; requested_dir_name = tomoyo_realpath_from_path(dir); if (!requested_dir_name) { error = -ENOMEM; goto out; } rdir.name = requested_dir_name; tomoyo_fill_path_info(&rdir); /* Compare fs name. */ if (type == tomoyo_mounts[TOMOYO_MOUNT_REMOUNT]) { /* dev_name is ignored. */ } else if (type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE] || type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE] || type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE] || type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED]) { /* dev_name is ignored. */ } else if (type == tomoyo_mounts[TOMOYO_MOUNT_BIND] || type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) { need_dev = -1; /* dev_name is a directory */ } else { fstype = get_fs_type(type); if (!fstype) { error = -ENODEV; goto out; } if (fstype->fs_flags & FS_REQUIRES_DEV) /* dev_name is a block device file. */ need_dev = 1; } if (need_dev) { /* Get mount point or device file. */ if (!dev_name || kern_path(dev_name, LOOKUP_FOLLOW, &path)) { error = -ENOENT; goto out; } obj.path1 = path; requested_dev_name = tomoyo_realpath_from_path(&path); if (!requested_dev_name) { error = -ENOENT; goto out; } } else { /* Map dev_name to "<NULL>" if no dev_name given. 
*/ if (!dev_name) dev_name = "<NULL>"; requested_dev_name = tomoyo_encode(dev_name); if (!requested_dev_name) { error = -ENOMEM; goto out; } } rdev.name = requested_dev_name; tomoyo_fill_path_info(&rdev); r->param_type = TOMOYO_TYPE_MOUNT_ACL; r->param.mount.need_dev = need_dev; r->param.mount.dev = &rdev; r->param.mount.dir = &rdir; r->param.mount.type = &rtype; r->param.mount.flags = flags; do { tomoyo_check_acl(r, tomoyo_check_mount_acl); error = tomoyo_audit_mount_log(r); } while (error == TOMOYO_RETRY_REQUEST); out: kfree(requested_dev_name); kfree(requested_dir_name); if (fstype) put_filesystem(fstype); kfree(requested_type); /* Drop refcount obtained by kern_path(). */ if (obj.path1.dentry) path_put(&obj.path1); return error; } /** * tomoyo_mount_permission - Check permission for mount() operation. * * @dev_name: Name of device file. Maybe NULL. * @path: Pointer to "struct path". * @type: Name of filesystem type. Maybe NULL. * @flags: Mount options. * @data_page: Optional data. Maybe NULL. * * Returns 0 on success, negative value otherwise. 
*/ int tomoyo_mount_permission(const char *dev_name, struct path *path, const char *type, unsigned long flags, void *data_page) { struct tomoyo_request_info r; int error; int idx; if (tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_MOUNT) == TOMOYO_CONFIG_DISABLED) return 0; if ((flags & MS_MGC_MSK) == MS_MGC_VAL) flags &= ~MS_MGC_MSK; if (flags & MS_REMOUNT) { type = tomoyo_mounts[TOMOYO_MOUNT_REMOUNT]; flags &= ~MS_REMOUNT; } else if (flags & MS_BIND) { type = tomoyo_mounts[TOMOYO_MOUNT_BIND]; flags &= ~MS_BIND; } else if (flags & MS_SHARED) { if (flags & (MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) return -EINVAL; type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED]; flags &= ~MS_SHARED; } else if (flags & MS_PRIVATE) { if (flags & (MS_SHARED | MS_SLAVE | MS_UNBINDABLE)) return -EINVAL; type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE]; flags &= ~MS_PRIVATE; } else if (flags & MS_SLAVE) { if (flags & (MS_SHARED | MS_PRIVATE | MS_UNBINDABLE)) return -EINVAL; type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE]; flags &= ~MS_SLAVE; } else if (flags & MS_UNBINDABLE) { if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE)) return -EINVAL; type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE]; flags &= ~MS_UNBINDABLE; } else if (flags & MS_MOVE) { type = tomoyo_mounts[TOMOYO_MOUNT_MOVE]; flags &= ~MS_MOVE; } if (!type) type = "<NULL>"; idx = tomoyo_read_lock(); error = tomoyo_mount_acl(&r, dev_name, path, type, flags); tomoyo_read_unlock(idx); return error; }
gpl-2.0
elektroschmock/android_kernel_google_msm
arch/arm/mach-imx/mx31moboard-marxbot.c
4913
9527
/* * Copyright (C) 2009 Valentin Longchamp, EPFL Mobots group * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/usb/otg.h> #include <mach/common.h> #include <mach/hardware.h> #include <mach/iomux-mx3.h> #include <mach/ulpi.h> #include <media/soc_camera.h> #include "devices-imx31.h" static unsigned int marxbot_pins[] = { /* SDHC2 */ MX31_PIN_PC_PWRON__SD2_DATA3, MX31_PIN_PC_VS1__SD2_DATA2, MX31_PIN_PC_READY__SD2_DATA1, MX31_PIN_PC_WAIT_B__SD2_DATA0, MX31_PIN_PC_CD2_B__SD2_CLK, MX31_PIN_PC_CD1_B__SD2_CMD, MX31_PIN_ATA_DIOR__GPIO3_28, MX31_PIN_ATA_DIOW__GPIO3_29, /* CSI */ MX31_PIN_CSI_D6__CSI_D6, MX31_PIN_CSI_D7__CSI_D7, MX31_PIN_CSI_D8__CSI_D8, MX31_PIN_CSI_D9__CSI_D9, MX31_PIN_CSI_D10__CSI_D10, MX31_PIN_CSI_D11__CSI_D11, MX31_PIN_CSI_D12__CSI_D12, MX31_PIN_CSI_D13__CSI_D13, MX31_PIN_CSI_D14__CSI_D14, MX31_PIN_CSI_D15__CSI_D15, MX31_PIN_CSI_HSYNC__CSI_HSYNC, MX31_PIN_CSI_MCLK__CSI_MCLK, MX31_PIN_CSI_PIXCLK__CSI_PIXCLK, MX31_PIN_CSI_VSYNC__CSI_VSYNC, MX31_PIN_CSI_D4__GPIO3_4, MX31_PIN_CSI_D5__GPIO3_5, MX31_PIN_GPIO3_0__GPIO3_0, MX31_PIN_GPIO3_1__GPIO3_1, MX31_PIN_TXD2__GPIO1_28, /* dsPIC resets */ MX31_PIN_STXD5__GPIO1_21, MX31_PIN_SRXD5__GPIO1_22, /*battery detection */ MX31_PIN_LCS0__GPIO3_23, /* USB H1 */ MX31_PIN_CSPI1_MISO__USBH1_RXDP, MX31_PIN_CSPI1_MOSI__USBH1_RXDM, 
MX31_PIN_CSPI1_SS0__USBH1_TXDM, MX31_PIN_CSPI1_SS1__USBH1_TXDP, MX31_PIN_CSPI1_SS2__USBH1_RCV, MX31_PIN_CSPI1_SCLK__USBH1_OEB, MX31_PIN_CSPI1_SPI_RDY__USBH1_FS, MX31_PIN_SFS6__USBH1_SUSPEND, MX31_PIN_NFRE_B__GPIO1_11, MX31_PIN_NFALE__GPIO1_12, /* SEL */ MX31_PIN_DTR_DCE1__GPIO2_8, MX31_PIN_DSR_DCE1__GPIO2_9, MX31_PIN_RI_DCE1__GPIO2_10, MX31_PIN_DCD_DCE1__GPIO2_11, }; #define SDHC2_CD IOMUX_TO_GPIO(MX31_PIN_ATA_DIOR) #define SDHC2_WP IOMUX_TO_GPIO(MX31_PIN_ATA_DIOW) static int marxbot_sdhc2_get_ro(struct device *dev) { return !gpio_get_value(SDHC2_WP); } static int marxbot_sdhc2_init(struct device *dev, irq_handler_t detect_irq, void *data) { int ret; ret = gpio_request(SDHC2_CD, "sdhc-detect"); if (ret) return ret; gpio_direction_input(SDHC2_CD); ret = gpio_request(SDHC2_WP, "sdhc-wp"); if (ret) goto err_gpio_free; gpio_direction_input(SDHC2_WP); ret = request_irq(gpio_to_irq(SDHC2_CD), detect_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "sdhc2-card-detect", data); if (ret) goto err_gpio_free_2; return 0; err_gpio_free_2: gpio_free(SDHC2_WP); err_gpio_free: gpio_free(SDHC2_CD); return ret; } static void marxbot_sdhc2_exit(struct device *dev, void *data) { free_irq(gpio_to_irq(SDHC2_CD), data); gpio_free(SDHC2_WP); gpio_free(SDHC2_CD); } static const struct imxmmc_platform_data sdhc2_pdata __initconst = { .get_ro = marxbot_sdhc2_get_ro, .init = marxbot_sdhc2_init, .exit = marxbot_sdhc2_exit, }; #define TRSLAT_RST_B IOMUX_TO_GPIO(MX31_PIN_STXD5) #define DSPICS_RST_B IOMUX_TO_GPIO(MX31_PIN_SRXD5) static void dspics_resets_init(void) { if (!gpio_request(TRSLAT_RST_B, "translator-rst")) { gpio_direction_output(TRSLAT_RST_B, 0); gpio_export(TRSLAT_RST_B, false); } if (!gpio_request(DSPICS_RST_B, "dspics-rst")) { gpio_direction_output(DSPICS_RST_B, 0); gpio_export(DSPICS_RST_B, false); } } static struct spi_board_info marxbot_spi_board_info[] __initdata = { { .modalias = "spidev", .max_speed_hz = 300000, .bus_num = 1, .chip_select = 1, /* according spi1_cs[] ! 
*/ }, }; #define TURRETCAM_POWER IOMUX_TO_GPIO(MX31_PIN_GPIO3_1) #define BASECAM_POWER IOMUX_TO_GPIO(MX31_PIN_CSI_D5) #define TURRETCAM_RST_B IOMUX_TO_GPIO(MX31_PIN_GPIO3_0) #define BASECAM_RST_B IOMUX_TO_GPIO(MX31_PIN_CSI_D4) #define CAM_CHOICE IOMUX_TO_GPIO(MX31_PIN_TXD2) static int marxbot_basecam_power(struct device *dev, int on) { gpio_set_value(BASECAM_POWER, !on); return 0; } static int marxbot_basecam_reset(struct device *dev) { gpio_set_value(BASECAM_RST_B, 0); udelay(100); gpio_set_value(BASECAM_RST_B, 1); return 0; } static struct i2c_board_info marxbot_i2c_devices[] = { { I2C_BOARD_INFO("mt9t031", 0x5d), }, }; static struct soc_camera_link base_iclink = { .bus_id = 0, /* Must match with the camera ID */ .power = marxbot_basecam_power, .reset = marxbot_basecam_reset, .board_info = &marxbot_i2c_devices[0], .i2c_adapter_id = 0, }; static struct platform_device marxbot_camera[] = { { .name = "soc-camera-pdrv", .id = 0, .dev = { .platform_data = &base_iclink, }, }, }; static struct platform_device *marxbot_cameras[] __initdata = { &marxbot_camera[0], }; static int __init marxbot_cam_init(void) { int ret = gpio_request(CAM_CHOICE, "cam-choice"); if (ret) return ret; gpio_direction_output(CAM_CHOICE, 0); ret = gpio_request(BASECAM_RST_B, "basecam-reset"); if (ret) return ret; gpio_direction_output(BASECAM_RST_B, 1); ret = gpio_request(BASECAM_POWER, "basecam-standby"); if (ret) return ret; gpio_direction_output(BASECAM_POWER, 0); ret = gpio_request(TURRETCAM_RST_B, "turretcam-reset"); if (ret) return ret; gpio_direction_output(TURRETCAM_RST_B, 1); ret = gpio_request(TURRETCAM_POWER, "turretcam-standby"); if (ret) return ret; gpio_direction_output(TURRETCAM_POWER, 0); return 0; } #define SEL0 IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1) #define SEL1 IOMUX_TO_GPIO(MX31_PIN_DSR_DCE1) #define SEL2 IOMUX_TO_GPIO(MX31_PIN_RI_DCE1) #define SEL3 IOMUX_TO_GPIO(MX31_PIN_DCD_DCE1) static void marxbot_init_sel_gpios(void) { if (!gpio_request(SEL0, "sel0")) { 
gpio_direction_input(SEL0); gpio_export(SEL0, true); } if (!gpio_request(SEL1, "sel1")) { gpio_direction_input(SEL1); gpio_export(SEL1, true); } if (!gpio_request(SEL2, "sel2")) { gpio_direction_input(SEL2); gpio_export(SEL2, true); } if (!gpio_request(SEL3, "sel3")) { gpio_direction_input(SEL3); gpio_export(SEL3, true); } } #define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \ PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU) static int marxbot_usbh1_hw_init(struct platform_device *pdev) { mxc_iomux_set_gpr(MUX_PGP_USB_SUSPEND, true); mxc_iomux_set_pad(MX31_PIN_CSPI1_MISO, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_CSPI1_MOSI, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_CSPI1_SS0, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_CSPI1_SS1, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_CSPI1_SS2, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_CSPI1_SCLK, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_CSPI1_SPI_RDY, USB_PAD_CFG); mxc_iomux_set_pad(MX31_PIN_SFS6, USB_PAD_CFG); mdelay(10); return mx31_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED | MXC_EHCI_INTERFACE_SINGLE_UNI); } #define USBH1_VBUSEN_B IOMUX_TO_GPIO(MX31_PIN_NFRE_B) #define USBH1_MODE IOMUX_TO_GPIO(MX31_PIN_NFALE) static int marxbot_isp1105_init(struct usb_phy *otg) { int ret = gpio_request(USBH1_MODE, "usbh1-mode"); if (ret) return ret; /* single ended */ gpio_direction_output(USBH1_MODE, 0); ret = gpio_request(USBH1_VBUSEN_B, "usbh1-vbusen"); if (ret) { gpio_free(USBH1_MODE); return ret; } gpio_direction_output(USBH1_VBUSEN_B, 1); return 0; } static int marxbot_isp1105_set_vbus(struct usb_otg *otg, bool on) { if (on) gpio_set_value(USBH1_VBUSEN_B, 0); else gpio_set_value(USBH1_VBUSEN_B, 1); return 0; } static struct mxc_usbh_platform_data usbh1_pdata __initdata = { .init = marxbot_usbh1_hw_init, .portsc = MXC_EHCI_MODE_UTMI | MXC_EHCI_SERIAL, }; static int __init marxbot_usbh1_init(void) { struct usb_phy *phy; struct platform_device *pdev; phy = kzalloc(sizeof(*phy), GFP_KERNEL); if (!phy) return -ENOMEM; 
phy->otg = kzalloc(sizeof(struct usb_otg), GFP_KERNEL); if (!phy->otg) { kfree(phy); return -ENOMEM; } phy->label = "ISP1105"; phy->init = marxbot_isp1105_init; phy->otg->set_vbus = marxbot_isp1105_set_vbus; usbh1_pdata.otg = phy; pdev = imx31_add_mxc_ehci_hs(1, &usbh1_pdata); if (IS_ERR(pdev)) return PTR_ERR(pdev); return 0; } static const struct fsl_usb2_platform_data usb_pdata __initconst = { .operating_mode = FSL_USB2_DR_DEVICE, .phy_mode = FSL_USB2_PHY_ULPI, }; /* * system init for baseboard usage. Will be called by mx31moboard init. */ void __init mx31moboard_marxbot_init(void) { printk(KERN_INFO "Initializing mx31marxbot peripherals\n"); mxc_iomux_setup_multiple_pins(marxbot_pins, ARRAY_SIZE(marxbot_pins), "marxbot"); marxbot_init_sel_gpios(); dspics_resets_init(); imx31_add_mxc_mmc(1, &sdhc2_pdata); spi_register_board_info(marxbot_spi_board_info, ARRAY_SIZE(marxbot_spi_board_info)); marxbot_cam_init(); platform_add_devices(marxbot_cameras, ARRAY_SIZE(marxbot_cameras)); /* battery present pin */ gpio_request(IOMUX_TO_GPIO(MX31_PIN_LCS0), "bat-present"); gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_LCS0)); gpio_export(IOMUX_TO_GPIO(MX31_PIN_LCS0), false); imx31_add_fsl_usb2_udc(&usb_pdata); marxbot_usbh1_init(); }
gpl-2.0
tweezy23/kernel-msm
arch/arm/mach-imx/mmdc.c
5169
1683
/* * Copyright 2011 Freescale Semiconductor, Inc. * Copyright 2011 Linaro Ltd. * * The code contained herein is licensed under the GNU General Public * License. You may obtain a copy of the GNU General Public License * Version 2 or later at the following locations: * * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> #define MMDC_MAPSR 0x404 #define BP_MMDC_MAPSR_PSD 0 #define BP_MMDC_MAPSR_PSS 4 static int __devinit imx_mmdc_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; void __iomem *mmdc_base, *reg; u32 val; int timeout = 0x400; mmdc_base = of_iomap(np, 0); WARN_ON(!mmdc_base); reg = mmdc_base + MMDC_MAPSR; /* Enable automatic power saving */ val = readl_relaxed(reg); val &= ~(1 << BP_MMDC_MAPSR_PSD); writel_relaxed(val, reg); /* Ensure it's successfully enabled */ while (!(readl_relaxed(reg) & 1 << BP_MMDC_MAPSR_PSS) && --timeout) cpu_relax(); if (unlikely(!timeout)) { pr_warn("%s: failed to enable automatic power saving\n", __func__); return -EBUSY; } return 0; } static struct of_device_id imx_mmdc_dt_ids[] = { { .compatible = "fsl,imx6q-mmdc", }, { /* sentinel */ } }; static struct platform_driver imx_mmdc_driver = { .driver = { .name = "imx-mmdc", .owner = THIS_MODULE, .of_match_table = imx_mmdc_dt_ids, }, .probe = imx_mmdc_probe, }; static int __init imx_mmdc_init(void) { return platform_driver_register(&imx_mmdc_driver); } postcore_initcall(imx_mmdc_init);
gpl-2.0
embeddedarm/linux-3.4-ts75xx
drivers/staging/comedi/drivers/dmm32at.c
7985
29779
/* comedi/drivers/dmm32at.c Diamond Systems mm32at code for a Comedi driver COMEDI - Linux Control and Measurement Device Interface Copyright (C) 2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: dmm32at Description: Diamond Systems mm32at driver. Devices: Author: Perry J. Piplani <perry.j.piplani@nasa.gov> Updated: Fri Jun 4 09:13:24 CDT 2004 Status: experimental This driver is for the Diamond Systems MM-32-AT board http://www.diamondsystems.com/products/diamondmm32at It is being used on serveral projects inside NASA, without problems so far. For analog input commands, TRIG_EXT is not yet supported at all.. 
Configuration Options: comedi_config /dev/comedi0 dmm32at baseaddr,irq */ #include <linux/interrupt.h> #include "../comedidev.h" #include <linux/ioport.h> /* Board register addresses */ #define DMM32AT_MEMSIZE 0x10 #define DMM32AT_CONV 0x00 #define DMM32AT_AILSB 0x00 #define DMM32AT_AUXDOUT 0x01 #define DMM32AT_AIMSB 0x01 #define DMM32AT_AILOW 0x02 #define DMM32AT_AIHIGH 0x03 #define DMM32AT_DACLSB 0x04 #define DMM32AT_DACSTAT 0x04 #define DMM32AT_DACMSB 0x05 #define DMM32AT_FIFOCNTRL 0x07 #define DMM32AT_FIFOSTAT 0x07 #define DMM32AT_CNTRL 0x08 #define DMM32AT_AISTAT 0x08 #define DMM32AT_INTCLOCK 0x09 #define DMM32AT_CNTRDIO 0x0a #define DMM32AT_AICONF 0x0b #define DMM32AT_AIRBACK 0x0b #define DMM32AT_CLK1 0x0d #define DMM32AT_CLK2 0x0e #define DMM32AT_CLKCT 0x0f #define DMM32AT_DIOA 0x0c #define DMM32AT_DIOB 0x0d #define DMM32AT_DIOC 0x0e #define DMM32AT_DIOCONF 0x0f #define dmm_inb(cdev, reg) inb((cdev->iobase)+reg) #define dmm_outb(cdev, reg, valu) outb(valu, (cdev->iobase)+reg) /* Board register values. 
*/ /* DMM32AT_DACSTAT 0x04 */ #define DMM32AT_DACBUSY 0x80 /* DMM32AT_FIFOCNTRL 0x07 */ #define DMM32AT_FIFORESET 0x02 #define DMM32AT_SCANENABLE 0x04 /* DMM32AT_CNTRL 0x08 */ #define DMM32AT_RESET 0x20 #define DMM32AT_INTRESET 0x08 #define DMM32AT_CLKACC 0x00 #define DMM32AT_DIOACC 0x01 /* DMM32AT_AISTAT 0x08 */ #define DMM32AT_STATUS 0x80 /* DMM32AT_INTCLOCK 0x09 */ #define DMM32AT_ADINT 0x80 #define DMM32AT_CLKSEL 0x03 /* DMM32AT_CNTRDIO 0x0a */ #define DMM32AT_FREQ12 0x80 /* DMM32AT_AICONF 0x0b */ #define DMM32AT_RANGE_U10 0x0c #define DMM32AT_RANGE_U5 0x0d #define DMM32AT_RANGE_B10 0x08 #define DMM32AT_RANGE_B5 0x00 #define DMM32AT_SCINT_20 0x00 #define DMM32AT_SCINT_15 0x10 #define DMM32AT_SCINT_10 0x20 #define DMM32AT_SCINT_5 0x30 /* DMM32AT_CLKCT 0x0f */ #define DMM32AT_CLKCT1 0x56 /* mode3 counter 1 - write low byte only */ #define DMM32AT_CLKCT2 0xb6 /* mode3 counter 2 - write high and low byte */ /* DMM32AT_DIOCONF 0x0f */ #define DMM32AT_DIENABLE 0x80 #define DMM32AT_DIRA 0x10 #define DMM32AT_DIRB 0x02 #define DMM32AT_DIRCL 0x01 #define DMM32AT_DIRCH 0x08 /* board AI ranges in comedi structure */ static const struct comedi_lrange dmm32at_airanges = { 4, { UNI_RANGE(10), UNI_RANGE(5), BIP_RANGE(10), BIP_RANGE(5), } }; /* register values for above ranges */ static const unsigned char dmm32at_rangebits[] = { DMM32AT_RANGE_U10, DMM32AT_RANGE_U5, DMM32AT_RANGE_B10, DMM32AT_RANGE_B5, }; /* only one of these ranges is valid, as set by a jumper on the * board. The application should only use the range set by the jumper */ static const struct comedi_lrange dmm32at_aoranges = { 4, { UNI_RANGE(10), UNI_RANGE(5), BIP_RANGE(10), BIP_RANGE(5), } }; /* * Board descriptions for two imaginary boards. Describing the * boards in this way is optional, and completely driver-dependent. * Some drivers use arrays such as this, other do not. 
*/ struct dmm32at_board { const char *name; int ai_chans; int ai_bits; const struct comedi_lrange *ai_ranges; int ao_chans; int ao_bits; const struct comedi_lrange *ao_ranges; int have_dio; int dio_chans; }; static const struct dmm32at_board dmm32at_boards[] = { { .name = "dmm32at", .ai_chans = 32, .ai_bits = 16, .ai_ranges = &dmm32at_airanges, .ao_chans = 4, .ao_bits = 12, .ao_ranges = &dmm32at_aoranges, .have_dio = 1, .dio_chans = 24, }, }; /* * Useful for shorthand access to the particular board structure */ #define thisboard ((const struct dmm32at_board *)dev->board_ptr) /* this structure is for data unique to this hardware driver. If * several hardware drivers keep similar information in this structure, * feel free to suggest moving the variable to the struct comedi_device struct. */ struct dmm32at_private { int data; int ai_inuse; unsigned int ai_scans_left; /* Used for AO readback */ unsigned int ao_readback[4]; unsigned char dio_config; }; /* * most drivers define the following macro to make it easy to * access the private structure. */ #define devpriv ((struct dmm32at_private *)dev->private) /* * The struct comedi_driver structure tells the Comedi core module * which functions to call to configure/deconfigure (attach/detach) * the board, and also about the kernel module that contains * the device code. */ static int dmm32at_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int dmm32at_detach(struct comedi_device *dev); static struct comedi_driver driver_dmm32at = { .driver_name = "dmm32at", .module = THIS_MODULE, .attach = dmm32at_attach, .detach = dmm32at_detach, /* It is not necessary to implement the following members if you are * writing a driver for a ISA PnP or PCI card */ /* Most drivers will support multiple types of boards by * having an array of board structures. These were defined * in dmm32at_boards[] above. 
Note that the element 'name' * was first in the structure -- Comedi uses this fact to * extract the name of the board without knowing any details * about the structure except for its length. * When a device is attached (by comedi_config), the name * of the device is given to Comedi, and Comedi tries to * match it by going through the list of board names. If * there is a match, the address of the pointer is put * into dev->board_ptr and driver->attach() is called. * * Note that these are not necessary if you can determine * the type of board in software. ISA PnP, PCI, and PCMCIA * devices are such boards. */ .board_name = &dmm32at_boards[0].name, .offset = sizeof(struct dmm32at_board), .num_names = ARRAY_SIZE(dmm32at_boards), }; /* prototypes for driver functions below */ static int dmm32at_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int dmm32at_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int dmm32at_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int dmm32at_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int dmm32at_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int dmm32at_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int dmm32at_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static int dmm32at_ns_to_timer(unsigned int *ns, int round); static irqreturn_t dmm32at_isr(int irq, void *d); void dmm32at_setaitimer(struct comedi_device *dev, unsigned int nansec); /* * Attach is called by the Comedi core to configure the driver * for a particular board. 
If you specified a board_name array * in the driver structure, dev->board_ptr contains that * address. */ static int dmm32at_attach(struct comedi_device *dev, struct comedi_devconfig *it) { int ret; struct comedi_subdevice *s; unsigned char aihi, ailo, fifostat, aistat, intstat, airback; unsigned long iobase; unsigned int irq; iobase = it->options[0]; irq = it->options[1]; printk(KERN_INFO "comedi%d: dmm32at: attaching\n", dev->minor); printk(KERN_DEBUG "dmm32at: probing at address 0x%04lx, irq %u\n", iobase, irq); /* register address space */ if (!request_region(iobase, DMM32AT_MEMSIZE, thisboard->name)) { printk(KERN_ERR "comedi%d: dmm32at: I/O port conflict\n", dev->minor); return -EIO; } dev->iobase = iobase; /* the following just makes sure the board is there and gets it to a known state */ /* reset the board */ dmm_outb(dev, DMM32AT_CNTRL, DMM32AT_RESET); /* allow a millisecond to reset */ udelay(1000); /* zero scan and fifo control */ dmm_outb(dev, DMM32AT_FIFOCNTRL, 0x0); /* zero interrupt and clock control */ dmm_outb(dev, DMM32AT_INTCLOCK, 0x0); /* write a test channel range, the high 3 bits should drop */ dmm_outb(dev, DMM32AT_AILOW, 0x80); dmm_outb(dev, DMM32AT_AIHIGH, 0xff); /* set the range at 10v unipolar */ dmm_outb(dev, DMM32AT_AICONF, DMM32AT_RANGE_U10); /* should take 10 us to settle, here's a hundred */ udelay(100); /* read back the values */ ailo = dmm_inb(dev, DMM32AT_AILOW); aihi = dmm_inb(dev, DMM32AT_AIHIGH); fifostat = dmm_inb(dev, DMM32AT_FIFOSTAT); aistat = dmm_inb(dev, DMM32AT_AISTAT); intstat = dmm_inb(dev, DMM32AT_INTCLOCK); airback = dmm_inb(dev, DMM32AT_AIRBACK); printk(KERN_DEBUG "dmm32at: lo=0x%02x hi=0x%02x fifostat=0x%02x\n", ailo, aihi, fifostat); printk(KERN_DEBUG "dmm32at: aistat=0x%02x intstat=0x%02x airback=0x%02x\n", aistat, intstat, airback); if ((ailo != 0x00) || (aihi != 0x1f) || (fifostat != 0x80) || (aistat != 0x60 || (intstat != 0x00) || airback != 0x0c)) { printk(KERN_ERR "dmmat32: board detection failed\n"); return 
-EIO; } /* board is there, register interrupt */ if (irq) { ret = request_irq(irq, dmm32at_isr, 0, thisboard->name, dev); if (ret < 0) { printk(KERN_ERR "dmm32at: irq conflict\n"); return ret; } dev->irq = irq; } /* * If you can probe the device to determine what device in a series * it is, this is the place to do it. Otherwise, dev->board_ptr * should already be initialized. */ /* dev->board_ptr = dmm32at_probe(dev); */ /* * Initialize dev->board_name. Note that we can use the "thisboard" * macro now, since we just initialized it in the last line. */ dev->board_name = thisboard->name; /* * Allocate the private structure area. alloc_private() is a * convenient macro defined in comedidev.h. */ if (alloc_private(dev, sizeof(struct dmm32at_private)) < 0) return -ENOMEM; /* * Allocate the subdevice structures. alloc_subdevice() is a * convenient macro defined in comedidev.h. */ if (alloc_subdevices(dev, 3) < 0) return -ENOMEM; s = dev->subdevices + 0; dev->read_subdev = s; /* analog input subdevice */ s->type = COMEDI_SUBD_AI; /* we support single-ended (ground) and differential */ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_DIFF | SDF_CMD_READ; s->n_chan = thisboard->ai_chans; s->maxdata = (1 << thisboard->ai_bits) - 1; s->range_table = thisboard->ai_ranges; s->len_chanlist = 32; /* This is the maximum chanlist length that the board can handle */ s->insn_read = dmm32at_ai_rinsn; s->do_cmd = dmm32at_ai_cmd; s->do_cmdtest = dmm32at_ai_cmdtest; s->cancel = dmm32at_ai_cancel; s = dev->subdevices + 1; /* analog output subdevice */ s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE; s->n_chan = thisboard->ao_chans; s->maxdata = (1 << thisboard->ao_bits) - 1; s->range_table = thisboard->ao_ranges; s->insn_write = dmm32at_ao_winsn; s->insn_read = dmm32at_ao_rinsn; s = dev->subdevices + 2; /* digital i/o subdevice */ if (thisboard->have_dio) { /* get access to the DIO regs */ dmm_outb(dev, DMM32AT_CNTRL, DMM32AT_DIOACC); /* set the DIO's to the defualt input 
setting */ devpriv->dio_config = DMM32AT_DIRA | DMM32AT_DIRB | DMM32AT_DIRCL | DMM32AT_DIRCH | DMM32AT_DIENABLE; dmm_outb(dev, DMM32AT_DIOCONF, devpriv->dio_config); /* set up the subdevice */ s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = thisboard->dio_chans; s->maxdata = 1; s->state = 0; s->range_table = &range_digital; s->insn_bits = dmm32at_dio_insn_bits; s->insn_config = dmm32at_dio_insn_config; } else { s->type = COMEDI_SUBD_UNUSED; } /* success */ printk(KERN_INFO "comedi%d: dmm32at: attached\n", dev->minor); return 1; } /* * _detach is called to deconfigure a device. It should deallocate * resources. * This function is also called when _attach() fails, so it should be * careful not to release resources that were not necessarily * allocated by _attach(). dev->private and dev->subdevices are * deallocated automatically by the core. */ static int dmm32at_detach(struct comedi_device *dev) { printk(KERN_INFO "comedi%d: dmm32at: remove\n", dev->minor); if (dev->irq) free_irq(dev->irq, dev); if (dev->iobase) release_region(dev->iobase, DMM32AT_MEMSIZE); return 0; } /* * "instructions" read/write data in "one-shot" or "software-triggered" * mode. 
*/ static int dmm32at_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n, i; unsigned int d; unsigned char status; unsigned short msb, lsb; unsigned char chan; int range; /* get the channel and range number */ chan = CR_CHAN(insn->chanspec) & (s->n_chan - 1); range = CR_RANGE(insn->chanspec); /* printk("channel=0x%02x, range=%d\n",chan,range); */ /* zero scan and fifo control and reset fifo */ dmm_outb(dev, DMM32AT_FIFOCNTRL, DMM32AT_FIFORESET); /* write the ai channel range regs */ dmm_outb(dev, DMM32AT_AILOW, chan); dmm_outb(dev, DMM32AT_AIHIGH, chan); /* set the range bits */ dmm_outb(dev, DMM32AT_AICONF, dmm32at_rangebits[range]); /* wait for circuit to settle */ for (i = 0; i < 40000; i++) { status = dmm_inb(dev, DMM32AT_AIRBACK); if ((status & DMM32AT_STATUS) == 0) break; } if (i == 40000) { printk(KERN_WARNING "dmm32at: timeout\n"); return -ETIMEDOUT; } /* convert n samples */ for (n = 0; n < insn->n; n++) { /* trigger conversion */ dmm_outb(dev, DMM32AT_CONV, 0xff); /* wait for conversion to end */ for (i = 0; i < 40000; i++) { status = dmm_inb(dev, DMM32AT_AISTAT); if ((status & DMM32AT_STATUS) == 0) break; } if (i == 40000) { printk(KERN_WARNING "dmm32at: timeout\n"); return -ETIMEDOUT; } /* read data */ lsb = dmm_inb(dev, DMM32AT_AILSB); msb = dmm_inb(dev, DMM32AT_AIMSB); /* invert sign bit to make range unsigned, this is an idiosyncrasy of the diamond board, it return conversions as a signed value, i.e. -32768 to 32767, flipping the bit and interpreting it as signed gives you a range of 0 to 65535 which is used by comedi */ d = ((msb ^ 0x0080) << 8) + lsb; data[n] = d; } /* return the number of samples read/written */ return n; } static int dmm32at_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; int start_chan, gain, i; /* printk("dmmat32 in command test\n"); */ /* cmdtest tests a particular command to see if it is valid. 
* Using the cmdtest ioctl, a user can create a valid cmd * and then have it executes by the cmd ioctl. * * cmdtest returns 1,2,3,4 or 0, depending on which tests * the command passes. */ /* step 1: make sure trigger sources are trivially valid */ tmp = cmd->start_src; cmd->start_src &= TRIG_NOW; if (!cmd->start_src || tmp != cmd->start_src) err++; tmp = cmd->scan_begin_src; cmd->scan_begin_src &= TRIG_TIMER /*| TRIG_EXT */ ; if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src) err++; tmp = cmd->convert_src; cmd->convert_src &= TRIG_TIMER /*| TRIG_EXT */ ; if (!cmd->convert_src || tmp != cmd->convert_src) err++; tmp = cmd->scan_end_src; cmd->scan_end_src &= TRIG_COUNT; if (!cmd->scan_end_src || tmp != cmd->scan_end_src) err++; tmp = cmd->stop_src; cmd->stop_src &= TRIG_COUNT | TRIG_NONE; if (!cmd->stop_src || tmp != cmd->stop_src) err++; if (err) return 1; /* step 2: make sure trigger sources are unique and mutually * compatible */ /* note that mutual compatibility is not an issue here */ if (cmd->scan_begin_src != TRIG_TIMER && cmd->scan_begin_src != TRIG_EXT) err++; if (cmd->convert_src != TRIG_TIMER && cmd->convert_src != TRIG_EXT) err++; if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE) err++; if (err) return 2; /* step 3: make sure arguments are trivially compatible */ if (cmd->start_arg != 0) { cmd->start_arg = 0; err++; } #define MAX_SCAN_SPEED 1000000 /* in nanoseconds */ #define MIN_SCAN_SPEED 1000000000 /* in nanoseconds */ if (cmd->scan_begin_src == TRIG_TIMER) { if (cmd->scan_begin_arg < MAX_SCAN_SPEED) { cmd->scan_begin_arg = MAX_SCAN_SPEED; err++; } if (cmd->scan_begin_arg > MIN_SCAN_SPEED) { cmd->scan_begin_arg = MIN_SCAN_SPEED; err++; } } else { /* external trigger */ /* should be level/edge, hi/lo specification here */ /* should specify multiple external triggers */ if (cmd->scan_begin_arg > 9) { cmd->scan_begin_arg = 9; err++; } } if (cmd->convert_src == TRIG_TIMER) { if (cmd->convert_arg >= 17500) cmd->convert_arg = 20000; else if 
(cmd->convert_arg >= 12500) cmd->convert_arg = 15000; else if (cmd->convert_arg >= 7500) cmd->convert_arg = 10000; else cmd->convert_arg = 5000; } else { /* external trigger */ /* see above */ if (cmd->convert_arg > 9) { cmd->convert_arg = 9; err++; } } if (cmd->scan_end_arg != cmd->chanlist_len) { cmd->scan_end_arg = cmd->chanlist_len; err++; } if (cmd->stop_src == TRIG_COUNT) { if (cmd->stop_arg > 0xfffffff0) { cmd->stop_arg = 0xfffffff0; err++; } if (cmd->stop_arg == 0) { cmd->stop_arg = 1; err++; } } else { /* TRIG_NONE */ if (cmd->stop_arg != 0) { cmd->stop_arg = 0; err++; } } if (err) return 3; /* step 4: fix up any arguments */ if (cmd->scan_begin_src == TRIG_TIMER) { tmp = cmd->scan_begin_arg; dmm32at_ns_to_timer(&cmd->scan_begin_arg, cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->scan_begin_arg) err++; } if (cmd->convert_src == TRIG_TIMER) { tmp = cmd->convert_arg; dmm32at_ns_to_timer(&cmd->convert_arg, cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->convert_arg) err++; if (cmd->scan_begin_src == TRIG_TIMER && cmd->scan_begin_arg < cmd->convert_arg * cmd->scan_end_arg) { cmd->scan_begin_arg = cmd->convert_arg * cmd->scan_end_arg; err++; } } if (err) return 4; /* step 5 check the channel list, the channel list for this board must be consecutive and gains must be the same */ if (cmd->chanlist) { gain = CR_RANGE(cmd->chanlist[0]); start_chan = CR_CHAN(cmd->chanlist[0]); for (i = 1; i < cmd->chanlist_len; i++) { if (CR_CHAN(cmd->chanlist[i]) != (start_chan + i) % s->n_chan) { comedi_error(dev, "entries in chanlist must be consecutive channels, counting upwards\n"); err++; } if (CR_RANGE(cmd->chanlist[i]) != gain) { comedi_error(dev, "entries in chanlist must all have the same gain\n"); err++; } } } if (err) return 5; return 0; } static int dmm32at_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct comedi_cmd *cmd = &s->async->cmd; int i, range; unsigned char chanlo, chanhi, status; if (!cmd->chanlist) return -EINVAL; /* get the channel list 
and range */ chanlo = CR_CHAN(cmd->chanlist[0]) & (s->n_chan - 1); chanhi = chanlo + cmd->chanlist_len - 1; if (chanhi >= s->n_chan) return -EINVAL; range = CR_RANGE(cmd->chanlist[0]); /* reset fifo */ dmm_outb(dev, DMM32AT_FIFOCNTRL, DMM32AT_FIFORESET); /* set scan enable */ dmm_outb(dev, DMM32AT_FIFOCNTRL, DMM32AT_SCANENABLE); /* write the ai channel range regs */ dmm_outb(dev, DMM32AT_AILOW, chanlo); dmm_outb(dev, DMM32AT_AIHIGH, chanhi); /* set the range bits */ dmm_outb(dev, DMM32AT_AICONF, dmm32at_rangebits[range]); /* reset the interrupt just in case */ dmm_outb(dev, DMM32AT_CNTRL, DMM32AT_INTRESET); if (cmd->stop_src == TRIG_COUNT) devpriv->ai_scans_left = cmd->stop_arg; else { /* TRIG_NONE */ devpriv->ai_scans_left = 0xffffffff; /* indicates TRIG_NONE to * isr */ } /* wait for circuit to settle */ for (i = 0; i < 40000; i++) { status = dmm_inb(dev, DMM32AT_AIRBACK); if ((status & DMM32AT_STATUS) == 0) break; } if (i == 40000) { printk(KERN_WARNING "dmm32at: timeout\n"); return -ETIMEDOUT; } if (devpriv->ai_scans_left > 1) { /* start the clock and enable the interrupts */ dmm32at_setaitimer(dev, cmd->scan_begin_arg); } else { /* start the interrups and initiate a single scan */ dmm_outb(dev, DMM32AT_INTCLOCK, DMM32AT_ADINT); dmm_outb(dev, DMM32AT_CONV, 0xff); } /* printk("dmmat32 in command\n"); */ /* for(i=0;i<cmd->chanlist_len;i++) */ /* comedi_buf_put(s->async,i*100); */ /* s->async->events |= COMEDI_CB_EOA; */ /* comedi_event(dev, s); */ return 0; } static int dmm32at_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { devpriv->ai_scans_left = 1; return 0; } static irqreturn_t dmm32at_isr(int irq, void *d) { unsigned char intstat; unsigned int samp; unsigned short msb, lsb; int i; struct comedi_device *dev = d; if (!dev->attached) { comedi_error(dev, "spurious interrupt"); return IRQ_HANDLED; } intstat = dmm_inb(dev, DMM32AT_INTCLOCK); if (intstat & DMM32AT_ADINT) { struct comedi_subdevice *s = dev->read_subdev; struct comedi_cmd *cmd = 
&s->async->cmd; for (i = 0; i < cmd->chanlist_len; i++) { /* read data */ lsb = dmm_inb(dev, DMM32AT_AILSB); msb = dmm_inb(dev, DMM32AT_AIMSB); /* invert sign bit to make range unsigned */ samp = ((msb ^ 0x0080) << 8) + lsb; comedi_buf_put(s->async, samp); } if (devpriv->ai_scans_left != 0xffffffff) { /* TRIG_COUNT */ devpriv->ai_scans_left--; if (devpriv->ai_scans_left == 0) { /* disable further interrupts and clocks */ dmm_outb(dev, DMM32AT_INTCLOCK, 0x0); /* set the buffer to be flushed with an EOF */ s->async->events |= COMEDI_CB_EOA; } } /* flush the buffer */ comedi_event(dev, s); } /* reset the interrupt */ dmm_outb(dev, DMM32AT_CNTRL, DMM32AT_INTRESET); return IRQ_HANDLED; } /* This function doesn't require a particular form, this is just * what happens to be used in some of the drivers. It should * convert ns nanoseconds to a counter value suitable for programming * the device. Also, it should adjust ns so that it cooresponds to * the actual time that the device will use. */ static int dmm32at_ns_to_timer(unsigned int *ns, int round) { /* trivial timer */ /* if your timing is done through two cascaded timers, the * i8253_cascade_ns_to_timer() function in 8253.h can be * very helpful. There are also i8254_load() and i8254_mm_load() * which can be used to load values into the ubiquitous 8254 counters */ return *ns; } static int dmm32at_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan = CR_CHAN(insn->chanspec); unsigned char hi, lo, status; /* Writing a list of values to an AO channel is probably not * very useful, but that's how the interface is defined. 
*/ for (i = 0; i < insn->n; i++) { devpriv->ao_readback[chan] = data[i]; /* get the low byte */ lo = data[i] & 0x00ff; /* high byte also contains channel number */ hi = (data[i] >> 8) + chan * (1 << 6); /* printk("writing 0x%02x 0x%02x\n",hi,lo); */ /* write the low and high values to the board */ dmm_outb(dev, DMM32AT_DACLSB, lo); dmm_outb(dev, DMM32AT_DACMSB, hi); /* wait for circuit to settle */ for (i = 0; i < 40000; i++) { status = dmm_inb(dev, DMM32AT_DACSTAT); if ((status & DMM32AT_DACBUSY) == 0) break; } if (i == 40000) { printk(KERN_WARNING "dmm32at: timeout\n"); return -ETIMEDOUT; } /* dummy read to update trigger the output */ status = dmm_inb(dev, DMM32AT_DACMSB); } /* return the number of samples read/written */ return i; } /* AO subdevices should have a read insn as well as a write insn. * Usually this means copying a value stored in devpriv. */ static int dmm32at_ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan = CR_CHAN(insn->chanspec); for (i = 0; i < insn->n; i++) data[i] = devpriv->ao_readback[chan]; return i; } /* DIO devices are slightly special. Although it is possible to * implement the insn_read/insn_write interface, it is much more * useful to applications if you implement the insn_bits interface. * This allows packed reading/writing of the DIO channels. The * comedi core can convert between insn_bits and insn_read/write */ static int dmm32at_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned char diobits; if (insn->n != 2) return -EINVAL; /* The insn data is a mask in data[0] and the new data * in data[1], each channel cooresponding to a bit. 
*/ if (data[0]) { s->state &= ~data[0]; s->state |= data[0] & data[1]; /* Write out the new digital output lines */ /* outw(s->state,dev->iobase + DMM32AT_DIO); */ } /* get access to the DIO regs */ dmm_outb(dev, DMM32AT_CNTRL, DMM32AT_DIOACC); /* if either part of dio is set for output */ if (((devpriv->dio_config & DMM32AT_DIRCL) == 0) || ((devpriv->dio_config & DMM32AT_DIRCH) == 0)) { diobits = (s->state & 0x00ff0000) >> 16; dmm_outb(dev, DMM32AT_DIOC, diobits); } if ((devpriv->dio_config & DMM32AT_DIRB) == 0) { diobits = (s->state & 0x0000ff00) >> 8; dmm_outb(dev, DMM32AT_DIOB, diobits); } if ((devpriv->dio_config & DMM32AT_DIRA) == 0) { diobits = (s->state & 0x000000ff); dmm_outb(dev, DMM32AT_DIOA, diobits); } /* now read the state back in */ s->state = dmm_inb(dev, DMM32AT_DIOC); s->state <<= 8; s->state |= dmm_inb(dev, DMM32AT_DIOB); s->state <<= 8; s->state |= dmm_inb(dev, DMM32AT_DIOA); data[1] = s->state; /* on return, data[1] contains the value of the digital * input and output lines. */ /* data[1]=inw(dev->iobase + DMM32AT_DIO); */ /* or we could just return the software copy of the output values if * it was a purely digital output subdevice */ /* data[1]=s->state; */ return 2; } static int dmm32at_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned char chanbit; int chan = CR_CHAN(insn->chanspec); if (insn->n != 1) return -EINVAL; if (chan < 8) chanbit = DMM32AT_DIRA; else if (chan < 16) chanbit = DMM32AT_DIRB; else if (chan < 20) chanbit = DMM32AT_DIRCL; else chanbit = DMM32AT_DIRCH; /* The input or output configuration of each digital line is * configured by a special insn_config instruction. chanspec * contains the channel to be changed, and data[0] contains the * value COMEDI_INPUT or COMEDI_OUTPUT. 
*/ /* if output clear the bit, otherwise set it */ if (data[0] == COMEDI_OUTPUT) devpriv->dio_config &= ~chanbit; else devpriv->dio_config |= chanbit; /* get access to the DIO regs */ dmm_outb(dev, DMM32AT_CNTRL, DMM32AT_DIOACC); /* set the DIO's to the new configuration setting */ dmm_outb(dev, DMM32AT_DIOCONF, devpriv->dio_config); return 1; } void dmm32at_setaitimer(struct comedi_device *dev, unsigned int nansec) { unsigned char lo1, lo2, hi2; unsigned short both2; /* based on 10mhz clock */ lo1 = 200; both2 = nansec / 20000; hi2 = (both2 & 0xff00) >> 8; lo2 = both2 & 0x00ff; /* set the counter frequency to 10mhz */ dmm_outb(dev, DMM32AT_CNTRDIO, 0); /* get access to the clock regs */ dmm_outb(dev, DMM32AT_CNTRL, DMM32AT_CLKACC); /* write the counter 1 control word and low byte to counter */ dmm_outb(dev, DMM32AT_CLKCT, DMM32AT_CLKCT1); dmm_outb(dev, DMM32AT_CLK1, lo1); /* write the counter 2 control word and low byte then to counter */ dmm_outb(dev, DMM32AT_CLKCT, DMM32AT_CLKCT2); dmm_outb(dev, DMM32AT_CLK2, lo2); dmm_outb(dev, DMM32AT_CLK2, hi2); /* enable the ai conversion interrupt and the clock to start scans */ dmm_outb(dev, DMM32AT_INTCLOCK, DMM32AT_ADINT | DMM32AT_CLKSEL); } /* * A convenient macro that defines init_module() and cleanup_module(), * as necessary. */ static int __init driver_dmm32at_init_module(void) { return comedi_driver_register(&driver_dmm32at); } static void __exit driver_dmm32at_cleanup_module(void) { comedi_driver_unregister(&driver_dmm32at); } module_init(driver_dmm32at_init_module); module_exit(driver_dmm32at_cleanup_module); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
drewx2/android_kernel_htc_dlx
virt/drivers/sn/ioc3.c
9265
20530
/* * SGI IOC3 master driver and IRQ demuxer * * Copyright (c) 2005 Stanislaw Skowronek <skylark@linux-mips.org> * Heavily based on similar work by: * Brent Casavant <bcasavan@sgi.com> - IOC4 master driver * Pat Gefre <pfg@sgi.com> - IOC3 serial port IRQ demuxer */ #include <linux/errno.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/ioc3.h> #include <linux/rwsem.h> #include <linux/slab.h> #define IOC3_PCI_SIZE 0x100000 static LIST_HEAD(ioc3_devices); static int ioc3_counter; static DECLARE_RWSEM(ioc3_devices_rwsem); static struct ioc3_submodule *ioc3_submodules[IOC3_MAX_SUBMODULES]; static struct ioc3_submodule *ioc3_ethernet; static DEFINE_RWLOCK(ioc3_submodules_lock); /* NIC probing code */ #define GPCR_MLAN_EN 0x00200000 /* enable MCR to pin 8 */ static inline unsigned mcr_pack(unsigned pulse, unsigned sample) { return (pulse << 10) | (sample << 2); } static int nic_wait(struct ioc3_driver_data *idd) { unsigned mcr; do { mcr = readl(&idd->vma->mcr); } while (!(mcr & 2)); return mcr & 1; } static int nic_reset(struct ioc3_driver_data *idd) { int presence; unsigned long flags; local_irq_save(flags); writel(mcr_pack(500, 65), &idd->vma->mcr); presence = nic_wait(idd); local_irq_restore(flags); udelay(500); return presence; } static int nic_read_bit(struct ioc3_driver_data *idd) { int result; unsigned long flags; local_irq_save(flags); writel(mcr_pack(6, 13), &idd->vma->mcr); result = nic_wait(idd); local_irq_restore(flags); udelay(500); return result; } static void nic_write_bit(struct ioc3_driver_data *idd, int bit) { if (bit) writel(mcr_pack(6, 110), &idd->vma->mcr); else writel(mcr_pack(80, 30), &idd->vma->mcr); nic_wait(idd); } static unsigned nic_read_byte(struct ioc3_driver_data *idd) { unsigned result = 0; int i; for (i = 0; i < 8; i++) result = (result >> 1) | (nic_read_bit(idd) << 7); return result; } static void 
nic_write_byte(struct ioc3_driver_data *idd, int byte) { int i, bit; for (i = 8; i; i--) { bit = byte & 1; byte >>= 1; nic_write_bit(idd, bit); } } static unsigned long nic_find(struct ioc3_driver_data *idd, int *last, unsigned long addr) { int a, b, index, disc; nic_reset(idd); /* Search ROM. */ nic_write_byte(idd, 0xF0); /* Algorithm from ``Book of iButton Standards''. */ for (index = 0, disc = 0; index < 64; index++) { a = nic_read_bit(idd); b = nic_read_bit(idd); if (a && b) { printk(KERN_WARNING "IOC3 NIC search failed.\n"); *last = 0; return 0; } if (!a && !b) { if (index == *last) { addr |= 1UL << index; } else if (index > *last) { addr &= ~(1UL << index); disc = index; } else if ((addr & (1UL << index)) == 0) disc = index; nic_write_bit(idd, (addr>>index)&1); continue; } else { if (a) addr |= 1UL << index; else addr &= ~(1UL << index); nic_write_bit(idd, a); continue; } } *last = disc; return addr; } static void nic_addr(struct ioc3_driver_data *idd, unsigned long addr) { int index; nic_reset(idd); nic_write_byte(idd, 0xF0); for (index = 0; index < 64; index++) { nic_read_bit(idd); nic_read_bit(idd); nic_write_bit(idd, (addr>>index)&1); } } static void crc16_byte(unsigned int *crc, unsigned char db) { int i; for(i=0;i<8;i++) { *crc <<= 1; if((db^(*crc>>16)) & 1) *crc ^= 0x8005; db >>= 1; } *crc &= 0xFFFF; } static unsigned int crc16_area(unsigned char *dbs, int size, unsigned int crc) { while(size--) crc16_byte(&crc, *(dbs++)); return crc; } static void crc8_byte(unsigned int *crc, unsigned char db) { int i,f; for(i=0;i<8;i++) { f = (*crc ^ db) & 1; *crc >>= 1; db >>= 1; if(f) *crc ^= 0x8c; } *crc &= 0xff; } static unsigned int crc8_addr(unsigned long addr) { int i; unsigned int crc = 0x00; for(i=0;i<8;i++) crc8_byte(&crc, addr>>(i<<3)); return crc; } static void read_redir_page(struct ioc3_driver_data *idd, unsigned long addr, int page, unsigned char *redir, unsigned char *data) { int loops = 16, i; while(redir[page] != 0xFF) { page = redir[page]^0xFF; 
loops--; if(loops<0) { printk(KERN_ERR "IOC3: NIC circular redirection\n"); return; } } loops = 3; while(loops>0) { nic_addr(idd, addr); nic_write_byte(idd, 0xF0); nic_write_byte(idd, (page << 5) & 0xE0); nic_write_byte(idd, (page >> 3) & 0x1F); for(i=0;i<0x20;i++) data[i] = nic_read_byte(idd); if(crc16_area(data, 0x20, 0x0000) == 0x800d) return; loops--; } printk(KERN_ERR "IOC3: CRC error in data page\n"); for(i=0;i<0x20;i++) data[i] = 0x00; } static void read_redir_map(struct ioc3_driver_data *idd, unsigned long addr, unsigned char *redir) { int i,j,loops = 3,crc_ok; unsigned int crc; while(loops>0) { crc_ok = 1; nic_addr(idd, addr); nic_write_byte(idd, 0xAA); nic_write_byte(idd, 0x00); nic_write_byte(idd, 0x01); for(i=0;i<64;i+=8) { for(j=0;j<8;j++) redir[i+j] = nic_read_byte(idd); crc = crc16_area(redir+i, 8, (i==0)?0x8707:0x0000); crc16_byte(&crc, nic_read_byte(idd)); crc16_byte(&crc, nic_read_byte(idd)); if(crc != 0x800d) crc_ok = 0; } if(crc_ok) return; loops--; } printk(KERN_ERR "IOC3: CRC error in redirection page\n"); for(i=0;i<64;i++) redir[i] = 0xFF; } static void read_nic(struct ioc3_driver_data *idd, unsigned long addr) { unsigned char redir[64]; unsigned char data[64],part[32]; int i,j; /* read redirections */ read_redir_map(idd, addr, redir); /* read data pages */ read_redir_page(idd, addr, 0, redir, data); read_redir_page(idd, addr, 1, redir, data+32); /* assemble the part # */ j=0; for(i=0;i<19;i++) if(data[i+11] != ' ') part[j++] = data[i+11]; for(i=0;i<6;i++) if(data[i+32] != ' ') part[j++] = data[i+32]; part[j] = 0; /* skip Octane power supplies */ if(!strncmp(part, "060-0035-", 9)) return; if(!strncmp(part, "060-0038-", 9)) return; strcpy(idd->nic_part, part); /* assemble the serial # */ j=0; for(i=0;i<10;i++) if(data[i+1] != ' ') idd->nic_serial[j++] = data[i+1]; idd->nic_serial[j] = 0; } static void read_mac(struct ioc3_driver_data *idd, unsigned long addr) { int i, loops = 3; unsigned char data[13]; while(loops>0) { nic_addr(idd, addr); 
nic_write_byte(idd, 0xF0); nic_write_byte(idd, 0x00); nic_write_byte(idd, 0x00); nic_read_byte(idd); for(i=0;i<13;i++) data[i] = nic_read_byte(idd); if(crc16_area(data, 13, 0x0000) == 0x800d) { for(i=10;i>4;i--) idd->nic_mac[10-i] = data[i]; return; } loops--; } printk(KERN_ERR "IOC3: CRC error in MAC address\n"); for(i=0;i<6;i++) idd->nic_mac[i] = 0x00; } static void probe_nic(struct ioc3_driver_data *idd) { int save = 0, loops = 3; unsigned long first, addr; writel(GPCR_MLAN_EN, &idd->vma->gpcr_s); while(loops>0) { idd->nic_part[0] = 0; idd->nic_serial[0] = 0; addr = first = nic_find(idd, &save, 0); if(!first) return; while(1) { if(crc8_addr(addr)) break; else { switch(addr & 0xFF) { case 0x0B: read_nic(idd, addr); break; case 0x09: case 0x89: case 0x91: read_mac(idd, addr); break; } } addr = nic_find(idd, &save, addr); if(addr == first) return; } loops--; } printk(KERN_ERR "IOC3: CRC error in NIC address\n"); } /* Interrupts */ static void write_ireg(struct ioc3_driver_data *idd, uint32_t val, int which) { unsigned long flags; spin_lock_irqsave(&idd->ir_lock, flags); switch (which) { case IOC3_W_IES: writel(val, &idd->vma->sio_ies); break; case IOC3_W_IEC: writel(val, &idd->vma->sio_iec); break; } spin_unlock_irqrestore(&idd->ir_lock, flags); } static inline uint32_t get_pending_intrs(struct ioc3_driver_data *idd) { unsigned long flag; uint32_t intrs = 0; spin_lock_irqsave(&idd->ir_lock, flag); intrs = readl(&idd->vma->sio_ir); intrs &= readl(&idd->vma->sio_ies); spin_unlock_irqrestore(&idd->ir_lock, flag); return intrs; } static irqreturn_t ioc3_intr_io(int irq, void *arg) { unsigned long flags; struct ioc3_driver_data *idd = arg; int handled = 1, id; unsigned int pending; read_lock_irqsave(&ioc3_submodules_lock, flags); if(idd->dual_irq && readb(&idd->vma->eisr)) { /* send Ethernet IRQ to the driver */ if(ioc3_ethernet && idd->active[ioc3_ethernet->id] && ioc3_ethernet->intr) { handled = handled && !ioc3_ethernet->intr(ioc3_ethernet, idd, 0); } } pending = 
get_pending_intrs(idd); /* look at the IO IRQs */ for(id=0;id<IOC3_MAX_SUBMODULES;id++) { if(idd->active[id] && ioc3_submodules[id] && (pending & ioc3_submodules[id]->irq_mask) && ioc3_submodules[id]->intr) { write_ireg(idd, ioc3_submodules[id]->irq_mask, IOC3_W_IEC); if(!ioc3_submodules[id]->intr(ioc3_submodules[id], idd, pending & ioc3_submodules[id]->irq_mask)) pending &= ~ioc3_submodules[id]->irq_mask; if (ioc3_submodules[id]->reset_mask) write_ireg(idd, ioc3_submodules[id]->irq_mask, IOC3_W_IES); } } read_unlock_irqrestore(&ioc3_submodules_lock, flags); if(pending) { printk(KERN_WARNING "IOC3: Pending IRQs 0x%08x discarded and disabled\n",pending); write_ireg(idd, pending, IOC3_W_IEC); handled = 1; } return handled?IRQ_HANDLED:IRQ_NONE; } static irqreturn_t ioc3_intr_eth(int irq, void *arg) { unsigned long flags; struct ioc3_driver_data *idd = (struct ioc3_driver_data *)arg; int handled = 1; if(!idd->dual_irq) return IRQ_NONE; read_lock_irqsave(&ioc3_submodules_lock, flags); if(ioc3_ethernet && idd->active[ioc3_ethernet->id] && ioc3_ethernet->intr) handled = handled && !ioc3_ethernet->intr(ioc3_ethernet, idd, 0); read_unlock_irqrestore(&ioc3_submodules_lock, flags); return handled?IRQ_HANDLED:IRQ_NONE; } void ioc3_enable(struct ioc3_submodule *is, struct ioc3_driver_data *idd, unsigned int irqs) { write_ireg(idd, irqs & is->irq_mask, IOC3_W_IES); } void ioc3_ack(struct ioc3_submodule *is, struct ioc3_driver_data *idd, unsigned int irqs) { writel(irqs & is->irq_mask, &idd->vma->sio_ir); } void ioc3_disable(struct ioc3_submodule *is, struct ioc3_driver_data *idd, unsigned int irqs) { write_ireg(idd, irqs & is->irq_mask, IOC3_W_IEC); } void ioc3_gpcr_set(struct ioc3_driver_data *idd, unsigned int val) { unsigned long flags; spin_lock_irqsave(&idd->gpio_lock, flags); writel(val, &idd->vma->gpcr_s); spin_unlock_irqrestore(&idd->gpio_lock, flags); } /* Keep it simple, stupid! 
*/ static int find_slot(void **tab, int max) { int i; for(i=0;i<max;i++) if(!(tab[i])) return i; return -1; } /* Register an IOC3 submodule */ int ioc3_register_submodule(struct ioc3_submodule *is) { struct ioc3_driver_data *idd; int alloc_id; unsigned long flags; write_lock_irqsave(&ioc3_submodules_lock, flags); alloc_id = find_slot((void **)ioc3_submodules, IOC3_MAX_SUBMODULES); if(alloc_id != -1) { ioc3_submodules[alloc_id] = is; if(is->ethernet) { if(ioc3_ethernet==NULL) ioc3_ethernet=is; else printk(KERN_WARNING "IOC3 Ethernet module already registered!\n"); } } write_unlock_irqrestore(&ioc3_submodules_lock, flags); if(alloc_id == -1) { printk(KERN_WARNING "Increase IOC3_MAX_SUBMODULES!\n"); return -ENOMEM; } is->id=alloc_id; /* Initialize submodule for each IOC3 */ if (!is->probe) return 0; down_read(&ioc3_devices_rwsem); list_for_each_entry(idd, &ioc3_devices, list) { /* set to 1 for IRQs in probe */ idd->active[alloc_id] = 1; idd->active[alloc_id] = !is->probe(is, idd); } up_read(&ioc3_devices_rwsem); return 0; } /* Unregister an IOC3 submodule */ void ioc3_unregister_submodule(struct ioc3_submodule *is) { struct ioc3_driver_data *idd; unsigned long flags; write_lock_irqsave(&ioc3_submodules_lock, flags); if(ioc3_submodules[is->id]==is) ioc3_submodules[is->id]=NULL; else printk(KERN_WARNING "IOC3 submodule %s has wrong ID.\n",is->name); if(ioc3_ethernet==is) ioc3_ethernet = NULL; write_unlock_irqrestore(&ioc3_submodules_lock, flags); /* Remove submodule for each IOC3 */ down_read(&ioc3_devices_rwsem); list_for_each_entry(idd, &ioc3_devices, list) if(idd->active[is->id]) { if(is->remove) if(is->remove(is, idd)) printk(KERN_WARNING "%s: IOC3 submodule %s remove failed " "for pci_dev %s.\n", __func__, module_name(is->owner), pci_name(idd->pdev)); idd->active[is->id] = 0; if(is->irq_mask) write_ireg(idd, is->irq_mask, IOC3_W_IEC); } up_read(&ioc3_devices_rwsem); } /********************* * Device management * *********************/ static char * __devinitdata 
ioc3_class_names[]={"unknown", "IP27 BaseIO", "IP30 system", "MENET 1/2/3", "MENET 4", "CADduo", "Altix Serial"}; static int __devinit ioc3_class(struct ioc3_driver_data *idd) { int res = IOC3_CLASS_NONE; /* NIC-based logic */ if(!strncmp(idd->nic_part, "030-0891-", 9)) res = IOC3_CLASS_BASE_IP30; if(!strncmp(idd->nic_part, "030-1155-", 9)) res = IOC3_CLASS_CADDUO; if(!strncmp(idd->nic_part, "030-1657-", 9)) res = IOC3_CLASS_SERIAL; if(!strncmp(idd->nic_part, "030-1664-", 9)) res = IOC3_CLASS_SERIAL; /* total random heuristics */ #ifdef CONFIG_SGI_IP27 if(!idd->nic_part[0]) res = IOC3_CLASS_BASE_IP27; #endif /* print educational message */ printk(KERN_INFO "IOC3 part: [%s], serial: [%s] => class %s\n", idd->nic_part, idd->nic_serial, ioc3_class_names[res]); return res; } /* Adds a new instance of an IOC3 card */ static int __devinit ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { struct ioc3_driver_data *idd; uint32_t pcmd; int ret, id; /* Enable IOC3 and take ownership of it */ if ((ret = pci_enable_device(pdev))) { printk(KERN_WARNING "%s: Failed to enable IOC3 device for pci_dev %s.\n", __func__, pci_name(pdev)); goto out; } pci_set_master(pdev); #ifdef USE_64BIT_DMA ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (!ret) { ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (ret < 0) { printk(KERN_WARNING "%s: Unable to obtain 64 bit DMA " "for consistent allocations\n", __func__); } } #endif /* Set up per-IOC3 data */ idd = kzalloc(sizeof(struct ioc3_driver_data), GFP_KERNEL); if (!idd) { printk(KERN_WARNING "%s: Failed to allocate IOC3 data for pci_dev %s.\n", __func__, pci_name(pdev)); ret = -ENODEV; goto out_idd; } spin_lock_init(&idd->ir_lock); spin_lock_init(&idd->gpio_lock); idd->pdev = pdev; /* Map all IOC3 registers. These are shared between subdevices * so the main IOC3 module manages them. 
*/ idd->pma = pci_resource_start(pdev, 0); if (!idd->pma) { printk(KERN_WARNING "%s: Unable to find IOC3 resource " "for pci_dev %s.\n", __func__, pci_name(pdev)); ret = -ENODEV; goto out_pci; } if (!request_mem_region(idd->pma, IOC3_PCI_SIZE, "ioc3")) { printk(KERN_WARNING "%s: Unable to request IOC3 region " "for pci_dev %s.\n", __func__, pci_name(pdev)); ret = -ENODEV; goto out_pci; } idd->vma = ioremap(idd->pma, IOC3_PCI_SIZE); if (!idd->vma) { printk(KERN_WARNING "%s: Unable to remap IOC3 region " "for pci_dev %s.\n", __func__, pci_name(pdev)); ret = -ENODEV; goto out_misc_region; } /* Track PCI-device specific data */ pci_set_drvdata(pdev, idd); down_write(&ioc3_devices_rwsem); list_add_tail(&idd->list, &ioc3_devices); idd->id = ioc3_counter++; up_write(&ioc3_devices_rwsem); idd->gpdr_shadow = readl(&idd->vma->gpdr); /* Read IOC3 NIC contents */ probe_nic(idd); /* Detect IOC3 class */ idd->class = ioc3_class(idd); /* Initialize IOC3 */ pci_read_config_dword(pdev, PCI_COMMAND, &pcmd); pci_write_config_dword(pdev, PCI_COMMAND, pcmd | PCI_COMMAND_MEMORY | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_SCR_DROP_MODE_EN); write_ireg(idd, ~0, IOC3_W_IEC); writel(~0, &idd->vma->sio_ir); /* Set up IRQs */ if(idd->class == IOC3_CLASS_BASE_IP30 || idd->class == IOC3_CLASS_BASE_IP27) { writel(0, &idd->vma->eier); writel(~0, &idd->vma->eisr); idd->dual_irq = 1; if (!request_irq(pdev->irq, ioc3_intr_eth, IRQF_SHARED, "ioc3-eth", (void *)idd)) { idd->irq_eth = pdev->irq; } else { printk(KERN_WARNING "%s : request_irq fails for IRQ 0x%x\n ", __func__, pdev->irq); } if (!request_irq(pdev->irq+2, ioc3_intr_io, IRQF_SHARED, "ioc3-io", (void *)idd)) { idd->irq_io = pdev->irq+2; } else { printk(KERN_WARNING "%s : request_irq fails for IRQ 0x%x\n ", __func__, pdev->irq+2); } } else { if (!request_irq(pdev->irq, ioc3_intr_io, IRQF_SHARED, "ioc3", (void *)idd)) { idd->irq_io = pdev->irq; } else { printk(KERN_WARNING "%s : request_irq fails for IRQ 0x%x\n ", __func__, pdev->irq); } } 
/* Add this IOC3 to all submodules */ for(id=0;id<IOC3_MAX_SUBMODULES;id++) if(ioc3_submodules[id] && ioc3_submodules[id]->probe) { idd->active[id] = 1; idd->active[id] = !ioc3_submodules[id]->probe (ioc3_submodules[id], idd); } printk(KERN_INFO "IOC3 Master Driver loaded for %s\n", pci_name(pdev)); return 0; out_misc_region: release_mem_region(idd->pma, IOC3_PCI_SIZE); out_pci: kfree(idd); out_idd: pci_disable_device(pdev); out: return ret; } /* Removes a particular instance of an IOC3 card. */ static void __devexit ioc3_remove(struct pci_dev *pdev) { int id; struct ioc3_driver_data *idd; idd = pci_get_drvdata(pdev); /* Remove this IOC3 from all submodules */ for(id=0;id<IOC3_MAX_SUBMODULES;id++) if(idd->active[id]) { if(ioc3_submodules[id] && ioc3_submodules[id]->remove) if(ioc3_submodules[id]->remove(ioc3_submodules[id], idd)) printk(KERN_WARNING "%s: IOC3 submodule 0x%s remove failed " "for pci_dev %s.\n", __func__, module_name(ioc3_submodules[id]->owner), pci_name(pdev)); idd->active[id] = 0; } /* Clear and disable all IRQs */ write_ireg(idd, ~0, IOC3_W_IEC); writel(~0, &idd->vma->sio_ir); /* Release resources */ free_irq(idd->irq_io, (void *)idd); if(idd->dual_irq) free_irq(idd->irq_eth, (void *)idd); iounmap(idd->vma); release_mem_region(idd->pma, IOC3_PCI_SIZE); /* Disable IOC3 and relinquish */ pci_disable_device(pdev); /* Remove and free driver data */ down_write(&ioc3_devices_rwsem); list_del(&idd->list); up_write(&ioc3_devices_rwsem); kfree(idd); } static struct pci_device_id ioc3_id_table[] = { {PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID}, {0} }; static struct pci_driver ioc3_driver = { .name = "IOC3", .id_table = ioc3_id_table, .probe = ioc3_probe, .remove = __devexit_p(ioc3_remove), }; MODULE_DEVICE_TABLE(pci, ioc3_id_table); /********************* * Module management * *********************/ /* Module load */ static int __init ioc3_init(void) { if (ia64_platform_is("sn2")) return pci_register_driver(&ioc3_driver); return 
-ENODEV; } /* Module unload */ static void __exit ioc3_exit(void) { pci_unregister_driver(&ioc3_driver); } module_init(ioc3_init); module_exit(ioc3_exit); MODULE_AUTHOR("Stanislaw Skowronek <skylark@linux-mips.org>"); MODULE_DESCRIPTION("PCI driver for SGI IOC3"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL_GPL(ioc3_register_submodule); EXPORT_SYMBOL_GPL(ioc3_unregister_submodule); EXPORT_SYMBOL_GPL(ioc3_ack); EXPORT_SYMBOL_GPL(ioc3_gpcr_set); EXPORT_SYMBOL_GPL(ioc3_disable); EXPORT_SYMBOL_GPL(ioc3_enable);
gpl-2.0
googyanas/GoogyMax-6P
drivers/infiniband/hw/amso1100/c2_cm.c
11569
10009
/* * Copyright (c) 2005 Ammasso, Inc. All rights reserved. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/slab.h> #include "c2.h" #include "c2_wr.h" #include "c2_vq.h" #include <rdma/iw_cm.h> int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) { struct c2_dev *c2dev = to_c2dev(cm_id->device); struct ib_qp *ibqp; struct c2_qp *qp; struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. 
*/ struct c2_vq_req *vq_req; int err; ibqp = c2_get_qp(cm_id->device, iw_param->qpn); if (!ibqp) return -EINVAL; qp = to_c2qp(ibqp); /* Associate QP <--> CM_ID */ cm_id->provider_data = qp; cm_id->add_ref(cm_id); qp->cm_id = cm_id; /* * only support the max private_data length */ if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) { err = -EINVAL; goto bail0; } /* * Set the rdma read limits */ err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird); if (err) goto bail0; /* * Create and send a WR_QP_CONNECT... */ wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); if (!wr) { err = -ENOMEM; goto bail0; } vq_req = vq_req_alloc(c2dev); if (!vq_req) { err = -ENOMEM; goto bail1; } c2_wr_set_id(wr, CCWR_QP_CONNECT); wr->hdr.context = 0; wr->rnic_handle = c2dev->adapter_handle; wr->qp_handle = qp->adapter_handle; wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr; wr->remote_port = cm_id->remote_addr.sin_port; /* * Move any private data from the callers's buf into * the WR. */ if (iw_param->private_data) { wr->private_data_length = cpu_to_be32(iw_param->private_data_len); memcpy(&wr->private_data[0], iw_param->private_data, iw_param->private_data_len); } else wr->private_data_length = 0; /* * Send WR to adapter. NOTE: There is no synch reply from * the adapter. */ err = vq_send_wr(c2dev, (union c2wr *) wr); vq_req_free(c2dev, vq_req); bail1: kfree(wr); bail0: if (err) { /* * If we fail, release reference on QP and * disassociate QP from CM_ID */ cm_id->provider_data = NULL; qp->cm_id = NULL; cm_id->rem_ref(cm_id); } return err; } int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog) { struct c2_dev *c2dev; struct c2wr_ep_listen_create_req wr; struct c2wr_ep_listen_create_rep *reply; struct c2_vq_req *vq_req; int err; c2dev = to_c2dev(cm_id->device); if (c2dev == NULL) return -EINVAL; /* * Allocate verbs request. 
*/ vq_req = vq_req_alloc(c2dev); if (!vq_req) return -ENOMEM; /* * Build the WR */ c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE); wr.hdr.context = (u64) (unsigned long) vq_req; wr.rnic_handle = c2dev->adapter_handle; wr.local_addr = cm_id->local_addr.sin_addr.s_addr; wr.local_port = cm_id->local_addr.sin_port; wr.backlog = cpu_to_be32(backlog); wr.user_context = (u64) (unsigned long) cm_id; /* * Reference the request struct. Dereferenced in the int handler. */ vq_req_get(c2dev, vq_req); /* * Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) & wr); if (err) { vq_req_put(c2dev, vq_req); goto bail0; } /* * Wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) goto bail0; /* * Process reply */ reply = (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail1; } if ((err = c2_errno(reply)) != 0) goto bail1; /* * Keep the adapter handle. Used in subsequent destroy */ cm_id->provider_data = (void*)(unsigned long) reply->ep_handle; /* * free vq stuff */ vq_repbuf_free(c2dev, reply); vq_req_free(c2dev, vq_req); return 0; bail1: vq_repbuf_free(c2dev, reply); bail0: vq_req_free(c2dev, vq_req); return err; } int c2_llp_service_destroy(struct iw_cm_id *cm_id) { struct c2_dev *c2dev; struct c2wr_ep_listen_destroy_req wr; struct c2wr_ep_listen_destroy_rep *reply; struct c2_vq_req *vq_req; int err; c2dev = to_c2dev(cm_id->device); if (c2dev == NULL) return -EINVAL; /* * Allocate verbs request. */ vq_req = vq_req_alloc(c2dev); if (!vq_req) return -ENOMEM; /* * Build the WR */ c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY); wr.hdr.context = (unsigned long) vq_req; wr.rnic_handle = c2dev->adapter_handle; wr.ep_handle = (u32)(unsigned long)cm_id->provider_data; /* * reference the request struct. dereferenced in the int handler. 
*/ vq_req_get(c2dev, vq_req); /* * Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) & wr); if (err) { vq_req_put(c2dev, vq_req); goto bail0; } /* * Wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) goto bail0; /* * Process reply */ reply=(struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail0; } if ((err = c2_errno(reply)) != 0) goto bail1; bail1: vq_repbuf_free(c2dev, reply); bail0: vq_req_free(c2dev, vq_req); return err; } int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) { struct c2_dev *c2dev = to_c2dev(cm_id->device); struct c2_qp *qp; struct ib_qp *ibqp; struct c2wr_cr_accept_req *wr; /* variable length WR */ struct c2_vq_req *vq_req; struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */ int err; ibqp = c2_get_qp(cm_id->device, iw_param->qpn); if (!ibqp) return -EINVAL; qp = to_c2qp(ibqp); /* Set the RDMA read limits */ err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird); if (err) goto bail0; /* Allocate verbs request. 
*/ vq_req = vq_req_alloc(c2dev); if (!vq_req) { err = -ENOMEM; goto bail0; } vq_req->qp = qp; vq_req->cm_id = cm_id; vq_req->event = IW_CM_EVENT_ESTABLISHED; wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL); if (!wr) { err = -ENOMEM; goto bail1; } /* Build the WR */ c2_wr_set_id(wr, CCWR_CR_ACCEPT); wr->hdr.context = (unsigned long) vq_req; wr->rnic_handle = c2dev->adapter_handle; wr->ep_handle = (u32) (unsigned long) cm_id->provider_data; wr->qp_handle = qp->adapter_handle; /* Replace the cr_handle with the QP after accept */ cm_id->provider_data = qp; cm_id->add_ref(cm_id); qp->cm_id = cm_id; cm_id->provider_data = qp; /* Validate private_data length */ if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) { err = -EINVAL; goto bail1; } if (iw_param->private_data) { wr->private_data_length = cpu_to_be32(iw_param->private_data_len); memcpy(&wr->private_data[0], iw_param->private_data, iw_param->private_data_len); } else wr->private_data_length = 0; /* Reference the request struct. Dereferenced in the int handler. 
*/ vq_req_get(c2dev, vq_req); /* Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) wr); if (err) { vq_req_put(c2dev, vq_req); goto bail1; } /* Wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) goto bail1; /* Check that reply is present */ reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail1; } err = c2_errno(reply); vq_repbuf_free(c2dev, reply); if (!err) c2_set_qp_state(qp, C2_QP_STATE_RTS); bail1: kfree(wr); vq_req_free(c2dev, vq_req); bail0: if (err) { /* * If we fail, release reference on QP and * disassociate QP from CM_ID */ cm_id->provider_data = NULL; qp->cm_id = NULL; cm_id->rem_ref(cm_id); } return err; } int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) { struct c2_dev *c2dev; struct c2wr_cr_reject_req wr; struct c2_vq_req *vq_req; struct c2wr_cr_reject_rep *reply; int err; c2dev = to_c2dev(cm_id->device); /* * Allocate verbs request. */ vq_req = vq_req_alloc(c2dev); if (!vq_req) return -ENOMEM; /* * Build the WR */ c2_wr_set_id(&wr, CCWR_CR_REJECT); wr.hdr.context = (unsigned long) vq_req; wr.rnic_handle = c2dev->adapter_handle; wr.ep_handle = (u32) (unsigned long) cm_id->provider_data; /* * reference the request struct. dereferenced in the int handler. */ vq_req_get(c2dev, vq_req); /* * Send WR to adapter */ err = vq_send_wr(c2dev, (union c2wr *) & wr); if (err) { vq_req_put(c2dev, vq_req); goto bail0; } /* * Wait for reply from adapter */ err = vq_wait_for_reply(c2dev, vq_req); if (err) goto bail0; /* * Process reply */ reply = (struct c2wr_cr_reject_rep *) (unsigned long) vq_req->reply_msg; if (!reply) { err = -ENOMEM; goto bail0; } err = c2_errno(reply); /* * free vq stuff */ vq_repbuf_free(c2dev, reply); bail0: vq_req_free(c2dev, vq_req); return err; }
gpl-2.0
randomblame/kernel_u8800pro
drivers/media/dvb/b2c2/flexcop-hw-filter.c
13617
6638
/* * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III * flexcop-hw-filter.c - pid and mac address filtering and control functions * see flexcop.c for copyright information */ #include "flexcop.h" static void flexcop_rcv_data_ctrl(struct flexcop_device *fc, int onoff) { flexcop_set_ibi_value(ctrl_208, Rcv_Data_sig, onoff); deb_ts("rcv_data is now: '%s'\n", onoff ? "on" : "off"); } void flexcop_smc_ctrl(struct flexcop_device *fc, int onoff) { flexcop_set_ibi_value(ctrl_208, SMC_Enable_sig, onoff); } static void flexcop_null_filter_ctrl(struct flexcop_device *fc, int onoff) { flexcop_set_ibi_value(ctrl_208, Null_filter_sig, onoff); } void flexcop_set_mac_filter(struct flexcop_device *fc, u8 mac[6]) { flexcop_ibi_value v418, v41c; v41c = fc->read_ibi_reg(fc, mac_address_41c); v418.mac_address_418.MAC1 = mac[0]; v418.mac_address_418.MAC2 = mac[1]; v418.mac_address_418.MAC3 = mac[2]; v418.mac_address_418.MAC6 = mac[3]; v41c.mac_address_41c.MAC7 = mac[4]; v41c.mac_address_41c.MAC8 = mac[5]; fc->write_ibi_reg(fc, mac_address_418, v418); fc->write_ibi_reg(fc, mac_address_41c, v41c); } void flexcop_mac_filter_ctrl(struct flexcop_device *fc, int onoff) { flexcop_set_ibi_value(ctrl_208, MAC_filter_Mode_sig, onoff); } static void flexcop_pid_group_filter(struct flexcop_device *fc, u16 pid, u16 mask) { /* index_reg_310.extra_index_reg need to 0 or 7 to work */ flexcop_ibi_value v30c; v30c.pid_filter_30c_ext_ind_0_7.Group_PID = pid; v30c.pid_filter_30c_ext_ind_0_7.Group_mask = mask; fc->write_ibi_reg(fc, pid_filter_30c, v30c); } static void flexcop_pid_group_filter_ctrl(struct flexcop_device *fc, int onoff) { flexcop_set_ibi_value(ctrl_208, Mask_filter_sig, onoff); } /* this fancy define reduces the code size of the quite similar PID controlling of * the first 6 PIDs */ #define pid_ctrl(vregname,field,enablefield,trans_field,transval) \ flexcop_ibi_value vpid = fc->read_ibi_reg(fc, vregname), \ v208 = fc->read_ibi_reg(fc, ctrl_208); \ vpid.vregname.field = 
onoff ? pid : 0x1fff; \ vpid.vregname.trans_field = transval; \ v208.ctrl_208.enablefield = onoff; \ fc->write_ibi_reg(fc, vregname, vpid); \ fc->write_ibi_reg(fc, ctrl_208, v208); static void flexcop_pid_Stream1_PID_ctrl(struct flexcop_device *fc, u16 pid, int onoff) { pid_ctrl(pid_filter_300, Stream1_PID, Stream1_filter_sig, Stream1_trans, 0); } static void flexcop_pid_Stream2_PID_ctrl(struct flexcop_device *fc, u16 pid, int onoff) { pid_ctrl(pid_filter_300, Stream2_PID, Stream2_filter_sig, Stream2_trans, 0); } static void flexcop_pid_PCR_PID_ctrl(struct flexcop_device *fc, u16 pid, int onoff) { pid_ctrl(pid_filter_304, PCR_PID, PCR_filter_sig, PCR_trans, 0); } static void flexcop_pid_PMT_PID_ctrl(struct flexcop_device *fc, u16 pid, int onoff) { pid_ctrl(pid_filter_304, PMT_PID, PMT_filter_sig, PMT_trans, 0); } static void flexcop_pid_EMM_PID_ctrl(struct flexcop_device *fc, u16 pid, int onoff) { pid_ctrl(pid_filter_308, EMM_PID, EMM_filter_sig, EMM_trans, 0); } static void flexcop_pid_ECM_PID_ctrl(struct flexcop_device *fc, u16 pid, int onoff) { pid_ctrl(pid_filter_308, ECM_PID, ECM_filter_sig, ECM_trans, 0); } static void flexcop_pid_control(struct flexcop_device *fc, int index, u16 pid, int onoff) { if (pid == 0x2000) return; deb_ts("setting pid: %5d %04x at index %d '%s'\n", pid, pid, index, onoff ? "on" : "off"); /* We could use bit magic here to reduce source code size. 
* I decided against it, but to use the real register names */ switch (index) { case 0: flexcop_pid_Stream1_PID_ctrl(fc, pid, onoff); break; case 1: flexcop_pid_Stream2_PID_ctrl(fc, pid, onoff); break; case 2: flexcop_pid_PCR_PID_ctrl(fc, pid, onoff); break; case 3: flexcop_pid_PMT_PID_ctrl(fc, pid, onoff); break; case 4: flexcop_pid_EMM_PID_ctrl(fc, pid, onoff); break; case 5: flexcop_pid_ECM_PID_ctrl(fc, pid, onoff); break; default: if (fc->has_32_hw_pid_filter && index < 38) { flexcop_ibi_value vpid, vid; /* set the index */ vid = fc->read_ibi_reg(fc, index_reg_310); vid.index_reg_310.index_reg = index - 6; fc->write_ibi_reg(fc, index_reg_310, vid); vpid = fc->read_ibi_reg(fc, pid_n_reg_314); vpid.pid_n_reg_314.PID = onoff ? pid : 0x1fff; vpid.pid_n_reg_314.PID_enable_bit = onoff; fc->write_ibi_reg(fc, pid_n_reg_314, vpid); } break; } } static int flexcop_toggle_fullts_streaming(struct flexcop_device *fc, int onoff) { if (fc->fullts_streaming_state != onoff) { deb_ts("%s full TS transfer\n",onoff ? "enabling" : "disabling"); flexcop_pid_group_filter(fc, 0, 0x1fe0 * (!onoff)); flexcop_pid_group_filter_ctrl(fc, onoff); fc->fullts_streaming_state = onoff; } return 0; } int flexcop_pid_feed_control(struct flexcop_device *fc, struct dvb_demux_feed *dvbdmxfeed, int onoff) { int max_pid_filter = 6 + fc->has_32_hw_pid_filter*32; fc->feedcount += onoff ? 1 : -1; /* the number of PIDs/Feed currently requested */ if (dvbdmxfeed->index >= max_pid_filter) fc->extra_feedcount += onoff ? 
1 : -1; /* toggle complete-TS-streaming when: * - pid_filtering is not enabled and it is the first or last feed requested * - pid_filtering is enabled, * - but the number of requested feeds is exceeded * - or the requested pid is 0x2000 */ if (!fc->pid_filtering && fc->feedcount == onoff) flexcop_toggle_fullts_streaming(fc, onoff); if (fc->pid_filtering) { flexcop_pid_control \ (fc, dvbdmxfeed->index, dvbdmxfeed->pid, onoff); if (fc->extra_feedcount > 0) flexcop_toggle_fullts_streaming(fc, 1); else if (dvbdmxfeed->pid == 0x2000) flexcop_toggle_fullts_streaming(fc, onoff); else flexcop_toggle_fullts_streaming(fc, 0); } /* if it was the first or last feed request change the stream-status */ if (fc->feedcount == onoff) { flexcop_rcv_data_ctrl(fc, onoff); if (fc->stream_control) /* device specific stream control */ fc->stream_control(fc, onoff); /* feeding stopped -> reset the flexcop filter*/ if (onoff == 0) { flexcop_reset_block_300(fc); flexcop_hw_filter_init(fc); } } return 0; } EXPORT_SYMBOL(flexcop_pid_feed_control); void flexcop_hw_filter_init(struct flexcop_device *fc) { int i; flexcop_ibi_value v; for (i = 0; i < 6 + 32*fc->has_32_hw_pid_filter; i++) flexcop_pid_control(fc, i, 0x1fff, 0); flexcop_pid_group_filter(fc, 0, 0x1fe0); flexcop_pid_group_filter_ctrl(fc, 0); v = fc->read_ibi_reg(fc, pid_filter_308); v.pid_filter_308.EMM_filter_4 = 1; v.pid_filter_308.EMM_filter_6 = 0; fc->write_ibi_reg(fc, pid_filter_308, v); flexcop_null_filter_ctrl(fc, 1); }
gpl-2.0
Vajnar/linux-stable-hx4700
drivers/infiniband/hw/qib/qib_sysfs.c
50
18660
/* * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. * Copyright (c) 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/ctype.h> #include "qib.h" /** * qib_parse_ushort - parse an unsigned short value in an arbitrary base * @str: the string containing the number * @valp: where to put the result * * Returns the number of bytes consumed, or negative value on error. 
*/ static int qib_parse_ushort(const char *str, unsigned short *valp) { unsigned long val; char *end; int ret; if (!isdigit(str[0])) { ret = -EINVAL; goto bail; } val = simple_strtoul(str, &end, 0); if (val > 0xffff) { ret = -EINVAL; goto bail; } *valp = val; ret = end + 1 - str; if (ret == 0) ret = -EINVAL; bail: return ret; } /* start of per-port functions */ /* * Get/Set heartbeat enable. OR of 1=enabled, 2=auto */ static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf) { struct qib_devdata *dd = ppd->dd; int ret; ret = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT); ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret); return ret; } static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf, size_t count) { struct qib_devdata *dd = ppd->dd; int ret; u16 val; ret = qib_parse_ushort(buf, &val); /* * Set the "intentional" heartbeat enable per either of * "Enable" and "Auto", as these are normally set together. * This bit is consulted when leaving loopback mode, * because entering loopback mode overrides it and automatically * disables heartbeat. */ if (ret >= 0) ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val); if (ret < 0) qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n"); return ret < 0 ? ret : count; } static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf, size_t count) { struct qib_devdata *dd = ppd->dd; int ret = count, r; r = dd->f_set_ib_loopback(ppd, buf); if (r < 0) ret = r; return ret; } static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf, size_t count) { struct qib_devdata *dd = ppd->dd; int ret; u16 val; ret = qib_parse_ushort(buf, &val); if (ret > 0) qib_set_led_override(ppd, val); else qib_dev_err(dd, "attempt to set invalid LED override\n"); return ret < 0 ? 
ret : count; } static ssize_t show_status(struct qib_pportdata *ppd, char *buf) { ssize_t ret; if (!ppd->statusp) ret = -EINVAL; else ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long) *(ppd->statusp)); return ret; } /* * For userland compatibility, these offsets must remain fixed. * They are strings for QIB_STATUS_* */ static const char * const qib_status_str[] = { "Initted", "", "", "", "", "Present", "IB_link_up", "IB_configured", "", "Fatal_Hardware_Error", NULL, }; static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf) { int i, any; u64 s; ssize_t ret; if (!ppd->statusp) { ret = -EINVAL; goto bail; } s = *(ppd->statusp); *buf = '\0'; for (any = i = 0; s && qib_status_str[i]; i++) { if (s & 1) { /* if overflow */ if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) break; if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >= PAGE_SIZE) break; any = 1; } s >>= 1; } if (any) strlcat(buf, "\n", PAGE_SIZE); ret = strlen(buf); bail: return ret; } /* end of per-port functions */ /* * Start of per-port file structures and support code * Because we are fitting into other infrastructure, we have to supply the * full set of kobject/sysfs_ops structures and routines. 
*/ #define QIB_PORT_ATTR(name, mode, show, store) \ static struct qib_port_attr qib_port_attr_##name = \ __ATTR(name, mode, show, store) struct qib_port_attr { struct attribute attr; ssize_t (*show)(struct qib_pportdata *, char *); ssize_t (*store)(struct qib_pportdata *, const char *, size_t); }; QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback); QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override); QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb, store_hrtbt_enb); QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL); QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL); static struct attribute *port_default_attributes[] = { &qib_port_attr_loopback.attr, &qib_port_attr_led_override.attr, &qib_port_attr_hrtbt_enable.attr, &qib_port_attr_status.attr, &qib_port_attr_status_str.attr, NULL }; static ssize_t qib_portattr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct qib_port_attr *pattr = container_of(attr, struct qib_port_attr, attr); struct qib_pportdata *ppd = container_of(kobj, struct qib_pportdata, pport_kobj); return pattr->show(ppd, buf); } static ssize_t qib_portattr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct qib_port_attr *pattr = container_of(attr, struct qib_port_attr, attr); struct qib_pportdata *ppd = container_of(kobj, struct qib_pportdata, pport_kobj); return pattr->store(ppd, buf, len); } static void qib_port_release(struct kobject *kobj) { /* nothing to do since memory is freed by qib_free_devdata() */ } static const struct sysfs_ops qib_port_ops = { .show = qib_portattr_show, .store = qib_portattr_store, }; static struct kobj_type qib_port_ktype = { .release = qib_port_release, .sysfs_ops = &qib_port_ops, .default_attrs = port_default_attributes }; /* Start sl2vl */ #define QIB_SL2VL_ATTR(N) \ static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \ .attr = { .name = __stringify(N), .mode = 0444 }, \ .sl = N \ } struct qib_sl2vl_attr { struct attribute 
attr; int sl; }; QIB_SL2VL_ATTR(0); QIB_SL2VL_ATTR(1); QIB_SL2VL_ATTR(2); QIB_SL2VL_ATTR(3); QIB_SL2VL_ATTR(4); QIB_SL2VL_ATTR(5); QIB_SL2VL_ATTR(6); QIB_SL2VL_ATTR(7); QIB_SL2VL_ATTR(8); QIB_SL2VL_ATTR(9); QIB_SL2VL_ATTR(10); QIB_SL2VL_ATTR(11); QIB_SL2VL_ATTR(12); QIB_SL2VL_ATTR(13); QIB_SL2VL_ATTR(14); QIB_SL2VL_ATTR(15); static struct attribute *sl2vl_default_attributes[] = { &qib_sl2vl_attr_0.attr, &qib_sl2vl_attr_1.attr, &qib_sl2vl_attr_2.attr, &qib_sl2vl_attr_3.attr, &qib_sl2vl_attr_4.attr, &qib_sl2vl_attr_5.attr, &qib_sl2vl_attr_6.attr, &qib_sl2vl_attr_7.attr, &qib_sl2vl_attr_8.attr, &qib_sl2vl_attr_9.attr, &qib_sl2vl_attr_10.attr, &qib_sl2vl_attr_11.attr, &qib_sl2vl_attr_12.attr, &qib_sl2vl_attr_13.attr, &qib_sl2vl_attr_14.attr, &qib_sl2vl_attr_15.attr, NULL }; static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct qib_sl2vl_attr *sattr = container_of(attr, struct qib_sl2vl_attr, attr); struct qib_pportdata *ppd = container_of(kobj, struct qib_pportdata, sl2vl_kobj); struct qib_ibport *qibp = &ppd->ibport_data; return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]); } static const struct sysfs_ops qib_sl2vl_ops = { .show = sl2vl_attr_show, }; static struct kobj_type qib_sl2vl_ktype = { .release = qib_port_release, .sysfs_ops = &qib_sl2vl_ops, .default_attrs = sl2vl_default_attributes }; /* End sl2vl */ /* Start diag_counters */ #define QIB_DIAGC_ATTR(N) \ static struct qib_diagc_attr qib_diagc_attr_##N = { \ .attr = { .name = __stringify(N), .mode = 0664 }, \ .counter = offsetof(struct qib_ibport, n_##N) \ } struct qib_diagc_attr { struct attribute attr; size_t counter; }; QIB_DIAGC_ATTR(rc_resends); QIB_DIAGC_ATTR(rc_acks); QIB_DIAGC_ATTR(rc_qacks); QIB_DIAGC_ATTR(rc_delayed_comp); QIB_DIAGC_ATTR(seq_naks); QIB_DIAGC_ATTR(rdma_seq); QIB_DIAGC_ATTR(rnr_naks); QIB_DIAGC_ATTR(other_naks); QIB_DIAGC_ATTR(rc_timeouts); QIB_DIAGC_ATTR(loop_pkts); QIB_DIAGC_ATTR(pkt_drops); QIB_DIAGC_ATTR(dmawait); 
QIB_DIAGC_ATTR(unaligned); QIB_DIAGC_ATTR(rc_dupreq); QIB_DIAGC_ATTR(rc_seqnak); static struct attribute *diagc_default_attributes[] = { &qib_diagc_attr_rc_resends.attr, &qib_diagc_attr_rc_acks.attr, &qib_diagc_attr_rc_qacks.attr, &qib_diagc_attr_rc_delayed_comp.attr, &qib_diagc_attr_seq_naks.attr, &qib_diagc_attr_rdma_seq.attr, &qib_diagc_attr_rnr_naks.attr, &qib_diagc_attr_other_naks.attr, &qib_diagc_attr_rc_timeouts.attr, &qib_diagc_attr_loop_pkts.attr, &qib_diagc_attr_pkt_drops.attr, &qib_diagc_attr_dmawait.attr, &qib_diagc_attr_unaligned.attr, &qib_diagc_attr_rc_dupreq.attr, &qib_diagc_attr_rc_seqnak.attr, NULL }; static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct qib_diagc_attr *dattr = container_of(attr, struct qib_diagc_attr, attr); struct qib_pportdata *ppd = container_of(kobj, struct qib_pportdata, diagc_kobj); struct qib_ibport *qibp = &ppd->ibport_data; return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter)); } static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t size) { struct qib_diagc_attr *dattr = container_of(attr, struct qib_diagc_attr, attr); struct qib_pportdata *ppd = container_of(kobj, struct qib_pportdata, diagc_kobj); struct qib_ibport *qibp = &ppd->ibport_data; char *endp; long val = simple_strtol(buf, &endp, 0); if (val < 0 || endp == buf) return -EINVAL; *(u32 *)((char *) qibp + dattr->counter) = val; return size; } static const struct sysfs_ops qib_diagc_ops = { .show = diagc_attr_show, .store = diagc_attr_store, }; static struct kobj_type qib_diagc_ktype = { .release = qib_port_release, .sysfs_ops = &qib_diagc_ops, .default_attrs = diagc_default_attributes }; /* End diag_counters */ /* end of per-port file structures and support code */ /* * Start of per-unit (or driver, in some cases, but replicated * per unit) functions (these get a device *) */ static ssize_t show_rev(struct device *device, struct device_attribute *attr, char 
*buf) { struct qib_ibdev *dev = container_of(device, struct qib_ibdev, ibdev.dev); return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev); } static ssize_t show_hca(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = container_of(device, struct qib_ibdev, ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); int ret; if (!dd->boardname) ret = -EINVAL; else ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname); return ret; } static ssize_t show_version(struct device *device, struct device_attribute *attr, char *buf) { /* The string printed here is already newline-terminated. */ return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version); } static ssize_t show_boardversion(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = container_of(device, struct qib_ibdev, ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); /* The string printed here is already newline-terminated. */ return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion); } static ssize_t show_localbus_info(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = container_of(device, struct qib_ibdev, ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); /* The string printed here is already newline-terminated. */ return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info); } static ssize_t show_nctxts(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = container_of(device, struct qib_ibdev, ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); /* Return the number of user ports (contexts) available. */ /* The calculation below deals with a special case where * cfgctxts is set to 1 on a single-port board. */ return scnprintf(buf, PAGE_SIZE, "%u\n", (dd->first_user_ctxt > dd->cfgctxts) ? 
0 : (dd->cfgctxts - dd->first_user_ctxt)); } static ssize_t show_nfreectxts(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = container_of(device, struct qib_ibdev, ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); /* Return the number of free user ports (contexts) available. */ return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts); } static ssize_t show_serial(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = container_of(device, struct qib_ibdev, ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); buf[sizeof dd->serial] = '\0'; memcpy(buf, dd->serial, sizeof dd->serial); strcat(buf, "\n"); return strlen(buf); } static ssize_t store_chip_reset(struct device *device, struct device_attribute *attr, const char *buf, size_t count) { struct qib_ibdev *dev = container_of(device, struct qib_ibdev, ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); int ret; if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) { ret = -EINVAL; goto bail; } ret = qib_reset_device(dd->unit); bail: return ret < 0 ? ret : count; } static ssize_t show_logged_errs(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = container_of(device, struct qib_ibdev, ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); int idx, count; /* force consistency with actual EEPROM */ if (qib_update_eeprom_log(dd) != 0) return -ENXIO; count = 0; for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c", dd->eep_st_errs[idx], idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' '); } return count; } /* * Dump tempsense regs. in decimal, to ease shell-scripts. 
*/ static ssize_t show_tempsense(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = container_of(device, struct qib_ibdev, ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); int ret; int idx; u8 regvals[8]; ret = -ENXIO; for (idx = 0; idx < 8; ++idx) { if (idx == 6) continue; ret = dd->f_tempsense_rd(dd, idx); if (ret < 0) break; regvals[idx] = ret; } if (idx == 8) ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n", *(signed char *)(regvals), *(signed char *)(regvals + 1), regvals[2], regvals[3], *(signed char *)(regvals + 5), *(signed char *)(regvals + 7)); return ret; } /* * end of per-unit (or driver, in some cases, but replicated * per unit) functions */ /* start of per-unit file structures and support code */ static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL); static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL); static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL); static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL); static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL); static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); static struct device_attribute *qib_attributes[] = { &dev_attr_hw_rev, &dev_attr_hca_type, &dev_attr_board_id, &dev_attr_version, &dev_attr_nctxts, &dev_attr_nfreectxts, &dev_attr_serial, &dev_attr_boardversion, &dev_attr_logged_errors, &dev_attr_tempsense, &dev_attr_localbus_info, &dev_attr_chip_reset, }; int qib_create_port_files(struct ib_device *ibdev, u8 port_num, struct kobject *kobj) { struct qib_pportdata *ppd; struct qib_devdata *dd = dd_from_ibdev(ibdev); int ret; if (!port_num 
|| port_num > dd->num_pports) { qib_dev_err(dd, "Skipping infiniband class with " "invalid port %u\n", port_num); ret = -ENODEV; goto bail; } ppd = &dd->pport[port_num - 1]; ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj, "linkcontrol"); if (ret) { qib_dev_err(dd, "Skipping linkcontrol sysfs info, " "(err %d) port %u\n", ret, port_num); goto bail; } kobject_uevent(&ppd->pport_kobj, KOBJ_ADD); ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj, "sl2vl"); if (ret) { qib_dev_err(dd, "Skipping sl2vl sysfs info, " "(err %d) port %u\n", ret, port_num); goto bail_sl; } kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD); ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj, "diag_counters"); if (ret) { qib_dev_err(dd, "Skipping diag_counters sysfs info, " "(err %d) port %u\n", ret, port_num); goto bail_diagc; } kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD); return 0; bail_diagc: kobject_put(&ppd->sl2vl_kobj); bail_sl: kobject_put(&ppd->pport_kobj); bail: return ret; } /* * Register and create our files in /sys/class/infiniband. */ int qib_verbs_register_sysfs(struct qib_devdata *dd) { struct ib_device *dev = &dd->verbs_dev.ibdev; int i, ret; for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) { ret = device_create_file(&dev->dev, qib_attributes[i]); if (ret) return ret; } return 0; } /* * Unregister and remove our files in /sys/class/infiniband. */ void qib_verbs_unregister_sysfs(struct qib_devdata *dd) { struct qib_pportdata *ppd; int i; for (i = 0; i < dd->num_pports; i++) { ppd = &dd->pport[i]; kobject_put(&ppd->pport_kobj); kobject_put(&ppd->sl2vl_kobj); } }
gpl-2.0
browsxd/gunship
src/server/scripts/Northrend/ChamberOfAspects/RubySanctum/boss_general_zarithrian.cpp
50
11105
/* * Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "ScriptMgr.h" #include "ScriptedCreature.h" #include "ScriptedEscortAI.h" #include "ruby_sanctum.h" enum Texts { SAY_AGGRO = 0, // Alexstrasza has chosen capable allies.... A pity that I must END YOU! SAY_KILL = 1, // You thought you stood a chance? - It's for the best. SAY_ADDS = 2, // Turn them to ash, minions! SAY_DEATH = 3, // HALION! I... 
}; enum Spells { // General Zarithrian SPELL_INTIMIDATING_ROAR = 74384, SPELL_CLEAVE_ARMOR = 74367, // Zarithrian Spawn Stalker SPELL_SUMMON_FLAMECALLER = 74398, // Onyx Flamecaller SPELL_BLAST_NOVA = 74392, SPELL_LAVA_GOUT = 74394, }; enum Events { // General Zarithrian EVENT_CLEAVE = 1, EVENT_INTIDMDATING_ROAR = 2, EVENT_SUMMON_ADDS = 3, // Onyx Flamecaller EVENT_BLAST_NOVA = 4, EVENT_LAVA_GOUT = 5, }; uint32 const MAX_PATH_FLAMECALLER_WAYPOINTS = 12; Position const FlamecallerWaypoints[MAX_PATH_FLAMECALLER_WAYPOINTS*2] = { // East {3042.971f, 419.8809f, 86.94320f, 0.0f}, {3043.971f, 419.8809f, 86.94320f, 0.0f}, {3044.885f, 428.8281f, 86.19320f, 0.0f}, {3045.494f, 434.7930f, 85.56398f, 0.0f}, {3045.900f, 438.7695f, 84.81398f, 0.0f}, {3045.657f, 456.8290f, 85.95601f, 0.0f}, {3043.657f, 459.0790f, 87.20601f, 0.0f}, {3042.157f, 460.5790f, 87.70601f, 0.0f}, {3040.907f, 462.0790f, 88.45601f, 0.0f}, {3038.907f, 464.0790f, 89.20601f, 0.0f}, {3025.907f, 478.0790f, 89.70601f, 0.0f}, {3003.832f, 501.2510f, 89.47303f, 0.0f}, // West {3062.596f, 636.9980f, 82.50338f, 0.0f}, {3062.514f, 624.9980f, 83.70634f, 0.0f}, {3062.486f, 620.9980f, 84.33134f, 0.0f}, {3062.445f, 613.9930f, 84.45634f, 0.0f}, {3062.445f, 613.9930f, 84.45634f, 0.0f}, {3059.208f, 610.6501f, 85.39581f, 0.0f}, {3055.958f, 606.9001f, 86.14581f, 0.0f}, {3046.458f, 596.4001f, 86.89581f, 0.0f}, {3043.958f, 593.4001f, 87.64581f, 0.0f}, {3040.458f, 589.9001f, 88.39581f, 0.0f}, {3034.458f, 583.1501f, 88.89581f, 0.0f}, {3014.970f, 561.8073f, 88.83527f, 0.0f}, }; class boss_general_zarithrian : public CreatureScript { public: boss_general_zarithrian() : CreatureScript("boss_general_zarithrian") { } struct boss_general_zarithrianAI : public BossAI { boss_general_zarithrianAI(Creature* creature) : BossAI(creature, DATA_GENERAL_ZARITHRIAN) { } void Reset() { _Reset(); if (instance->GetBossState(DATA_SAVIANA_RAGEFIRE) == DONE && instance->GetBossState(DATA_BALTHARUS_THE_WARBORN) == DONE) me->RemoveFlag(UNIT_FIELD_FLAGS, 
UNIT_FLAG_IMMUNE_TO_PC); } void EnterCombat(Unit* /*who*/) { _EnterCombat(); Talk(SAY_AGGRO); events.Reset(); events.ScheduleEvent(EVENT_CLEAVE, 15000); events.ScheduleEvent(EVENT_INTIDMDATING_ROAR, 42000); events.ScheduleEvent(EVENT_SUMMON_ADDS, 40000); } void JustReachedHome() { _JustReachedHome(); instance->SetBossState(DATA_GENERAL_ZARITHRIAN, FAIL); } // Override to not set adds in combat yet. void JustSummoned(Creature* summon) { summons.Summon(summon); } void JustDied(Unit* /*killer*/) { _JustDied(); Talk(SAY_DEATH); } void KilledUnit(Unit* victim) { if (victim->GetTypeId() == TYPEID_PLAYER) Talk(SAY_KILL); } bool CanAIAttack(Unit const* /*target*/) const { return (instance->GetBossState(DATA_SAVIANA_RAGEFIRE) == DONE && instance->GetBossState(DATA_BALTHARUS_THE_WARBORN) == DONE); } void UpdateAI(uint32 diff) { if (!UpdateVictim()) return; // Can't use room boundary here, the gameobject is spawned at the same position as the boss. This is just as good anyway. if (me->GetPositionX() > 3058.0f) { EnterEvadeMode(); return; } events.Update(diff); if (me->HasUnitState(UNIT_STATE_CASTING)) return; while (uint32 eventId = events.ExecuteEvent()) { switch (eventId) { case EVENT_SUMMON_ADDS: { if (Creature* stalker1 = ObjectAccessor::GetCreature(*me, instance->GetData64(DATA_ZARITHRIAN_SPAWN_STALKER_1))) stalker1->CastSpell(stalker1, SPELL_SUMMON_FLAMECALLER, false); if (Creature* stalker2 = ObjectAccessor::GetCreature(*me, instance->GetData64(DATA_ZARITHRIAN_SPAWN_STALKER_2))) stalker2->CastSpell(stalker2, SPELL_SUMMON_FLAMECALLER, false); Talk(SAY_ADDS); events.ScheduleEvent(EVENT_SUMMON_ADDS, 42000); break; } case EVENT_INTIDMDATING_ROAR: DoCast(me, SPELL_INTIMIDATING_ROAR, true); events.ScheduleEvent(EVENT_INTIDMDATING_ROAR, 42000); case EVENT_CLEAVE: DoCastVictim(SPELL_CLEAVE_ARMOR); events.ScheduleEvent(EVENT_CLEAVE, 15000); break; default: break; } } DoMeleeAttackIfReady(); } }; CreatureAI* GetAI(Creature* creature) const { return 
GetRubySanctumAI<boss_general_zarithrianAI>(creature); } }; class npc_onyx_flamecaller : public CreatureScript { public: npc_onyx_flamecaller() : CreatureScript("npc_onyx_flamecaller") { } struct npc_onyx_flamecallerAI : public npc_escortAI { npc_onyx_flamecallerAI(Creature* creature) : npc_escortAI(creature), _instance(creature->GetInstanceScript()) { npc_escortAI::SetDespawnAtEnd(false); } void Reset() { _lavaGoutCount = 0; me->setActive(true); AddWaypoints(); Start(true, true); } void EnterCombat(Unit* /*who*/) { _events.Reset(); _events.ScheduleEvent(EVENT_BLAST_NOVA, urand(20000, 30000)); _events.ScheduleEvent(EVENT_LAVA_GOUT, 5000); } void EnterEvadeMode() { // Prevent EvadeMode } void IsSummonedBy(Unit* /*summoner*/) { // Let Zarithrian count as summoner. _instance cant be null since we got GetRubySanctumAI if (Creature* zarithrian = ObjectAccessor::GetCreature(*me, _instance->GetData64(DATA_GENERAL_ZARITHRIAN))) zarithrian->AI()->JustSummoned(me); } void WaypointReached(uint32 waypointId) { if (waypointId == MAX_PATH_FLAMECALLER_WAYPOINTS || waypointId == MAX_PATH_FLAMECALLER_WAYPOINTS*2) { DoZoneInCombat(); SetEscortPaused(true); } } void AddWaypoints() { if (me->GetPositionY() < 500.0f) { for (uint8 i = 0; i < MAX_PATH_FLAMECALLER_WAYPOINTS; i++) AddWaypoint(i, FlamecallerWaypoints[i].GetPositionX(), FlamecallerWaypoints[i].GetPositionY(), FlamecallerWaypoints[i].GetPositionZ()); } else { for (uint8 i = 0, j = MAX_PATH_FLAMECALLER_WAYPOINTS; j < MAX_PATH_FLAMECALLER_WAYPOINTS*2; j++, i++) AddWaypoint(i, FlamecallerWaypoints[j].GetPositionX(), FlamecallerWaypoints[j].GetPositionY(), FlamecallerWaypoints[j].GetPositionZ()); } } void UpdateEscortAI(uint32 const diff) { if (!UpdateVictim()) return; _events.Update(diff); if (me->HasUnitState(UNIT_STATE_CASTING)) return; while (uint32 eventId = _events.ExecuteEvent()) { switch (eventId) { case EVENT_BLAST_NOVA: DoCastAOE(SPELL_BLAST_NOVA); _events.ScheduleEvent(EVENT_BLAST_NOVA, urand(20000, 30000)); break; 
case EVENT_LAVA_GOUT: if (_lavaGoutCount >= 3) { _lavaGoutCount = 0; _events.ScheduleEvent(EVENT_LAVA_GOUT, 8000); break; } DoCastVictim(SPELL_LAVA_GOUT); _lavaGoutCount++; _events.ScheduleEvent(EVENT_LAVA_GOUT, 1500); break; default: break; } } DoMeleeAttackIfReady(); } private: EventMap _events; InstanceScript* _instance; uint8 _lavaGoutCount; }; CreatureAI* GetAI(Creature* creature) const { return GetRubySanctumAI<npc_onyx_flamecallerAI>(creature); } }; void AddSC_boss_general_zarithrian() { new boss_general_zarithrian(); new npc_onyx_flamecaller(); }
gpl-2.0
dh-electronics/linux-imx25
drivers/rtc/rtc-mpc5121.c
50
10774
/* * Real-time clock driver for MPC5121 * * Copyright 2007, Domen Puncer <domen.puncer@telargo.com> * Copyright 2008, Freescale Semiconductor, Inc. All rights reserved. * Copyright 2011, Dmitry Eremin-Solenikov * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/rtc.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/io.h> #include <linux/slab.h> struct mpc5121_rtc_regs { u8 set_time; /* RTC + 0x00 */ u8 hour_set; /* RTC + 0x01 */ u8 minute_set; /* RTC + 0x02 */ u8 second_set; /* RTC + 0x03 */ u8 set_date; /* RTC + 0x04 */ u8 month_set; /* RTC + 0x05 */ u8 weekday_set; /* RTC + 0x06 */ u8 date_set; /* RTC + 0x07 */ u8 write_sw; /* RTC + 0x08 */ u8 sw_set; /* RTC + 0x09 */ u16 year_set; /* RTC + 0x0a */ u8 alm_enable; /* RTC + 0x0c */ u8 alm_hour_set; /* RTC + 0x0d */ u8 alm_min_set; /* RTC + 0x0e */ u8 int_enable; /* RTC + 0x0f */ u8 reserved1; u8 hour; /* RTC + 0x11 */ u8 minute; /* RTC + 0x12 */ u8 second; /* RTC + 0x13 */ u8 month; /* RTC + 0x14 */ u8 wday_mday; /* RTC + 0x15 */ u16 year; /* RTC + 0x16 */ u8 int_alm; /* RTC + 0x18 */ u8 int_sw; /* RTC + 0x19 */ u8 alm_status; /* RTC + 0x1a */ u8 sw_minute; /* RTC + 0x1b */ u8 bus_error_1; /* RTC + 0x1c */ u8 int_day; /* RTC + 0x1d */ u8 int_min; /* RTC + 0x1e */ u8 int_sec; /* RTC + 0x1f */ /* * target_time: * intended to be used for hibernation but hibernation * does not work on silicon rev 1.5 so use it for non-volatile * storage of offset between the actual_time register and linux * time */ u32 target_time; /* RTC + 0x20 */ /* * actual_time: * readonly time since VBAT_RTC was last connected */ u32 actual_time; /* RTC + 0x24 */ u32 keep_alive; /* RTC + 0x28 */ }; struct mpc5121_rtc_data { unsigned irq; unsigned irq_periodic; struct mpc5121_rtc_regs __iomem *regs; struct rtc_device 
*rtc; struct rtc_wkalrm wkalarm; }; /* * Update second/minute/hour registers. * * This is just so alarm will work. */ static void mpc5121_rtc_update_smh(struct mpc5121_rtc_regs __iomem *regs, struct rtc_time *tm) { out_8(&regs->second_set, tm->tm_sec); out_8(&regs->minute_set, tm->tm_min); out_8(&regs->hour_set, tm->tm_hour); /* set time sequence */ out_8(&regs->set_time, 0x1); out_8(&regs->set_time, 0x3); out_8(&regs->set_time, 0x1); out_8(&regs->set_time, 0x0); } static int mpc5121_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev); struct mpc5121_rtc_regs __iomem *regs = rtc->regs; unsigned long now; /* * linux time is actual_time plus the offset saved in target_time */ now = in_be32(&regs->actual_time) + in_be32(&regs->target_time); rtc_time_to_tm(now, tm); /* * update second minute hour registers * so alarms will work */ mpc5121_rtc_update_smh(regs, tm); return rtc_valid_tm(tm); } static int mpc5121_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev); struct mpc5121_rtc_regs __iomem *regs = rtc->regs; int ret; unsigned long now; /* * The actual_time register is read only so we write the offset * between it and linux time to the target_time register. */ ret = rtc_tm_to_time(tm, &now); if (ret == 0) out_be32(&regs->target_time, now - in_be32(&regs->actual_time)); /* * update second minute hour registers * so alarms will work */ mpc5121_rtc_update_smh(regs, tm); return 0; } static int mpc5200_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev); struct mpc5121_rtc_regs __iomem *regs = rtc->regs; int tmp; tm->tm_sec = in_8(&regs->second); tm->tm_min = in_8(&regs->minute); /* 12 hour format? */ if (in_8(&regs->hour) & 0x20) tm->tm_hour = (in_8(&regs->hour) >> 1) + (in_8(&regs->hour) & 1 ? 
12 : 0); else tm->tm_hour = in_8(&regs->hour); tmp = in_8(&regs->wday_mday); tm->tm_mday = tmp & 0x1f; tm->tm_mon = in_8(&regs->month) - 1; tm->tm_year = in_be16(&regs->year) - 1900; tm->tm_wday = (tmp >> 5) % 7; tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); tm->tm_isdst = 0; return 0; } static int mpc5200_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev); struct mpc5121_rtc_regs __iomem *regs = rtc->regs; mpc5121_rtc_update_smh(regs, tm); /* date */ out_8(&regs->month_set, tm->tm_mon + 1); out_8(&regs->weekday_set, tm->tm_wday ? tm->tm_wday : 7); out_8(&regs->date_set, tm->tm_mday); out_be16(&regs->year_set, tm->tm_year + 1900); /* set date sequence */ out_8(&regs->set_date, 0x1); out_8(&regs->set_date, 0x3); out_8(&regs->set_date, 0x1); out_8(&regs->set_date, 0x0); return 0; } static int mpc5121_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev); struct mpc5121_rtc_regs __iomem *regs = rtc->regs; *alarm = rtc->wkalarm; alarm->pending = in_8(&regs->alm_status); return 0; } static int mpc5121_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev); struct mpc5121_rtc_regs __iomem *regs = rtc->regs; /* * the alarm has no seconds so deal with it */ if (alarm->time.tm_sec) { alarm->time.tm_sec = 0; alarm->time.tm_min++; if (alarm->time.tm_min >= 60) { alarm->time.tm_min = 0; alarm->time.tm_hour++; if (alarm->time.tm_hour >= 24) alarm->time.tm_hour = 0; } } alarm->time.tm_mday = -1; alarm->time.tm_mon = -1; alarm->time.tm_year = -1; out_8(&regs->alm_min_set, alarm->time.tm_min); out_8(&regs->alm_hour_set, alarm->time.tm_hour); out_8(&regs->alm_enable, alarm->enabled); rtc->wkalarm = *alarm; return 0; } static irqreturn_t mpc5121_rtc_handler(int irq, void *dev) { struct mpc5121_rtc_data *rtc = dev_get_drvdata((struct device *)dev); struct mpc5121_rtc_regs __iomem *regs = 
rtc->regs; if (in_8(&regs->int_alm)) { /* acknowledge and clear status */ out_8(&regs->int_alm, 1); out_8(&regs->alm_status, 1); rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF); return IRQ_HANDLED; } return IRQ_NONE; } static irqreturn_t mpc5121_rtc_handler_upd(int irq, void *dev) { struct mpc5121_rtc_data *rtc = dev_get_drvdata((struct device *)dev); struct mpc5121_rtc_regs __iomem *regs = rtc->regs; if (in_8(&regs->int_sec) && (in_8(&regs->int_enable) & 0x1)) { /* acknowledge */ out_8(&regs->int_sec, 1); rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_UF); return IRQ_HANDLED; } return IRQ_NONE; } static int mpc5121_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev); struct mpc5121_rtc_regs __iomem *regs = rtc->regs; int val; if (enabled) val = 1; else val = 0; out_8(&regs->alm_enable, val); rtc->wkalarm.enabled = val; return 0; } static const struct rtc_class_ops mpc5121_rtc_ops = { .read_time = mpc5121_rtc_read_time, .set_time = mpc5121_rtc_set_time, .read_alarm = mpc5121_rtc_read_alarm, .set_alarm = mpc5121_rtc_set_alarm, .alarm_irq_enable = mpc5121_rtc_alarm_irq_enable, }; static const struct rtc_class_ops mpc5200_rtc_ops = { .read_time = mpc5200_rtc_read_time, .set_time = mpc5200_rtc_set_time, .read_alarm = mpc5121_rtc_read_alarm, .set_alarm = mpc5121_rtc_set_alarm, .alarm_irq_enable = mpc5121_rtc_alarm_irq_enable, }; static int __devinit mpc5121_rtc_probe(struct platform_device *op) { struct mpc5121_rtc_data *rtc; int err = 0; rtc = kzalloc(sizeof(*rtc), GFP_KERNEL); if (!rtc) return -ENOMEM; rtc->regs = of_iomap(op->dev.of_node, 0); if (!rtc->regs) { dev_err(&op->dev, "%s: couldn't map io space\n", __func__); err = -ENOSYS; goto out_free; } device_init_wakeup(&op->dev, 1); dev_set_drvdata(&op->dev, rtc); rtc->irq = irq_of_parse_and_map(op->dev.of_node, 1); err = request_irq(rtc->irq, mpc5121_rtc_handler, IRQF_DISABLED, "mpc5121-rtc", &op->dev); if (err) { dev_err(&op->dev, "%s: could not request 
irq: %i\n", __func__, rtc->irq); goto out_dispose; } rtc->irq_periodic = irq_of_parse_and_map(op->dev.of_node, 0); err = request_irq(rtc->irq_periodic, mpc5121_rtc_handler_upd, IRQF_DISABLED, "mpc5121-rtc_upd", &op->dev); if (err) { dev_err(&op->dev, "%s: could not request irq: %i\n", __func__, rtc->irq_periodic); goto out_dispose2; } if (of_device_is_compatible(op->dev.of_node, "fsl,mpc5121-rtc")) { u32 ka; ka = in_be32(&rtc->regs->keep_alive); if (ka & 0x02) { dev_warn(&op->dev, "mpc5121-rtc: Battery or oscillator failure!\n"); out_be32(&rtc->regs->keep_alive, ka); } rtc->rtc = rtc_device_register("mpc5121-rtc", &op->dev, &mpc5121_rtc_ops, THIS_MODULE); } else { rtc->rtc = rtc_device_register("mpc5200-rtc", &op->dev, &mpc5200_rtc_ops, THIS_MODULE); } rtc->rtc->uie_unsupported = 1; if (IS_ERR(rtc->rtc)) { err = PTR_ERR(rtc->rtc); goto out_free_irq; } return 0; out_free_irq: free_irq(rtc->irq_periodic, &op->dev); out_dispose2: irq_dispose_mapping(rtc->irq_periodic); free_irq(rtc->irq, &op->dev); out_dispose: irq_dispose_mapping(rtc->irq); iounmap(rtc->regs); out_free: kfree(rtc); return err; } static int __devexit mpc5121_rtc_remove(struct platform_device *op) { struct mpc5121_rtc_data *rtc = dev_get_drvdata(&op->dev); struct mpc5121_rtc_regs __iomem *regs = rtc->regs; /* disable interrupt, so there are no nasty surprises */ out_8(&regs->alm_enable, 0); out_8(&regs->int_enable, in_8(&regs->int_enable) & ~0x1); rtc_device_unregister(rtc->rtc); iounmap(rtc->regs); free_irq(rtc->irq, &op->dev); free_irq(rtc->irq_periodic, &op->dev); irq_dispose_mapping(rtc->irq); irq_dispose_mapping(rtc->irq_periodic); dev_set_drvdata(&op->dev, NULL); kfree(rtc); return 0; } static struct of_device_id mpc5121_rtc_match[] __devinitdata = { { .compatible = "fsl,mpc5121-rtc", }, { .compatible = "fsl,mpc5200-rtc", }, {}, }; static struct platform_driver mpc5121_rtc_driver = { .driver = { .name = "mpc5121-rtc", .owner = THIS_MODULE, .of_match_table = mpc5121_rtc_match, }, .probe = 
mpc5121_rtc_probe, .remove = __devexit_p(mpc5121_rtc_remove), }; static int __init mpc5121_rtc_init(void) { return platform_driver_register(&mpc5121_rtc_driver); } module_init(mpc5121_rtc_init); static void __exit mpc5121_rtc_exit(void) { platform_driver_unregister(&mpc5121_rtc_driver); } module_exit(mpc5121_rtc_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Rigby <jcrigby@gmail.com>");
gpl-2.0
AndroidDeveloperAlliance/kernel_mapphone_kexec
drivers/net/wireless/bcmdhd/dhd_linux.c
50
135653
/* * Broadcom Dongle Host Driver (DHD), Linux-specific network interface * Basically selected code segments from usb-cdc.c and usb-rndis.c * * Copyright (C) 1999-2011, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* * $Id: dhd_linux.c 333885 2012-05-18 00:39:03Z $ */ #include <typedefs.h> #include <linuxver.h> #include <osl.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/rtnetlink.h> #include <linux/etherdevice.h> #include <linux/random.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/fcntl.h> #include <linux/fs.h> #include <asm/uaccess.h> #include <asm/unaligned.h> #include <epivers.h> #include <bcmutils.h> #include <bcmendian.h> #include <bcmdevs.h> #include <proto/ethernet.h> #include <dngl_stats.h> #include <dhd.h> #include <dhd_bus.h> #include <dhd_proto.h> #include <dhd_dbg.h> #ifdef CONFIG_HAS_WAKELOCK #include <linux/wakelock.h> #endif #ifdef WL_CFG80211 #include <wl_cfg80211.h> #endif #include <proto/802.11_bta.h> #include <proto/bt_amp_hci.h> #include <dhd_bta.h> #ifdef WLMEDIA_HTSF #include <linux/time.h> #include <htsf.h> #define HTSF_MINLEN 200 /* min. packet length to timestamp */ #define HTSF_BUS_DELAY 150 /* assume a fix propagation in us */ #define TSMAX 1000 /* max no. of timing record kept */ #define NUMBIN 34 static uint32 tsidx = 0; static uint32 htsf_seqnum = 0; uint32 tsfsync; struct timeval tsync; static uint32 tsport = 5010; typedef struct histo_ { uint32 bin[NUMBIN]; } histo_t; #if !ISPOWEROF2(DHD_SDALIGN) #error DHD_SDALIGN is not a power of 2! 
#endif static histo_t vi_d1, vi_d2, vi_d3, vi_d4; #endif /* WLMEDIA_HTSF */ #if defined(SOFTAP) extern bool ap_cfg_running; extern bool ap_fw_loaded; #endif /* enable HOSTIP cache update from the host side when an eth0:N is up */ #define AOE_IP_ALIAS_SUPPORT 1 #ifdef PROP_TXSTATUS #include <wlfc_proto.h> #include <dhd_wlfc.h> #endif #include <wl_android.h> #ifdef ARP_OFFLOAD_SUPPORT void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add); static int dhd_device_event(struct notifier_block *this, unsigned long event, void *ptr); static struct notifier_block dhd_notifier = { .notifier_call = dhd_device_event }; #endif /* ARP_OFFLOAD_SUPPORT */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) #include <linux/suspend.h> volatile bool dhd_mmc_suspend = FALSE; DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ #if defined(OOB_INTR_ONLY) extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable); #endif /* defined(OOB_INTR_ONLY) */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) static void dhd_hang_process(struct work_struct *work); #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) MODULE_LICENSE("GPL v2"); #endif /* LinuxVer */ #include <dhd_bus.h> #ifndef PROP_TXSTATUS #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen) #else #define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128) #endif #if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) const char * print_tainted() { return ""; } #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 15) */ /* Linux wireless extension support */ #if defined(WL_WIRELESS_EXT) #include <wl_iw.h> extern wl_iw_extra_params_t g_wl_iw_params; #endif /* defined(WL_WIRELESS_EXT) */ #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) #include <linux/earlysuspend.h> #endif /* defined(CONFIG_HAS_EARLYSUSPEND) */ 
extern int dhd_get_dtim_skip(dhd_pub_t *dhd); #ifdef PKT_FILTER_SUPPORT extern void dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg); extern void dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode); #endif /* Interface control information */ typedef struct dhd_if { struct dhd_info *info; /* back pointer to dhd_info */ /* OS/stack specifics */ struct net_device *net; struct net_device_stats stats; int idx; /* iface idx in dongle */ dhd_if_state_t state; /* interface state */ uint subunit; /* subunit */ uint8 mac_addr[ETHER_ADDR_LEN]; /* assigned MAC address */ bool attached; /* Delayed attachment when unset */ bool txflowcontrol; /* Per interface flow control indicator */ char name[IFNAMSIZ+1]; /* linux interface name */ uint8 bssidx; /* bsscfg index for the interface */ bool set_multicast; } dhd_if_t; #ifdef WLMEDIA_HTSF typedef struct { uint32 low; uint32 high; } tsf_t; typedef struct { uint32 last_cycle; uint32 last_sec; uint32 last_tsf; uint32 coef; /* scaling factor */ uint32 coefdec1; /* first decimal */ uint32 coefdec2; /* second decimal */ } htsf_t; typedef struct { uint32 t1; uint32 t2; uint32 t3; uint32 t4; } tstamp_t; static tstamp_t ts[TSMAX]; static tstamp_t maxdelayts; static uint32 maxdelay = 0, tspktcnt = 0, maxdelaypktno = 0; #endif /* WLMEDIA_HTSF */ /* Local private structure (extension of pub) */ typedef struct dhd_info { #if defined(WL_WIRELESS_EXT) wl_iw_t iw; /* wireless extensions state (must be first) */ #endif /* defined(WL_WIRELESS_EXT) */ dhd_pub_t pub; /* For supporting multiple interfaces */ dhd_if_t *iflist[DHD_MAX_IFS]; struct semaphore proto_sem; #ifdef PROP_TXSTATUS spinlock_t wlfc_spinlock; #endif /* PROP_TXSTATUS */ #ifdef WLMEDIA_HTSF htsf_t htsf; #endif wait_queue_head_t ioctl_resp_wait; struct timer_list timer; bool wd_timer_valid; struct tasklet_struct tasklet; spinlock_t sdlock; spinlock_t txqlock; spinlock_t dhd_lock; #ifdef DHDTHREAD /* Thread based operation */ bool threads_only; 
struct semaphore sdsem; tsk_ctl_t thr_dpc_ctl; tsk_ctl_t thr_wdt_ctl; #else bool dhd_tasklet_create; #endif /* DHDTHREAD */ tsk_ctl_t thr_sysioc_ctl; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) struct work_struct work_hang; #endif /* Wakelocks */ #if defined(CONFIG_HAS_WAKELOCK) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) struct wake_lock wl_wifi; /* Wifi wakelock */ struct wake_lock wl_rxwake; /* Wifi rx wakelock */ struct wake_lock wl_ctrlwake; /* Wifi ctrl wakelock */ #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) /* net_device interface lock, prevent race conditions among net_dev interface * calls and wifi_on or wifi_off */ struct mutex dhd_net_if_mutex; struct mutex dhd_suspend_mutex; #endif spinlock_t wakelock_spinlock; int wakelock_counter; int wakelock_rx_timeout_enable; int wakelock_ctrl_timeout_enable; /* Thread to issue ioctl for multicast */ bool set_macaddress; struct ether_addr macvalue; wait_queue_head_t ctrl_wait; atomic_t pend_8021x_cnt; dhd_attach_states_t dhd_state; #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) struct early_suspend early_suspend; #endif /* CONFIG_HAS_EARLYSUSPEND */ #ifdef ARP_OFFLOAD_SUPPORT u32 pend_ipaddr; #endif /* ARP_OFFLOAD_SUPPORT */ } dhd_info_t; /* Definitions to provide path to the firmware and nvram * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt" */ char firmware_path[MOD_PARAM_PATHLEN]; char nvram_path[MOD_PARAM_PATHLEN]; int op_mode = 0; module_param(op_mode, int, 0644); extern int wl_control_wl_start(struct net_device *dev); extern int net_os_send_hang_message(struct net_device *dev); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) struct semaphore dhd_registration_sem; #define DHD_REGISTRATION_TIMEOUT 12000 /* msec : allowed time to finished dhd registration */ #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ /* Spawn a thread for system ioctls (set mac, set mcast) */ uint dhd_sysioc = TRUE; module_param(dhd_sysioc, uint, 
0); /* Error bits */ module_param(dhd_msg_level, int, 0); /* load firmware and/or nvram values from the filesystem */ module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660); module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0); /* Watchdog interval */ uint dhd_watchdog_ms = 10; module_param(dhd_watchdog_ms, uint, 0); #if defined(DHD_DEBUG) /* Console poll interval */ uint dhd_console_ms = 0; module_param(dhd_console_ms, uint, 0644); #endif /* defined(DHD_DEBUG) */ /* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */ uint dhd_arp_mode = 0xb; module_param(dhd_arp_mode, uint, 0); /* ARP offload enable */ uint dhd_arp_enable = TRUE; module_param(dhd_arp_enable, uint, 0); /* Global Pkt filter enable control */ uint dhd_pkt_filter_enable = TRUE; module_param(dhd_pkt_filter_enable, uint, 0); /* Pkt filter init setup */ uint dhd_pkt_filter_init = 0; module_param(dhd_pkt_filter_init, uint, 0); /* Pkt filter mode control */ uint dhd_master_mode = TRUE; module_param(dhd_master_mode, uint, 0); #ifdef DHDTHREAD /* Watchdog thread priority, -1 to use kernel timer */ int dhd_watchdog_prio = 97; module_param(dhd_watchdog_prio, int, 0); /* DPC thread priority, -1 to use tasklet */ int dhd_dpc_prio = 98; module_param(dhd_dpc_prio, int, 0); /* DPC thread priority, -1 to use tasklet */ extern int dhd_dongle_memsize; module_param(dhd_dongle_memsize, int, 0); #endif /* DHDTHREAD */ /* Control fw roaming */ uint dhd_roam_disable = 0; /* Control radio state */ uint dhd_radio_up = 1; /* Network inteface name */ char iface_name[IFNAMSIZ] = {'\0'}; module_param_string(iface_name, iface_name, IFNAMSIZ, 0); /* The following are specific to the SDIO dongle */ /* IOCTL response timeout */ int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT; /* Idle timeout for backplane clock */ int dhd_idletime = DHD_IDLETIME_TICKS; module_param(dhd_idletime, int, 0); /* Use polling */ uint dhd_poll = FALSE; module_param(dhd_poll, uint, 0); /* Use 
interrupts */ uint dhd_intr = TRUE; module_param(dhd_intr, uint, 0); /* SDIO Drive Strength (in milliamps) */ uint dhd_sdiod_drive_strength = 6; module_param(dhd_sdiod_drive_strength, uint, 0); /* Tx/Rx bounds */ extern uint dhd_txbound; extern uint dhd_rxbound; module_param(dhd_txbound, uint, 0); module_param(dhd_rxbound, uint, 0); /* Deferred transmits */ extern uint dhd_deferred_tx; module_param(dhd_deferred_tx, uint, 0); #ifdef BCMDBGFS extern void dhd_dbg_init(dhd_pub_t *dhdp); extern void dhd_dbg_remove(void); #endif /* BCMDBGFS */ #ifdef SDTEST /* Echo packet generator (pkts/s) */ uint dhd_pktgen = 0; module_param(dhd_pktgen, uint, 0); /* Echo packet len (0 => sawtooth, max 2040) */ uint dhd_pktgen_len = 0; module_param(dhd_pktgen_len, uint, 0); #endif /* SDTEST */ /* Version string to report */ #ifdef DHD_DEBUG #ifndef SRCBASE #define SRCBASE "drivers/net/wireless/bcmdhd" #endif #define DHD_COMPILED "\nCompiled in " SRCBASE #else #define DHD_COMPILED #endif /* DHD_DEBUG */ static char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR #ifdef DHD_DEBUG "\nCompiled in " SRCBASE " on " __DATE__ " at " __TIME__ #endif ; static void dhd_net_if_lock_local(dhd_info_t *dhd); static void dhd_net_if_unlock_local(dhd_info_t *dhd); static void dhd_suspend_lock(dhd_pub_t *dhdp); static void dhd_suspend_unlock(dhd_pub_t *dhdp); #if !defined(AP) && defined(WLP2P) && defined(WL_ENABLE_P2P_IF) static u32 dhd_concurrent_fw(dhd_pub_t *dhd); #endif #ifdef WLMEDIA_HTSF void htsf_update(dhd_info_t *dhd, void *data); tsf_t prev_tsf, cur_tsf; uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx); static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx); static void dhd_dump_latency(void); static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf); static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf); static void dhd_dump_htsfhisto(histo_t *his, char *s); #endif /* WLMEDIA_HTSF */ /* Monitor interface */ int dhd_monitor_init(void *dhd_pub); int 
dhd_monitor_uninit(void); #if defined(WL_WIRELESS_EXT) struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev); #endif /* defined(WL_WIRELESS_EXT) */ static void dhd_dpc(ulong data); /* forward decl */ extern int dhd_wait_pend8021x(struct net_device *dev); #ifdef TOE #ifndef BDC #error TOE requires BDC #endif /* !BDC */ static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol); static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol); #endif /* TOE */ static int dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata, wl_event_msg_t *event_ptr, void **data_ptr); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) static int dhd_sleep_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored) { int ret = NOTIFY_DONE; #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)) switch (action) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: dhd_mmc_suspend = TRUE; ret = NOTIFY_OK; break; case PM_POST_HIBERNATION: case PM_POST_SUSPEND: dhd_mmc_suspend = FALSE; ret = NOTIFY_OK; break; } smp_mb(); #endif return ret; } static struct notifier_block dhd_sleep_pm_notifier = { .notifier_call = dhd_sleep_pm_callback, .priority = 10 }; extern int register_pm_notifier(struct notifier_block *nb); extern int unregister_pm_notifier(struct notifier_block *nb); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ static void dhd_set_packet_filter(int value, dhd_pub_t *dhd) { #ifdef PKT_FILTER_SUPPORT DHD_TRACE(("%s: %d\n", __FUNCTION__, value)); /* 1 - Enable packet filter, only allow unicast packet to send up */ /* 0 - Disable packet filter */ if (dhd_pkt_filter_enable && (!value || (dhd_check_ap_wfd_mode_set(dhd) == FALSE))) { int i; for (i = 0; i < dhd->pktfilter_count; i++) { dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]); dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i], value, dhd_master_mode); } } #endif } static int dhd_set_suspend(int value, dhd_pub_t 
*dhd) { int power_mode = PM_MAX; /* wl_pkt_filter_enable_t enable_parm; */ char iovbuf[32]; int bcn_li_dtim = 3; uint roamvar = 1; DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n", __FUNCTION__, value, dhd->in_suspend)); dhd_suspend_lock(dhd); if (dhd && dhd->up) { if (value && dhd->in_suspend) { /* Kernel suspended */ DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__)); dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); /* Enable packet filter, only allow unicast packet to send up */ dhd_set_packet_filter(1, dhd); /* If DTIM skip is set up as default, force it to wake * each third DTIM for better power savings. Note that * one side effect is a chance to miss BC/MC packet. */ bcn_li_dtim = dhd_get_dtim_skip(dhd); bcm_mkiovar("bcn_li_dtim", (char *)&bcn_li_dtim, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); /* Disable firmware roaming during suspend */ bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); } else { /* Kernel resumed */ DHD_TRACE(("%s: Remove extra suspend setting \n", __FUNCTION__)); power_mode = PM_FAST; dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); /* disable pkt filter */ dhd_set_packet_filter(0, dhd); /* restore pre-suspend setting for dtim_skip */ bcm_mkiovar("bcn_li_dtim", (char *)&dhd->dtim_skip, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); roamvar = dhd_roam_disable; bcm_mkiovar("roam_off", (char *)&roamvar, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); } } dhd_suspend_unlock(dhd); return 0; } static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force) { dhd_pub_t *dhdp = &dhd->pub; int ret = 0; DHD_OS_WAKE_LOCK(dhdp); /* Set flag when early suspend was called */ dhdp->in_suspend = val; if ((force || 
!dhdp->suspend_disable_flag) && (dhd_check_ap_wfd_mode_set(dhdp) == FALSE)) { ret = dhd_set_suspend(val, dhdp); } DHD_OS_WAKE_UNLOCK(dhdp); return ret; } #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) static void dhd_early_suspend(struct early_suspend *h) { struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); DHD_TRACE(("%s: enter\n", __FUNCTION__)); if (dhd) dhd_suspend_resume_helper(dhd, 1, 0); } static void dhd_late_resume(struct early_suspend *h) { struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend); DHD_TRACE(("%s: enter\n", __FUNCTION__)); if (dhd) dhd_suspend_resume_helper(dhd, 0, 0); } #endif /* defined(CONFIG_HAS_EARLYSUSPEND) */ /* * Generalized timeout mechanism. Uses spin sleep with exponential back-off until * the sleep time reaches one jiffy, then switches over to task delay. Usage: * * dhd_timeout_start(&tmo, usec); * while (!dhd_timeout_expired(&tmo)) * if (poll_something()) * break; * if (dhd_timeout_expired(&tmo)) * fatal(); */ void dhd_timeout_start(dhd_timeout_t *tmo, uint usec) { tmo->limit = usec; tmo->increment = 0; tmo->elapsed = 0; tmo->tick = 1000000 / HZ; } int dhd_timeout_expired(dhd_timeout_t *tmo) { /* Does nothing the first call */ if (tmo->increment == 0) { tmo->increment = 1; return 0; } if (tmo->elapsed >= tmo->limit) return 1; /* Add the delay that's about to take place */ tmo->elapsed += tmo->increment; if (tmo->increment < tmo->tick) { OSL_DELAY(tmo->increment); tmo->increment *= 2; if (tmo->increment > tmo->tick) tmo->increment = tmo->tick; } else { wait_queue_head_t delay_wait; DECLARE_WAITQUEUE(wait, current); init_waitqueue_head(&delay_wait); add_wait_queue(&delay_wait, &wait); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(1); remove_wait_queue(&delay_wait, &wait); set_current_state(TASK_RUNNING); } return 0; } int dhd_net2idx(dhd_info_t *dhd, struct net_device *net) { int i = 0; ASSERT(dhd); while (i < DHD_MAX_IFS) { if (dhd->iflist[i] && 
(dhd->iflist[i]->net == net)) return i; i++; } return DHD_BAD_IF; } struct net_device * dhd_idx2net(void *pub, int ifidx) { struct dhd_pub *dhd_pub = (struct dhd_pub *)pub; struct dhd_info *dhd_info; if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS) return NULL; dhd_info = dhd_pub->info; if (dhd_info && dhd_info->iflist[ifidx]) return dhd_info->iflist[ifidx]->net; return NULL; } int dhd_ifname2idx(dhd_info_t *dhd, char *name) { int i = DHD_MAX_IFS; ASSERT(dhd); if (name == NULL || *name == '\0') return 0; while (--i > 0) if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->name, name, IFNAMSIZ)) break; DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name)); return i; /* default - the primary interface */ } char * dhd_ifname(dhd_pub_t *dhdp, int ifidx) { dhd_info_t *dhd = (dhd_info_t *)dhdp->info; ASSERT(dhd); if (ifidx < 0 || ifidx >= DHD_MAX_IFS) { DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx)); return "<if_bad>"; } if (dhd->iflist[ifidx] == NULL) { DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx)); return "<if_null>"; } if (dhd->iflist[ifidx]->net) return dhd->iflist[ifidx]->net->name; return "<if_none>"; } uint8 * dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx) { int i; dhd_info_t *dhd = (dhd_info_t *)dhdp; ASSERT(dhd); for (i = 0; i < DHD_MAX_IFS; i++) if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx) return dhd->iflist[i]->mac_addr; return NULL; } static void _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx) { struct net_device *dev; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) struct netdev_hw_addr *ha; #else struct dev_mc_list *mclist; #endif uint32 allmulti, cnt; wl_ioctl_t ioc; char *buf, *bufp; uint buflen; int ret; ASSERT(dhd && dhd->iflist[ifidx]); dev = dhd->iflist[ifidx]->net; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_lock_bh(dev); #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) cnt = netdev_mc_count(dev); #else cnt = dev->mc_count; #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 
27) netif_addr_unlock_bh(dev); #endif /* Determine initial value of allmulti flag */ allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE; /* Send down the multicast list first. */ buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN); if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) { DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n", dhd_ifname(&dhd->pub, ifidx), cnt)); return; } strcpy(bufp, "mcast_list"); bufp += strlen("mcast_list") + 1; cnt = htol32(cnt); memcpy(bufp, &cnt, sizeof(cnt)); bufp += sizeof(cnt); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_lock_bh(dev); #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) netdev_for_each_mc_addr(ha, dev) { if (!cnt) break; memcpy(bufp, ha->addr, ETHER_ADDR_LEN); bufp += ETHER_ADDR_LEN; cnt--; } #else for (mclist = dev->mc_list; (mclist && (cnt > 0)); cnt--, mclist = mclist->next) { memcpy(bufp, (void *)mclist->dmi_addr, ETHER_ADDR_LEN); bufp += ETHER_ADDR_LEN; } #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27) netif_addr_unlock_bh(dev); #endif memset(&ioc, 0, sizeof(ioc)); ioc.cmd = WLC_SET_VAR; ioc.buf = buf; ioc.len = buflen; ioc.set = TRUE; ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); if (ret < 0) { DHD_ERROR(("%s: set mcast_list failed, cnt %d\n", dhd_ifname(&dhd->pub, ifidx), cnt)); allmulti = cnt ? TRUE : allmulti; } MFREE(dhd->pub.osh, buf, buflen); /* Now send the allmulti setting. This is based on the setting in the * net_device flags, but might be modified above to be turned on if we * were trying to set some addresses and dongle rejected it... 
*/ buflen = sizeof("allmulti") + sizeof(allmulti); if (!(buf = MALLOC(dhd->pub.osh, buflen))) { DHD_ERROR(("%s: out of memory for allmulti\n", dhd_ifname(&dhd->pub, ifidx))); return; } allmulti = htol32(allmulti); if (!bcm_mkiovar("allmulti", (void*)&allmulti, sizeof(allmulti), buf, buflen)) { DHD_ERROR(("%s: mkiovar failed for allmulti, datalen %d buflen %u\n", dhd_ifname(&dhd->pub, ifidx), (int)sizeof(allmulti), buflen)); MFREE(dhd->pub.osh, buf, buflen); return; } memset(&ioc, 0, sizeof(ioc)); ioc.cmd = WLC_SET_VAR; ioc.buf = buf; ioc.len = buflen; ioc.set = TRUE; ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); if (ret < 0) { DHD_ERROR(("%s: set allmulti %d failed\n", dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); } MFREE(dhd->pub.osh, buf, buflen); /* Finally, pick up the PROMISC flag as well, like the NIC driver does */ allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE; allmulti = htol32(allmulti); memset(&ioc, 0, sizeof(ioc)); ioc.cmd = WLC_SET_PROMISC; ioc.buf = &allmulti; ioc.len = sizeof(allmulti); ioc.set = TRUE; ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); if (ret < 0) { DHD_ERROR(("%s: set promisc %d failed\n", dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti))); } } static int _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, struct ether_addr *addr) { char buf[32]; wl_ioctl_t ioc; int ret; if (!bcm_mkiovar("cur_etheraddr", (char*)addr, ETHER_ADDR_LEN, buf, 32)) { DHD_ERROR(("%s: mkiovar failed for cur_etheraddr\n", dhd_ifname(&dhd->pub, ifidx))); return -1; } memset(&ioc, 0, sizeof(ioc)); ioc.cmd = WLC_SET_VAR; ioc.buf = buf; ioc.len = 32; ioc.set = TRUE; ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len); if (ret < 0) { DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx))); } else { memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN); memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN); } return ret; } #ifdef SOFTAP extern struct net_device *ap_net_dev; extern tsk_ctl_t 
ap_eth_ctl; /* ap netdev heper thread ctl */ #endif static void dhd_op_if(dhd_if_t *ifp) { dhd_info_t *dhd; int ret = 0, err = 0; #ifdef SOFTAP unsigned long flags; #endif if (!ifp || !ifp->info || !ifp->idx) return; ASSERT(ifp && ifp->info && ifp->idx); /* Virtual interfaces only */ dhd = ifp->info; DHD_TRACE(("%s: idx %d, state %d\n", __FUNCTION__, ifp->idx, ifp->state)); #ifdef WL_CFG80211 if (wl_cfg80211_is_progress_ifchange()) return; #endif switch (ifp->state) { case DHD_IF_ADD: /* * Delete the existing interface before overwriting it * in case we missed the WLC_E_IF_DEL event. */ if (ifp->net != NULL) { DHD_ERROR(("%s: ERROR: netdev:%s already exists, try free & unregister \n", __FUNCTION__, ifp->net->name)); netif_stop_queue(ifp->net); unregister_netdev(ifp->net); free_netdev(ifp->net); } /* Allocate etherdev, including space for private structure */ if (!(ifp->net = alloc_etherdev(sizeof(dhd)))) { DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__)); ret = -ENOMEM; } if (ret == 0) { strncpy(ifp->net->name, ifp->name, IFNAMSIZ); ifp->net->name[IFNAMSIZ - 1] = '\0'; memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd)); #ifdef WL_CFG80211 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) if (!wl_cfg80211_notify_ifadd(ifp->net, ifp->idx, ifp->bssidx, (void*)dhd_net_attach)) { ifp->state = DHD_IF_NONE; return; } #endif if ((err = dhd_net_attach(&dhd->pub, ifp->idx)) != 0) { DHD_ERROR(("%s: dhd_net_attach failed, err %d\n", __FUNCTION__, err)); ret = -EOPNOTSUPP; } else { #if defined(SOFTAP) if (ap_fw_loaded && !(dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) { /* semaphore that the soft AP CODE waits on */ flags = dhd_os_spin_lock(&dhd->pub); /* save ptr to wl0.1 netdev for use in wl_iw.c */ ap_net_dev = ifp->net; /* signal to the SOFTAP 'sleeper' thread, wl0.1 is ready */ up(&ap_eth_ctl.sema); dhd_os_spin_unlock(&dhd->pub, flags); } #endif DHD_TRACE(("\n ==== pid:%x, net_device for if:%s created ===\n\n", current->pid, ifp->net->name)); ifp->state = DHD_IF_NONE; } 
} break; case DHD_IF_DEL: /* Make sure that we don't enter again here if .. */ /* dhd_op_if is called again from some other context */ ifp->state = DHD_IF_DELETING; if (ifp->net != NULL) { DHD_TRACE(("\n%s: got 'DHD_IF_DEL' state\n", __FUNCTION__)); #ifdef WL_CFG80211 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) { wl_cfg80211_ifdel_ops(ifp->net); } #endif netif_stop_queue(ifp->net); unregister_netdev(ifp->net); ret = DHD_DEL_IF; #ifdef WL_CFG80211 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) { wl_cfg80211_notify_ifdel(); } #endif } break; case DHD_IF_DELETING: break; default: DHD_ERROR(("%s: bad op %d\n", __FUNCTION__, ifp->state)); ASSERT(!ifp->state); break; } if (ret < 0) { ifp->set_multicast = FALSE; if (ifp->net) { free_netdev(ifp->net); ifp->net = NULL; } dhd->iflist[ifp->idx] = NULL; #ifdef SOFTAP flags = dhd_os_spin_lock(&dhd->pub); if (ifp->net == ap_net_dev) ap_net_dev = NULL; /* NULL SOFTAP global wl0.1 as well */ dhd_os_spin_unlock(&dhd->pub, flags); #endif /* SOFTAP */ MFREE(dhd->pub.osh, ifp, sizeof(*ifp)); } } static int _dhd_sysioc_thread(void *data) { tsk_ctl_t *tsk = (tsk_ctl_t *)data; dhd_info_t *dhd = (dhd_info_t *)tsk->parent; int i; #ifdef SOFTAP bool in_ap = FALSE; unsigned long flags; #endif DAEMONIZE("dhd_sysioc"); complete(&tsk->completed); while (down_interruptible(&tsk->sema) == 0) { SMP_RD_BARRIER_DEPENDS(); if (tsk->terminated) { break; } dhd_net_if_lock_local(dhd); DHD_OS_WAKE_LOCK(&dhd->pub); for (i = 0; i < DHD_MAX_IFS; i++) { if (dhd->iflist[i]) { DHD_TRACE(("%s: interface %d\n", __FUNCTION__, i)); #ifdef SOFTAP flags = dhd_os_spin_lock(&dhd->pub); in_ap = (ap_net_dev != NULL); dhd_os_spin_unlock(&dhd->pub, flags); #endif /* SOFTAP */ if (dhd->iflist[i] && dhd->iflist[i]->state) dhd_op_if(dhd->iflist[i]); if (dhd->iflist[i] == NULL) { DHD_TRACE(("\n\n %s: interface %d just been removed," "!\n\n", __FUNCTION__, i)); continue; } #ifdef SOFTAP if (in_ap && dhd->set_macaddress) { DHD_TRACE(("attempt to set MAC for %s in AP Mode," 
"blocked. \n", dhd->iflist[i]->net->name)); dhd->set_macaddress = FALSE; continue; } if (in_ap && dhd->iflist[i]->set_multicast) { DHD_TRACE(("attempt to set MULTICAST list for %s" "in AP Mode, blocked. \n", dhd->iflist[i]->net->name)); dhd->iflist[i]->set_multicast = FALSE; continue; } #endif /* SOFTAP */ if (dhd->iflist[i]->set_multicast) { dhd->iflist[i]->set_multicast = FALSE; _dhd_set_multicast_list(dhd, i); } if (dhd->set_macaddress) { dhd->set_macaddress = FALSE; _dhd_set_mac_address(dhd, i, &dhd->macvalue); } } } DHD_OS_WAKE_UNLOCK(&dhd->pub); dhd_net_if_unlock_local(dhd); } DHD_TRACE(("%s: stopped\n", __FUNCTION__)); complete_and_exit(&tsk->completed, 0); } static int dhd_set_mac_address(struct net_device *dev, void *addr) { int ret = 0; dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); struct sockaddr *sa = (struct sockaddr *)addr; int ifidx; ifidx = dhd_net2idx(dhd, dev); if (ifidx == DHD_BAD_IF) return -1; ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0); memcpy(&dhd->macvalue, sa->sa_data, ETHER_ADDR_LEN); dhd->set_macaddress = TRUE; up(&dhd->thr_sysioc_ctl.sema); return ret; } static void dhd_set_multicast_list(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); int ifidx; ifidx = dhd_net2idx(dhd, dev); if (ifidx == DHD_BAD_IF) return; ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0); dhd->iflist[ifidx]->set_multicast = TRUE; up(&dhd->thr_sysioc_ctl.sema); } #ifdef PROP_TXSTATUS int dhd_os_wlfc_block(dhd_pub_t *pub) { dhd_info_t *di = (dhd_info_t *)(pub->info); ASSERT(di != NULL); spin_lock_bh(&di->wlfc_spinlock); return 1; } int dhd_os_wlfc_unblock(dhd_pub_t *pub) { dhd_info_t *di = (dhd_info_t *)(pub->info); ASSERT(di != NULL); spin_unlock_bh(&di->wlfc_spinlock); return 1; } const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 }; uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; #define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]] #endif /* PROP_TXSTATUS */ int dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf) { int ret; dhd_info_t *dhd 
= (dhd_info_t *)(dhdp->info); struct ether_header *eh = NULL; /* Reject if down */ if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) { /* free the packet here since the caller won't */ PKTFREE(dhdp->osh, pktbuf, TRUE); return -ENODEV; } /* Update multicast statistic */ if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) { uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf); eh = (struct ether_header *)pktdata; if (ETHER_ISMULTI(eh->ether_dhost)) dhdp->tx_multicast++; if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) atomic_inc(&dhd->pend_8021x_cnt); } else { PKTFREE(dhd->pub.osh, pktbuf, TRUE); return BCME_ERROR; } /* Look into the packet and update the packet priority */ if (PKTPRIO(pktbuf) == 0) pktsetprio(pktbuf, FALSE); #ifdef PROP_TXSTATUS if (dhdp->wlfc_state) { /* store the interface ID */ DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx); /* store destination MAC in the tag as well */ DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost); /* decide which FIFO this packet belongs to */ if (ETHER_ISMULTI(eh->ether_dhost)) /* one additional queue index (highest AC + 1) is used for bc/mc queue */ DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT); else DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf))); } else #endif /* PROP_TXSTATUS */ /* If the protocol uses a data header, apply it */ dhd_prot_hdrpush(dhdp, ifidx, pktbuf); /* Use bus module to send data frame */ #ifdef WLMEDIA_HTSF dhd_htsf_addtxts(dhdp, pktbuf); #endif #ifdef PROP_TXSTATUS if (dhdp->wlfc_state && ((athost_wl_status_info_t*)dhdp->wlfc_state)->proptxstatus_mode != WLFC_FCMODE_NONE) { dhd_os_wlfc_block(dhdp); ret = dhd_wlfc_enque_sendq(dhdp->wlfc_state, DHD_PKTTAG_FIFO(PKTTAG(pktbuf)), pktbuf); dhd_wlfc_commit_packets(dhdp->wlfc_state, (f_commitpkt_t)dhd_bus_txdata, dhdp->bus); if (((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if) { ((athost_wl_status_info_t*)dhdp->wlfc_state)->toggle_host_if = 0; } dhd_os_wlfc_unblock(dhdp); } else /* non-proptxstatus way */ ret = 
dhd_bus_txdata(dhdp->bus, pktbuf); #else ret = dhd_bus_txdata(dhdp->bus, pktbuf); #endif /* PROP_TXSTATUS */ return ret; } int dhd_start_xmit(struct sk_buff *skb, struct net_device *net) { int ret; void *pktbuf; dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); int ifidx; #ifdef WLMEDIA_HTSF uint8 htsfdlystat_sz = dhd->pub.htsfdlystat_sz; #else uint8 htsfdlystat_sz = 0; #endif DHD_TRACE(("%s: Enter\n", __FUNCTION__)); DHD_OS_WAKE_LOCK(&dhd->pub); /* Reject if down */ if (!dhd->pub.up || (dhd->pub.busstate == DHD_BUS_DOWN)) { DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n", __FUNCTION__, dhd->pub.up, dhd->pub.busstate)); netif_stop_queue(net); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) /* Send Event when bus down detected during data session */ if (dhd->pub.busstate == DHD_BUS_DOWN) { DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__)); net_os_send_hang_message(net); } #endif DHD_OS_WAKE_UNLOCK(&dhd->pub); return -ENODEV; } ifidx = dhd_net2idx(dhd, net); if (ifidx == DHD_BAD_IF) { DHD_ERROR(("%s: bad ifidx %d\n", __FUNCTION__, ifidx)); netif_stop_queue(net); DHD_OS_WAKE_UNLOCK(&dhd->pub); return -ENODEV; } /* Make sure there's enough room for any header */ if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) { struct sk_buff *skb2; DHD_INFO(("%s: insufficient headroom\n", dhd_ifname(&dhd->pub, ifidx))); dhd->pub.tx_realloc++; skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz); dev_kfree_skb(skb); if ((skb = skb2) == NULL) { DHD_ERROR(("%s: skb_realloc_headroom failed\n", dhd_ifname(&dhd->pub, ifidx))); ret = -ENOMEM; goto done; } } /* Convert to packet */ if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) { DHD_ERROR(("%s: PKTFRMNATIVE failed\n", dhd_ifname(&dhd->pub, ifidx))); dev_kfree_skb_any(skb); ret = -ENOMEM; goto done; } #ifdef WLMEDIA_HTSF if (htsfdlystat_sz && PKTLEN(dhd->pub.osh, pktbuf) >= ETHER_ADDR_LEN) { uint8 *pktdata = (uint8 *)PKTDATA(dhd->pub.osh, pktbuf); struct ether_header *eh = (struct ether_header 
*)pktdata; if (!ETHER_ISMULTI(eh->ether_dhost) && (ntoh16(eh->ether_type) == ETHER_TYPE_IP)) { eh->ether_type = hton16(ETHER_TYPE_BRCM_PKTDLYSTATS); } } #endif ret = dhd_sendpkt(&dhd->pub, ifidx, pktbuf); done: if (ret) dhd->pub.dstats.tx_dropped++; else dhd->pub.tx_packets++; DHD_OS_WAKE_UNLOCK(&dhd->pub); /* Return ok: we always eat the packet */ return 0; } void dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state) { struct net_device *net; dhd_info_t *dhd = dhdp->info; int i; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); dhdp->txoff = state; ASSERT(dhd); if (ifidx == ALL_INTERFACES) { /* Flow control on all active interfaces */ for (i = 0; i < DHD_MAX_IFS; i++) { if (dhd->iflist[i]) { net = dhd->iflist[i]->net; if (state == ON) netif_stop_queue(net); else netif_wake_queue(net); } } } else { if (dhd->iflist[ifidx]) { net = dhd->iflist[ifidx]->net; if (state == ON) netif_stop_queue(net); else netif_wake_queue(net); } } } void dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan) { dhd_info_t *dhd = (dhd_info_t *)dhdp->info; struct sk_buff *skb; uchar *eth; uint len; void *data, *pnext = NULL, *save_pktbuf; int i; dhd_if_t *ifp; wl_event_msg_t event; int tout_rx = 0; int tout_ctrl = 0; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); save_pktbuf = pktbuf; for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) { struct ether_header *eh; struct dot11_llc_snap_header *lsh; ifp = dhd->iflist[ifidx]; if (ifp == NULL) { DHD_ERROR(("%s: ifp is NULL. drop packet\n", __FUNCTION__)); PKTFREE(dhdp->osh, pktbuf, TRUE); continue; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) /* Dropping packets before registering net device to avoid kernel panic */ if (!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) { DHD_ERROR(("%s: net device is NOT registered yet. 
drop packet\n", __FUNCTION__)); PKTFREE(dhdp->osh, pktbuf, TRUE); continue; } #endif pnext = PKTNEXT(dhdp->osh, pktbuf); PKTSETNEXT(wl->sh.osh, pktbuf, NULL); eh = (struct ether_header *)PKTDATA(wl->sh.osh, pktbuf); lsh = (struct dot11_llc_snap_header *)&eh[1]; if ((ntoh16(eh->ether_type) < ETHER_TYPE_MIN) && (PKTLEN(wl->sh.osh, pktbuf) >= RFC1042_HDR_LEN) && bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 && lsh->type == HTON16(BTA_PROT_L2CAP)) { amp_hci_ACL_data_t *ACL_data = (amp_hci_ACL_data_t *) ((uint8 *)eh + RFC1042_HDR_LEN); ACL_data = NULL; } #ifdef PROP_TXSTATUS if (dhdp->wlfc_state && PKTLEN(wl->sh.osh, pktbuf) == 0) { /* WLFC may send header only packet when there is an urgent message but no packet to piggy-back on */ ((athost_wl_status_info_t*)dhdp->wlfc_state)->stats.wlfc_header_only_pkt++; PKTFREE(dhdp->osh, pktbuf, TRUE); DHD_TRACE(("RX: wlfc header \n")); continue; } #endif skb = PKTTONATIVE(dhdp->osh, pktbuf); /* Get the protocol, maintain skb around eth_type_trans() * The main reason for this hack is for the limitation of * Linux 2.4 where 'eth_type_trans' uses the 'net->hard_header_len' * to perform skb_pull inside vs ETH_HLEN. Since to avoid * coping of the packet coming from the network stack to add * BDC, Hardware header etc, during network interface registration * we set the 'net->hard_header_len' to ETH_HLEN + extra space required * for BDC, Hardware header etc. 
and not just the ETH_HLEN */ eth = skb->data; len = skb->len; ifp = dhd->iflist[ifidx]; if (ifp == NULL) ifp = dhd->iflist[0]; ASSERT(ifp); skb->dev = ifp->net; skb->protocol = eth_type_trans(skb, skb->dev); if (skb->pkt_type == PACKET_MULTICAST) { dhd->pub.rx_multicast++; } skb->data = eth; skb->len = len; #ifdef WLMEDIA_HTSF dhd_htsf_addrxts(dhdp, pktbuf); #endif /* Strip header, count, deliver upward */ skb_pull(skb, ETH_HLEN); /* Process special event packets and then discard them */ if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) { dhd_wl_host_event(dhd, &ifidx, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) skb->mac_header, #else skb->mac.raw, #endif &event, &data); wl_event_to_host_order(&event); if (!tout_ctrl) tout_ctrl = DHD_PACKET_TIMEOUT_MS; if (event.event_type == WLC_E_BTA_HCI_EVENT) { dhd_bta_doevt(dhdp, data, event.datalen); } #ifdef PNO_SUPPORT if (event.event_type == WLC_E_PFN_NET_FOUND) { tout_ctrl *= 2; } #endif /* PNO_SUPPORT */ } else { tout_rx = DHD_PACKET_TIMEOUT_MS; } ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]); if (dhd->iflist[ifidx] && !dhd->iflist[ifidx]->state) ifp = dhd->iflist[ifidx]; if (ifp->net) ifp->net->last_rx = jiffies; dhdp->dstats.rx_bytes += skb->len; dhdp->rx_packets++; /* Local count */ if (in_interrupt()) { netif_rx(skb); } else { /* If the receive is not processed inside an ISR, * the softirqd must be woken explicitly to service * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled * by netif_rx_ni(), but in earlier kernels, we need * to do it manually. 
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) netif_rx_ni(skb); #else ulong flags; netif_rx(skb); local_irq_save(flags); RAISE_RX_SOFTIRQ(); local_irq_restore(flags); #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ } } DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx); DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl); } void dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx) { /* Linux version has nothing to do */ return; } void dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success) { uint ifidx; dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); struct ether_header *eh; uint16 type; uint len; dhd_prot_hdrpull(dhdp, &ifidx, txp); eh = (struct ether_header *)PKTDATA(dhdp->osh, txp); type = ntoh16(eh->ether_type); if (type == ETHER_TYPE_802_1X) atomic_dec(&dhd->pend_8021x_cnt); /* Crack open the packet and check to see if it is BT HCI ACL data packet. * If yes generate packet completion event. */ len = PKTLEN(dhdp->osh, txp); /* Generate ACL data tx completion event locally to avoid SDIO bus transaction */ if ((type < ETHER_TYPE_MIN) && (len >= RFC1042_HDR_LEN)) { struct dot11_llc_snap_header *lsh = (struct dot11_llc_snap_header *)&eh[1]; if (bcmp(lsh, BT_SIG_SNAP_MPROT, DOT11_LLC_SNAP_HDR_LEN - 2) == 0 && ntoh16(lsh->type) == BTA_PROT_L2CAP) { dhd_bta_tx_hcidata_complete(dhdp, txp, success); } } } static struct net_device_stats * dhd_get_stats(struct net_device *net) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); dhd_if_t *ifp; int ifidx; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); ifidx = dhd_net2idx(dhd, net); if (ifidx == DHD_BAD_IF) { DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__)); return NULL; } ifp = dhd->iflist[ifidx]; ASSERT(dhd && ifp); if (dhd->pub.up) { /* Use the protocol to get dongle stats */ dhd_prot_dstats(&dhd->pub); } /* Copy dongle stats to net device stats */ ifp->stats.rx_packets = dhd->pub.dstats.rx_packets; ifp->stats.tx_packets = dhd->pub.dstats.tx_packets; ifp->stats.rx_bytes = dhd->pub.dstats.rx_bytes; 
ifp->stats.tx_bytes = dhd->pub.dstats.tx_bytes; ifp->stats.rx_errors = dhd->pub.dstats.rx_errors; ifp->stats.tx_errors = dhd->pub.dstats.tx_errors; ifp->stats.rx_dropped = dhd->pub.dstats.rx_dropped; ifp->stats.tx_dropped = dhd->pub.dstats.tx_dropped; ifp->stats.multicast = dhd->pub.dstats.multicast; return &ifp->stats; } #ifdef DHDTHREAD static int dhd_watchdog_thread(void *data) { tsk_ctl_t *tsk = (tsk_ctl_t *)data; dhd_info_t *dhd = (dhd_info_t *)tsk->parent; /* This thread doesn't need any user-level access, * so get rid of all our resources */ if (dhd_watchdog_prio > 0) { struct sched_param param; param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)? dhd_watchdog_prio:(MAX_RT_PRIO-1); setScheduler(current, SCHED_FIFO, &param); } DAEMONIZE("dhd_watchdog"); /* Run until signal received */ complete(&tsk->completed); while (1) if (down_interruptible (&tsk->sema) == 0) { unsigned long flags; SMP_RD_BARRIER_DEPENDS(); if (tsk->terminated) { break; } dhd_os_sdlock(&dhd->pub); if (dhd->pub.dongle_reset == FALSE) { DHD_TIMER(("%s:\n", __FUNCTION__)); /* Call the bus module watchdog */ dhd_bus_watchdog(&dhd->pub); flags = dhd_os_spin_lock(&dhd->pub); /* Count the tick for reference */ dhd->pub.tickcnt++; /* Reschedule the watchdog */ if (dhd->wd_timer_valid) mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000); dhd_os_spin_unlock(&dhd->pub, flags); } dhd_os_sdunlock(&dhd->pub); DHD_OS_WAKE_UNLOCK(&dhd->pub); } else { break; } complete_and_exit(&tsk->completed, 0); } #endif /* DHDTHREAD */ static void dhd_watchdog(ulong data) { dhd_info_t *dhd = (dhd_info_t *)data; unsigned long flags; DHD_OS_WAKE_LOCK(&dhd->pub); if (dhd->pub.dongle_reset) { DHD_OS_WAKE_UNLOCK(&dhd->pub); return; } #ifdef DHDTHREAD if (dhd->thr_wdt_ctl.thr_pid >= 0) { up(&dhd->thr_wdt_ctl.sema); return; } #endif /* DHDTHREAD */ dhd_os_sdlock(&dhd->pub); /* Call the bus module watchdog */ dhd_bus_watchdog(&dhd->pub); flags = dhd_os_spin_lock(&dhd->pub); /* Count the tick for reference */ 
dhd->pub.tickcnt++; /* Reschedule the watchdog */ if (dhd->wd_timer_valid) mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000); dhd_os_spin_unlock(&dhd->pub, flags); dhd_os_sdunlock(&dhd->pub); DHD_OS_WAKE_UNLOCK(&dhd->pub); } #ifdef DHDTHREAD static int dhd_dpc_thread(void *data) { tsk_ctl_t *tsk = (tsk_ctl_t *)data; dhd_info_t *dhd = (dhd_info_t *)tsk->parent; /* This thread doesn't need any user-level access, * so get rid of all our resources */ if (dhd_dpc_prio > 0) { struct sched_param param; param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1); setScheduler(current, SCHED_FIFO, &param); } DAEMONIZE("dhd_dpc"); /* DHD_OS_WAKE_LOCK is called in dhd_sched_dpc[dhd_linux.c] down below */ /* signal: thread has started */ complete(&tsk->completed); /* Run until signal received */ while (1) { if (down_interruptible(&tsk->sema) == 0) { SMP_RD_BARRIER_DEPENDS(); if (tsk->terminated) { break; } /* Call bus dpc unless it indicated down (then clean stop) */ if (dhd->pub.busstate != DHD_BUS_DOWN) { if (dhd_bus_dpc(dhd->pub.bus)) { up(&tsk->sema); } else { DHD_OS_WAKE_UNLOCK(&dhd->pub); } } else { if (dhd->pub.up) dhd_bus_stop(dhd->pub.bus, TRUE); DHD_OS_WAKE_UNLOCK(&dhd->pub); } } else break; } complete_and_exit(&tsk->completed, 0); } #endif /* DHDTHREAD */ static void dhd_dpc(ulong data) { dhd_info_t *dhd; dhd = (dhd_info_t *)data; /* this (tasklet) can be scheduled in dhd_sched_dpc[dhd_linux.c] * down below , wake lock is set, * the tasklet is initialized in dhd_attach() */ /* Call bus dpc unless it indicated down (then clean stop) */ if (dhd->pub.busstate != DHD_BUS_DOWN) { if (dhd_bus_dpc(dhd->pub.bus)) tasklet_schedule(&dhd->tasklet); else DHD_OS_WAKE_UNLOCK(&dhd->pub); } else { dhd_bus_stop(dhd->pub.bus, TRUE); DHD_OS_WAKE_UNLOCK(&dhd->pub); } } void dhd_sched_dpc(dhd_pub_t *dhdp) { dhd_info_t *dhd = (dhd_info_t *)dhdp->info; DHD_OS_WAKE_LOCK(dhdp); #ifdef DHDTHREAD if (dhd->thr_dpc_ctl.thr_pid >= 0) { up(&dhd->thr_dpc_ctl.sema); 
return; } #endif /* DHDTHREAD */ tasklet_schedule(&dhd->tasklet); } #ifdef TOE /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */ static int dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol) { wl_ioctl_t ioc; char buf[32]; int ret; memset(&ioc, 0, sizeof(ioc)); ioc.cmd = WLC_GET_VAR; ioc.buf = buf; ioc.len = (uint)sizeof(buf); ioc.set = FALSE; strcpy(buf, "toe_ol"); if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { /* Check for older dongle image that doesn't support toe_ol */ if (ret == -EIO) { DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub, ifidx))); return -EOPNOTSUPP; } DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); return ret; } memcpy(toe_ol, buf, sizeof(uint32)); return 0; } /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */ static int dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol) { wl_ioctl_t ioc; char buf[32]; int toe, ret; memset(&ioc, 0, sizeof(ioc)); ioc.cmd = WLC_SET_VAR; ioc.buf = buf; ioc.len = (uint)sizeof(buf); ioc.set = TRUE; /* Set toe_ol as requested */ strcpy(buf, "toe_ol"); memcpy(&buf[sizeof("toe_ol")], &toe_ol, sizeof(uint32)); if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { DHD_ERROR(("%s: could not set toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); return ret; } /* Enable toe globally only if any components are enabled. 
*/ toe = (toe_ol != 0); strcpy(buf, "toe"); memcpy(&buf[sizeof("toe")], &toe, sizeof(uint32)); if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret)); return ret; } return 0; } #endif /* TOE */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) static void dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); sprintf(info->driver, "wl"); sprintf(info->version, "%lu", dhd->pub.drv_version); } struct ethtool_ops dhd_ethtool_ops = { .get_drvinfo = dhd_ethtool_get_drvinfo }; #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) static int dhd_ethtool(dhd_info_t *dhd, void *uaddr) { struct ethtool_drvinfo info; char drvname[sizeof(info.driver)]; uint32 cmd; #ifdef TOE struct ethtool_value edata; uint32 toe_cmpnt, csum_dir; int ret; #endif DHD_TRACE(("%s: Enter\n", __FUNCTION__)); /* all ethtool calls start with a cmd word */ if (copy_from_user(&cmd, uaddr, sizeof (uint32))) return -EFAULT; switch (cmd) { case ETHTOOL_GDRVINFO: /* Copy out any request driver name */ if (copy_from_user(&info, uaddr, sizeof(info))) return -EFAULT; strncpy(drvname, info.driver, sizeof(info.driver)); drvname[sizeof(info.driver)-1] = '\0'; /* clear struct for return */ memset(&info, 0, sizeof(info)); info.cmd = cmd; /* if dhd requested, identify ourselves */ if (strcmp(drvname, "?dhd") == 0) { sprintf(info.driver, "dhd"); strcpy(info.version, EPI_VERSION_STR); } /* otherwise, require dongle to be up */ else if (!dhd->pub.up) { DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__)); return -ENODEV; } /* finally, report dongle driver type */ else if (dhd->pub.iswl) sprintf(info.driver, "wl"); else sprintf(info.driver, "xx"); sprintf(info.version, "%lu", dhd->pub.drv_version); if (copy_to_user(uaddr, &info, sizeof(info))) return -EFAULT; DHD_CTL(("%s: given %*s, 
returning %s\n", __FUNCTION__, (int)sizeof(drvname), drvname, info.driver)); break; #ifdef TOE /* Get toe offload components from dongle */ case ETHTOOL_GRXCSUM: case ETHTOOL_GTXCSUM: if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) return ret; csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; edata.cmd = cmd; edata.data = (toe_cmpnt & csum_dir) ? 1 : 0; if (copy_to_user(uaddr, &edata, sizeof(edata))) return -EFAULT; break; /* Set toe offload components in dongle */ case ETHTOOL_SRXCSUM: case ETHTOOL_STXCSUM: if (copy_from_user(&edata, uaddr, sizeof(edata))) return -EFAULT; /* Read the current settings, update and write back */ if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0) return ret; csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL; if (edata.data != 0) toe_cmpnt |= csum_dir; else toe_cmpnt &= ~csum_dir; if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0) return ret; /* If setting TX checksum mode, tell Linux the new mode */ if (cmd == ETHTOOL_STXCSUM) { if (edata.data) dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM; else dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM; } break; #endif /* TOE */ default: return -EOPNOTSUPP; } return 0; } #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error) { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) if (!dhdp) return FALSE; if ((error == -ETIMEDOUT) || ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) { DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate)); net_os_send_hang_message(net); return TRUE; } #endif return FALSE; } static int dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); dhd_ioctl_t ioc; int bcmerror = 0; int buflen = 0; void *buf = NULL; uint driver = 0; int ifidx; int ret; DHD_OS_WAKE_LOCK(&dhd->pub); /* 
send to dongle only if we are not waiting for reload already */ if (dhd->pub.hang_was_sent) { DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__)); DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS); DHD_OS_WAKE_UNLOCK(&dhd->pub); return OSL_ERROR(BCME_DONGLE_DOWN); } ifidx = dhd_net2idx(dhd, net); DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd)); if (ifidx == DHD_BAD_IF) { DHD_ERROR(("%s: BAD IF\n", __FUNCTION__)); DHD_OS_WAKE_UNLOCK(&dhd->pub); return -1; } #if defined(WL_WIRELESS_EXT) /* linux wireless extensions */ if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) { /* may recurse, do NOT lock */ ret = wl_iw_ioctl(net, ifr, cmd); DHD_OS_WAKE_UNLOCK(&dhd->pub); return ret; } #endif /* defined(WL_WIRELESS_EXT) */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) if (cmd == SIOCETHTOOL) { ret = dhd_ethtool(dhd, (void*)ifr->ifr_data); DHD_OS_WAKE_UNLOCK(&dhd->pub); return ret; } #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 4, 2) */ if (cmd == SIOCDEVPRIVATE+1) { ret = wl_android_priv_cmd(net, ifr, cmd); dhd_check_hang(net, &dhd->pub, ret); DHD_OS_WAKE_UNLOCK(&dhd->pub); return ret; } if (cmd != SIOCDEVPRIVATE) { DHD_OS_WAKE_UNLOCK(&dhd->pub); return -EOPNOTSUPP; } memset(&ioc, 0, sizeof(ioc)); /* Copy the ioc control structure part of ioctl request */ if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) { bcmerror = -BCME_BADADDR; goto done; } /* Copy out any buffer passed */ if (ioc.buf) { buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN); /* optimization for direct ioctl calls from kernel */ /* if (segment_eq(get_fs(), KERNEL_DS)) { buf = ioc.buf; } else { */ { if (!(buf = (char*)MALLOC(dhd->pub.osh, buflen))) { bcmerror = -BCME_NOMEM; goto done; } if (copy_from_user(buf, ioc.buf, buflen)) { bcmerror = -BCME_BADADDR; goto done; } } } /* To differentiate between wl and dhd read 4 more byes */ if ((copy_from_user(&driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t), sizeof(uint)) != 0)) { bcmerror = -BCME_BADADDR; goto 
done; } if (!capable(CAP_NET_ADMIN)) { bcmerror = -BCME_EPERM; goto done; } /* check for local dhd ioctl and handle it */ if (driver == DHD_IOCTL_MAGIC) { bcmerror = dhd_ioctl((void *)&dhd->pub, &ioc, buf, buflen); if (bcmerror) dhd->pub.bcmerror = bcmerror; goto done; } /* send to dongle (must be up, and wl). */ if (dhd->pub.busstate != DHD_BUS_DATA) { bcmerror = BCME_DONGLE_DOWN; goto done; } if (!dhd->pub.iswl) { bcmerror = BCME_DONGLE_DOWN; goto done; } /* * Flush the TX queue if required for proper message serialization: * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to * prevent M4 encryption and * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to * prevent disassoc frame being sent before WPS-DONE frame. */ if (ioc.cmd == WLC_SET_KEY || (ioc.cmd == WLC_SET_VAR && ioc.buf != NULL && strncmp("wsec_key", ioc.buf, 9) == 0) || (ioc.cmd == WLC_SET_VAR && ioc.buf != NULL && strncmp("bsscfg:wsec_key", ioc.buf, 15) == 0) || ioc.cmd == WLC_DISASSOC) dhd_wait_pend8021x(net); #ifdef WLMEDIA_HTSF if (ioc.buf) { /* short cut wl ioctl calls here */ if (strcmp("htsf", ioc.buf) == 0) { dhd_ioctl_htsf_get(dhd, 0); return BCME_OK; } if (strcmp("htsflate", ioc.buf) == 0) { if (ioc.set) { memset(ts, 0, sizeof(tstamp_t)*TSMAX); memset(&maxdelayts, 0, sizeof(tstamp_t)); maxdelay = 0; tspktcnt = 0; maxdelaypktno = 0; memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN); memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN); memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN); memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN); } else { dhd_dump_latency(); } return BCME_OK; } if (strcmp("htsfclear", ioc.buf) == 0) { memset(&vi_d1.bin, 0, sizeof(uint32)*NUMBIN); memset(&vi_d2.bin, 0, sizeof(uint32)*NUMBIN); memset(&vi_d3.bin, 0, sizeof(uint32)*NUMBIN); memset(&vi_d4.bin, 0, sizeof(uint32)*NUMBIN); htsf_seqnum = 0; return BCME_OK; } if (strcmp("htsfhis", ioc.buf) == 0) { dhd_dump_htsfhisto(&vi_d1, "H to D"); dhd_dump_htsfhisto(&vi_d2, "D to D"); 
dhd_dump_htsfhisto(&vi_d3, "D to H"); dhd_dump_htsfhisto(&vi_d4, "H to H"); return BCME_OK; } if (strcmp("tsport", ioc.buf) == 0) { if (ioc.set) { memcpy(&tsport, ioc.buf + 7, 4); } else { DHD_ERROR(("current timestamp port: %d \n", tsport)); } return BCME_OK; } } #endif /* WLMEDIA_HTSF */ bcmerror = dhd_wl_ioctl(&dhd->pub, ifidx, (wl_ioctl_t *)&ioc, buf, buflen); done: dhd_check_hang(net, &dhd->pub, bcmerror); if (!bcmerror && buf && ioc.buf) { if (copy_to_user(ioc.buf, buf, buflen)) bcmerror = -EFAULT; } if (buf) MFREE(dhd->pub.osh, buf, buflen); DHD_OS_WAKE_UNLOCK(&dhd->pub); return OSL_ERROR(bcmerror); } #ifdef WL_CFG80211 static int dhd_cleanup_virt_ifaces(dhd_info_t *dhd) { int i = 1; /* Leave ifidx 0 [Primary Interface] */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) int rollback_lock = FALSE; #endif DHD_TRACE(("%s: Enter \n", __func__)); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) /* release lock for unregister_netdev */ if (rtnl_is_locked()) { rtnl_unlock(); rollback_lock = TRUE; } #endif for (i = 1; i < DHD_MAX_IFS; i++) { dhd_net_if_lock_local(dhd); if (dhd->iflist[i]) { DHD_TRACE(("Deleting IF: %d \n", i)); if ((dhd->iflist[i]->state != DHD_IF_DEL) && (dhd->iflist[i]->state != DHD_IF_DELETING)) { dhd->iflist[i]->state = DHD_IF_DEL; dhd->iflist[i]->idx = i; dhd_op_if(dhd->iflist[i]); } } dhd_net_if_unlock_local(dhd); } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) if (rollback_lock) rtnl_lock(); #endif return 0; } #endif /* WL_CFG80211 */ static int dhd_stop(struct net_device *net) { int ifidx = 0; dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); DHD_OS_WAKE_LOCK(&dhd->pub); DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net)); if (dhd->pub.up == 0) { goto exit; } ifidx = dhd_net2idx(dhd, net); #ifdef WL_CFG80211 if (ifidx == 0) { wl_cfg80211_down(NULL); /* * For CFG80211: Clean up all the left over virtual interfaces * when the primary Interface is brought down. 
[ifconfig wlan0 down] */ if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) && (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) { dhd_cleanup_virt_ifaces(dhd); } } #endif #ifdef PROP_TXSTATUS dhd_wlfc_cleanup(&dhd->pub); #endif /* Set state and stop OS transmissions */ dhd->pub.up = 0; netif_stop_queue(net); /* Stop the protocol module */ dhd_prot_stop(&dhd->pub); OLD_MOD_DEC_USE_COUNT; exit: #if defined(WL_CFG80211) if (ifidx == 0 && !dhd_download_fw_on_driverload) wl_android_wifi_off(net); #endif dhd->pub.rxcnt_timeout = 0; dhd->pub.txcnt_timeout = 0; DHD_OS_WAKE_UNLOCK(&dhd->pub); return 0; } static int dhd_open(struct net_device *net) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(net); #ifdef TOE uint32 toe_ol; #endif int ifidx; int32 ret = 0; DHD_OS_WAKE_LOCK(&dhd->pub); /* Update FW path if it was changed */ if ((firmware_path != NULL) && (firmware_path[0] != '\0')) { if (firmware_path[strlen(firmware_path)-1] == '\n') firmware_path[strlen(firmware_path)-1] = '\0'; strcpy(fw_path, firmware_path); firmware_path[0] = '\0'; } dhd->pub.hang_was_sent = 0; #if !defined(WL_CFG80211) /* * Force start if ifconfig_up gets called before START command * We keep WEXT's wl_control_wl_start to provide backward compatibility * This should be removed in the future */ ret = wl_control_wl_start(net); if (ret != 0) { DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); ret = -1; goto exit; } #endif ifidx = dhd_net2idx(dhd, net); DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); if (ifidx < 0) { DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__)); ret = -1; goto exit; } if (!dhd->iflist[ifidx] || dhd->iflist[ifidx]->state == DHD_IF_DEL) { DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__)); ret = -1; goto exit; } if (ifidx == 0) { atomic_set(&dhd->pend_8021x_cnt, 0); #if defined(WL_CFG80211) DHD_ERROR(("\n%s\n", dhd_version)); if (!dhd_download_fw_on_driverload) { ret = wl_android_wifi_on(net); if (ret != 0) { DHD_ERROR(("%s: failed with 
code %d\n", __FUNCTION__, ret)); ret = -1; goto exit; } } #endif /* defined(WL_CFG80211) */ if (dhd->pub.busstate != DHD_BUS_DATA) { /* try to bring up bus */ if ((ret = dhd_bus_start(&dhd->pub)) != 0) { DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret)); ret = -1; goto exit; } } /* dhd_prot_init has been called in dhd_bus_start or wl_android_wifi_on */ memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN); #ifdef TOE /* Get current TOE mode from dongle */ if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM; else dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM; #endif /* TOE */ #if defined(WL_CFG80211) if (unlikely(wl_cfg80211_up(NULL))) { DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__)); ret = -1; goto exit; } #endif /* WL_CFG80211 */ } /* Allow transmit calls */ netif_start_queue(net); dhd->pub.up = 1; #ifdef BCMDBGFS dhd_dbg_init(&dhd->pub); #endif OLD_MOD_INC_USE_COUNT; exit: if (ret) dhd_stop(net); DHD_OS_WAKE_UNLOCK(&dhd->pub); return ret; } int dhd_do_driver_init(struct net_device *net) { dhd_info_t *dhd = NULL; if (!net) { DHD_ERROR(("Primary Interface not initialized \n")); return -EINVAL; } dhd = *(dhd_info_t **)netdev_priv(net); /* If driver is already initialized, do nothing */ if (dhd->pub.busstate == DHD_BUS_DATA) { DHD_TRACE(("Driver already Inititalized. 
Nothing to do")); return 0; } if (dhd_open(net) < 0) { DHD_ERROR(("Driver Init Failed \n")); return -1; } return 0; } osl_t * dhd_osl_attach(void *pdev, uint bustype) { return osl_attach(pdev, bustype, TRUE); } void dhd_osl_detach(osl_t *osh) { if (MALLOCED(osh)) { DHD_ERROR(("%s: MEMORY LEAK %d bytes\n", __FUNCTION__, MALLOCED(osh))); } osl_detach(osh); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) up(&dhd_registration_sem); #endif } int dhd_add_if(dhd_info_t *dhd, int ifidx, void *handle, char *name, uint8 *mac_addr, uint32 flags, uint8 bssidx) { dhd_if_t *ifp; DHD_TRACE(("%s: idx %d, handle->%p\n", __FUNCTION__, ifidx, handle)); ASSERT(dhd && (ifidx < DHD_MAX_IFS)); ifp = dhd->iflist[ifidx]; if (ifp != NULL) { if (ifp->net != NULL) { netif_stop_queue(ifp->net); unregister_netdev(ifp->net); free_netdev(ifp->net); } } else if ((ifp = MALLOC(dhd->pub.osh, sizeof(dhd_if_t))) == NULL) { DHD_ERROR(("%s: OOM - dhd_if_t\n", __FUNCTION__)); return -ENOMEM; } memset(ifp, 0, sizeof(dhd_if_t)); ifp->info = dhd; dhd->iflist[ifidx] = ifp; strncpy(ifp->name, name, IFNAMSIZ); ifp->name[IFNAMSIZ] = '\0'; if (mac_addr != NULL) memcpy(&ifp->mac_addr, mac_addr, ETHER_ADDR_LEN); if (handle == NULL) { ifp->state = DHD_IF_ADD; ifp->idx = ifidx; ifp->bssidx = bssidx; ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0); up(&dhd->thr_sysioc_ctl.sema); } else ifp->net = (struct net_device *)handle; return 0; } void dhd_del_if(dhd_info_t *dhd, int ifidx) { dhd_if_t *ifp; DHD_TRACE(("%s: idx %d\n", __FUNCTION__, ifidx)); ASSERT(dhd && ifidx && (ifidx < DHD_MAX_IFS)); ifp = dhd->iflist[ifidx]; if (!ifp) { DHD_ERROR(("%s: Null interface\n", __FUNCTION__)); return; } ifp->state = DHD_IF_DEL; ifp->idx = ifidx; ASSERT(dhd->thr_sysioc_ctl.thr_pid >= 0); up(&dhd->thr_sysioc_ctl.sema); } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) static struct net_device_ops dhd_ops_pri = { .ndo_open = dhd_open, .ndo_stop = dhd_stop, .ndo_get_stats = dhd_get_stats, .ndo_do_ioctl = dhd_ioctl_entry, 
.ndo_start_xmit = dhd_start_xmit, .ndo_set_mac_address = dhd_set_mac_address, .ndo_set_multicast_list = dhd_set_multicast_list, }; static struct net_device_ops dhd_ops_virt = { .ndo_get_stats = dhd_get_stats, .ndo_do_ioctl = dhd_ioctl_entry, .ndo_start_xmit = dhd_start_xmit, .ndo_set_mac_address = dhd_set_mac_address, .ndo_set_multicast_list = dhd_set_multicast_list, }; #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) */ dhd_pub_t * dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) { dhd_info_t *dhd = NULL; struct net_device *net = NULL; dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); /* updates firmware nvram path if it was provided as module parameters */ if ((firmware_path != NULL) && (firmware_path[0] != '\0')) strcpy(fw_path, firmware_path); if ((nvram_path != NULL) && (nvram_path[0] != '\0')) strcpy(nv_path, nvram_path); /* Allocate etherdev, including space for private structure */ if (!(net = alloc_etherdev(sizeof(dhd)))) { DHD_ERROR(("%s: OOM - alloc_etherdev\n", __FUNCTION__)); goto fail; } dhd_state |= DHD_ATTACH_STATE_NET_ALLOC; /* Allocate primary dhd_info */ if (!(dhd = MALLOC(osh, sizeof(dhd_info_t)))) { DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__)); goto fail; } memset(dhd, 0, sizeof(dhd_info_t)); #ifdef DHDTHREAD dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID; dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID; #else dhd->dhd_tasklet_create = FALSE; #endif /* DHDTHREAD */ dhd->thr_sysioc_ctl.thr_pid = DHD_PID_KT_INVALID; dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC; /* * Save the dhd_info into the priv */ memcpy((void *)netdev_priv(net), &dhd, sizeof(dhd)); dhd->pub.osh = osh; /* Link to info module */ dhd->pub.info = dhd; /* Link to bus module */ dhd->pub.bus = bus; dhd->pub.hdrlen = bus_hdrlen; /* Set network interface name if it was provided as module parameter */ if (iface_name[0]) { int len; char ch; strncpy(net->name, iface_name, IFNAMSIZ); net->name[IFNAMSIZ - 1] = 0; 
len = strlen(net->name); ch = net->name[len - 1]; if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2)) strcat(net->name, "%d"); } if (dhd_add_if(dhd, 0, (void *)net, net->name, NULL, 0, 0) == DHD_BAD_IF) goto fail; dhd_state |= DHD_ATTACH_STATE_ADD_IF; #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) net->open = NULL; #else net->netdev_ops = NULL; #endif sema_init(&dhd->proto_sem, 1); #ifdef PROP_TXSTATUS spin_lock_init(&dhd->wlfc_spinlock); dhd->pub.wlfc_enabled = TRUE; #endif /* PROP_TXSTATUS */ /* Initialize other structure content */ init_waitqueue_head(&dhd->ioctl_resp_wait); init_waitqueue_head(&dhd->ctrl_wait); /* Initialize the spinlocks */ spin_lock_init(&dhd->sdlock); spin_lock_init(&dhd->txqlock); spin_lock_init(&dhd->dhd_lock); /* Initialize Wakelock stuff */ spin_lock_init(&dhd->wakelock_spinlock); dhd->wakelock_counter = 0; dhd->wakelock_rx_timeout_enable = 0; dhd->wakelock_ctrl_timeout_enable = 0; #ifdef CONFIG_HAS_WAKELOCK wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake"); wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake"); wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake"); #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) mutex_init(&dhd->dhd_net_if_mutex); mutex_init(&dhd->dhd_suspend_mutex); #endif dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT; /* Attach and link in the protocol */ if (dhd_prot_attach(&dhd->pub) != 0) { DHD_ERROR(("dhd_prot_attach failed\n")); goto fail; } dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH; #ifdef WL_CFG80211 /* Attach and link in the cfg80211 */ if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) { DHD_ERROR(("wl_cfg80211_attach failed\n")); goto fail; } dhd_monitor_init(&dhd->pub); dhd_state |= DHD_ATTACH_STATE_CFG80211; #endif #if defined(WL_WIRELESS_EXT) /* Attach and link in the iw */ if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) { if (wl_iw_attach(net, (void *)&dhd->pub) != 0) { DHD_ERROR(("wl_iw_attach failed\n")); goto fail; } dhd_state |= 
DHD_ATTACH_STATE_WL_ATTACH; } #endif /* defined(WL_WIRELESS_EXT) */ /* Set up the watchdog timer */ init_timer(&dhd->timer); dhd->timer.data = (ulong)dhd; dhd->timer.function = dhd_watchdog; #ifdef DHDTHREAD /* Initialize thread based operation and lock */ sema_init(&dhd->sdsem, 1); if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)) { dhd->threads_only = TRUE; } else { dhd->threads_only = FALSE; } if (dhd_dpc_prio >= 0) { /* Initialize watchdog thread */ PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0); } else { dhd->thr_wdt_ctl.thr_pid = -1; } /* Set up the bottom half handler */ if (dhd_dpc_prio >= 0) { /* Initialize DPC thread */ PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0); } else { /* use tasklet for dpc */ tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd); dhd->thr_dpc_ctl.thr_pid = -1; } #else /* Set up the bottom half handler */ tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd); dhd->dhd_tasklet_create = TRUE; #endif /* DHDTHREAD */ if (dhd_sysioc) { PROC_START(_dhd_sysioc_thread, dhd, &dhd->thr_sysioc_ctl, 0); } else { dhd->thr_sysioc_ctl.thr_pid = -1; } dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) INIT_WORK(&dhd->work_hang, dhd_hang_process); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ /* * Save the dhd_info into the priv */ memcpy(netdev_priv(net), &dhd, sizeof(dhd)); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) register_pm_notifier(&dhd_sleep_pm_notifier); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20; dhd->early_suspend.suspend = dhd_early_suspend; dhd->early_suspend.resume = dhd_late_resume; register_early_suspend(&dhd->early_suspend); dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE; #endif #ifdef ARP_OFFLOAD_SUPPORT dhd->pend_ipaddr = 0; 
register_inetaddr_notifier(&dhd_notifier); #endif /* ARP_OFFLOAD_SUPPORT */ dhd_state |= DHD_ATTACH_STATE_DONE; dhd->dhd_state = dhd_state; return &dhd->pub; fail: if (dhd_state < DHD_ATTACH_STATE_DHD_ALLOC) { if (net) free_netdev(net); } else { DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n", __FUNCTION__, dhd_state, &dhd->pub)); dhd->dhd_state = dhd_state; dhd_detach(&dhd->pub); dhd_free(&dhd->pub); } return NULL; } int dhd_bus_start(dhd_pub_t *dhdp) { int ret = -1; dhd_info_t *dhd = (dhd_info_t*)dhdp->info; unsigned long flags; ASSERT(dhd); DHD_TRACE(("Enter %s:\n", __FUNCTION__)); #ifdef DHDTHREAD if (dhd->threads_only) dhd_os_sdlock(dhdp); #endif /* DHDTHREAD */ /* try to download image and nvram to the dongle */ if ((dhd->pub.busstate == DHD_BUS_DOWN) && (fw_path != NULL) && (fw_path[0] != '\0') && (nv_path != NULL) && (nv_path[0] != '\0')) { /* wake lock moved to dhdsdio_download_firmware */ if (!(dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh, fw_path, nv_path))) { DHD_ERROR(("%s: dhdsdio_probe_download failed. 
firmware = %s nvram = %s\n", __FUNCTION__, fw_path, nv_path)); #ifdef DHDTHREAD if (dhd->threads_only) dhd_os_sdunlock(dhdp); #endif /* DHDTHREAD */ return -1; } } if (dhd->pub.busstate != DHD_BUS_LOAD) { #ifdef DHDTHREAD if (dhd->threads_only) dhd_os_sdunlock(dhdp); #endif /* DHDTHREAD */ return -ENETDOWN; } /* Start the watchdog timer */ dhd->pub.tickcnt = 0; dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms); /* Bring up the bus */ if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) { DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret)); #ifdef DHDTHREAD if (dhd->threads_only) dhd_os_sdunlock(dhdp); #endif /* DHDTHREAD */ return ret; } #if defined(OOB_INTR_ONLY) /* Host registration for OOB interrupt */ if (bcmsdh_register_oob_intr(dhdp)) { /* deactivate timer and wait for the handler to finish */ flags = dhd_os_spin_lock(&dhd->pub); dhd->wd_timer_valid = FALSE; dhd_os_spin_unlock(&dhd->pub, flags); del_timer_sync(&dhd->timer); DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__)); #ifdef DHDTHREAD if (dhd->threads_only) dhd_os_sdunlock(dhdp); #endif /* DHDTHREAD */ return -ENODEV; } /* Enable oob at firmware */ dhd_enable_oob_intr(dhd->pub.bus, TRUE); #endif /* defined(OOB_INTR_ONLY) */ /* If bus is not ready, can't come up */ if (dhd->pub.busstate != DHD_BUS_DATA) { flags = dhd_os_spin_lock(&dhd->pub); dhd->wd_timer_valid = FALSE; dhd_os_spin_unlock(&dhd->pub, flags); del_timer_sync(&dhd->timer); DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__)); #ifdef DHDTHREAD if (dhd->threads_only) dhd_os_sdunlock(dhdp); #endif /* DHDTHREAD */ return -ENODEV; } #ifdef DHDTHREAD if (dhd->threads_only) dhd_os_sdunlock(dhdp); #endif /* DHDTHREAD */ #ifdef READ_MACADDR dhd_read_macaddr(dhd); #endif /* Bus is ready, do any protocol initialization */ if ((ret = dhd_prot_init(&dhd->pub)) < 0) return ret; #ifdef WRITE_MACADDR dhd_write_macaddr(dhd->pub.mac.octet); #endif #ifdef ARP_OFFLOAD_SUPPORT if (dhd->pend_ipaddr) { #ifdef AOE_IP_ALIAS_SUPPORT 
aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE); #endif /* AOE_IP_ALIAS_SUPPORT */ dhd->pend_ipaddr = 0; } #endif /* ARP_OFFLOAD_SUPPORT */ return 0; } #if !defined(AP) && defined(WLP2P) && defined(WL_ENABLE_P2P_IF) /* For Android ICS MR2 release, the concurrent mode is enabled by default and the firmware * name would be fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the STA * firmware and accordingly enable concurrent mode (Apply P2P settings). SoftAP firmware * would still be named as fw_bcmdhd_apsta. */ static u32 dhd_concurrent_fw(dhd_pub_t *dhd) { int ret = 0; char buf[WLC_IOCTL_SMLEN]; if ((!op_mode) && (strstr(fw_path, "_p2p") == NULL) && (strstr(fw_path, "_apsta") == NULL)) { /* Given path is for the STA firmware. Check whether P2P support is present in * the firmware. If so, set mode as P2P (concurrent support). */ memset(buf, 0, sizeof(buf)); bcm_mkiovar("p2p", 0, 0, buf, sizeof(buf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) { DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret)); } else if (buf[0] == 1) { DHD_TRACE(("%s: P2P is supported\n", __FUNCTION__)); return 1; } } return ret; } #endif /* * dhd_preinit_ioctls makes special pre-setting in the firmware before radio turns on * returns : 0 if all settings passed or negative value if anything failed */ int dhd_preinit_ioctls(dhd_pub_t *dhd) { int ret = 0; char eventmask[WL_EVENTING_MASK_LEN]; char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ #if !defined(WL_CFG80211) uint up = 0; #endif /* defined(WL_CFG80211) */ uint power_mode = PM_FAST; uint32 dongle_align = DHD_SDALIGN; uint32 glom = 0; uint bcn_timeout = DHD_BEACON_TIMEOUT_NORMAL; uint retry_max = 3; #if defined(ARP_OFFLOAD_SUPPORT) int arpoe = 1; #endif #if defined(KEEP_ALIVE) int res; #endif /* defined(KEEP_ALIVE) */ int scan_assoc_time = DHD_SCAN_ACTIVE_TIME; int scan_unassoc_time = 40; int scan_passive_time = 
DHD_SCAN_PASSIVE_TIME; char buf[WLC_IOCTL_SMLEN]; char *ptr; uint32 listen_interval = LISTEN_INTERVAL; /* Default Listen Interval in Beacons */ uint16 chipID; #if defined(SOFTAP) uint dtim = 1; #endif #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211)) uint32 mpc = 0; /* Turn MPC off for AP/APSTA mode */ #endif #if defined(AP) || defined(WLP2P) uint32 apsta = 1; /* Enable APSTA mode */ #endif /* defined(AP) || defined(WLP2P) */ #ifdef GET_CUSTOM_MAC_ENABLE struct ether_addr ea_addr; #endif /* GET_CUSTOM_MAC_ENABLE */ DHD_TRACE(("Enter %s\n", __FUNCTION__)); dhd->op_mode = 0; #ifdef GET_CUSTOM_MAC_ENABLE ret = dhd_custom_get_mac_address(ea_addr.octet); if (!ret) { memset(buf, 0, sizeof(buf)); bcm_mkiovar("cur_etheraddr", (void *)&ea_addr, ETHER_ADDR_LEN, buf, sizeof(buf)); ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); if (ret < 0) { DHD_ERROR(("%s: can't set custom MAC address , error=%d\n", __FUNCTION__, ret)); return BCME_NOTUP; } memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN); } else { #endif /* GET_CUSTOM_MAC_ENABLE */ /* Get the default device MAC address directly from firmware */ memset(buf, 0, sizeof(buf)); bcm_mkiovar("cur_etheraddr", 0, 0, buf, sizeof(buf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) { DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret)); return BCME_NOTUP; } /* Update public MAC address after reading from Firmware */ memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN); #ifdef GET_CUSTOM_MAC_ENABLE } #endif /* GET_CUSTOM_MAC_ENABLE */ #ifdef SET_RANDOM_MAC_SOFTAP if ((!op_mode && strstr(fw_path, "_apsta") != NULL) || (op_mode == HOSTAPD_MASK)) { uint rand_mac; srandom32((uint)jiffies); rand_mac = random32(); iovbuf[0] = 0x02; /* locally administered bit */ iovbuf[1] = 0x1A; iovbuf[2] = 0x11; iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0; iovbuf[4] = (unsigned char)(rand_mac >> 8); iovbuf[5] = (unsigned char)(rand_mac >> 16); 
bcm_mkiovar("cur_etheraddr", (void *)iovbuf, ETHER_ADDR_LEN, buf, sizeof(buf)); ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, sizeof(buf), TRUE, 0); if (ret < 0) { DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret)); } else memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN); } #endif /* SET_RANDOM_MAC_SOFTAP */ DHD_TRACE(("Firmware = %s\n", fw_path)); #if !defined(AP) && defined(WLP2P) /* Check if firmware with WFD support used */ #if defined(WL_ENABLE_P2P_IF) if ((ret = dhd_concurrent_fw(dhd)) < 0) { DHD_ERROR(("%s error : firmware can't support p2p mode\n", __FUNCTION__)); goto done; } #endif /* (WL_ENABLE_P2P_IF) */ if ((!op_mode && strstr(fw_path, "_p2p") != NULL) #if defined(WL_ENABLE_P2P_IF) || (op_mode == WFD_MASK) || (dhd_concurrent_fw(dhd) == 1) #endif ) { bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { DHD_ERROR(("%s APSTA setting failed ret= %d\n", __FUNCTION__, ret)); } else { dhd->op_mode |= WFD_MASK; #if !defined(WL_ENABLE_P2P_IF) /* ICS back capability : disable any packet filtering for p2p only mode */ dhd_pkt_filter_enable = FALSE; #endif /*!defined(WL_ENABLE_P2P_IF) */ } } #endif #if !defined(AP) && defined(WL_CFG80211) /* Check if firmware with HostAPD support used */ if ((!op_mode && strstr(fw_path, "_apsta") != NULL) || (op_mode == HOSTAPD_MASK)) { /* Disable A-band for HostAPD */ uint band = WLC_BAND_2G; if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_BAND, (char *)&band, sizeof(band), TRUE, 0)) < 0) { DHD_ERROR(("%s:set band failed error (%d)\n", __FUNCTION__, ret)); } /* Turn off wme if we are having only g ONLY firmware */ bcm_mkiovar("nmode", 0, 0, buf, sizeof(buf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) { DHD_ERROR(("%s:get nmode failed error (%d)\n", __FUNCTION__, ret)); } else { DHD_TRACE(("%s:get nmode returned %d\n", __FUNCTION__,buf[0])); } if (buf[0] == 0) { int wme = 0; 
bcm_mkiovar("wme", (char *)&wme, 4, iovbuf, sizeof(iovbuf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { DHD_ERROR(("%s set wme for HostAPD failed %d\n", __FUNCTION__, ret)); } else { DHD_TRACE(("%s set wme succeeded for g ONLY firmware\n", __FUNCTION__)); } } /* Turn off MPC in AP mode */ bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { DHD_ERROR(("%s mpc for HostAPD failed %d\n", __FUNCTION__, ret)); } else { dhd->op_mode |= HOSTAPD_MASK; #if defined(ARP_OFFLOAD_SUPPORT) arpoe = 0; #endif /* (ARP_OFFLOAD_SUPPORT) */ /* disable any filtering for SoftAP mode */ dhd_pkt_filter_enable = FALSE; } } #endif #if !defined(WL_ENABLE_P2P_IF) /* ICS mode setting for sta */ if ((dhd->op_mode != WFD_MASK) && (dhd->op_mode != HOSTAPD_MASK)) { /* STA only operation mode */ dhd->op_mode |= STA_MASK; dhd_pkt_filter_enable = TRUE; } #endif /* !defined(WL_ENABLE_P2P_IF) */ DHD_ERROR(("Firmware up: op_mode=%d, " "Broadcom Dongle Host Driver mac=%.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", dhd->op_mode, dhd->mac.octet[0], dhd->mac.octet[1], dhd->mac.octet[2], dhd->mac.octet[3], dhd->mac.octet[4], dhd->mac.octet[5])); /* Set Country code */ if (dhd->dhd_cspec.ccode[0] != 0) { bcm_mkiovar("country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t), iovbuf, sizeof(iovbuf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__)); } /* Set Listen Interval */ bcm_mkiovar("assoc_listen", (char *)&listen_interval, 4, iovbuf, sizeof(iovbuf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret)); /* Set PowerSave mode */ dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0); /* Match Host and Dongle rx alignment */ bcm_mkiovar("bus:txglomalign", (char 
*)&dongle_align, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); /* disable glom option for some chips */ chipID = (uint16)dhd_bus_chip_id(dhd); if ((chipID == BCM4330_CHIP_ID) || (chipID == BCM4329_CHIP_ID)) { DHD_INFO(("%s disable glom for chipID=0x%X\n", __FUNCTION__, chipID)); bcm_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); } /* Setup timeout if Beacons are lost and roam is off to report link down */ bcm_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); /* Setup assoc_retry_max count to reconnect target AP in dongle */ bcm_mkiovar("assoc_retry_max", (char *)&retry_max, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); #if defined(AP) && !defined(WLP2P) /* Turn off MPC in AP mode */ bcm_mkiovar("mpc", (char *)&mpc, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); bcm_mkiovar("apsta", (char *)&apsta, 4, iovbuf, sizeof(iovbuf)); dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0); #endif /* defined(AP) && !defined(WLP2P) */ #if defined(SOFTAP) if (ap_fw_loaded == TRUE) { dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0); } #endif #if defined(KEEP_ALIVE) /* Set Keep Alive : be sure to use FW with -keepalive */ #if defined(SOFTAP) if (ap_fw_loaded == FALSE) #endif if ((res = dhd_keep_alive_onoff(dhd)) < 0) DHD_ERROR(("%s set keeplive failed %d\n", __FUNCTION__, res)); #endif /* defined(KEEP_ALIVE) */ /* Read event_msgs mask */ bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0)) < 0) { DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret)); goto done; } bcopy(iovbuf, eventmask, 
WL_EVENTING_MASK_LEN); /* Setup event_msgs */ setbit(eventmask, WLC_E_SET_SSID); setbit(eventmask, WLC_E_PRUNE); setbit(eventmask, WLC_E_AUTH); setbit(eventmask, WLC_E_REASSOC); setbit(eventmask, WLC_E_REASSOC_IND); setbit(eventmask, WLC_E_DEAUTH); setbit(eventmask, WLC_E_DEAUTH_IND); setbit(eventmask, WLC_E_DISASSOC_IND); setbit(eventmask, WLC_E_DISASSOC); setbit(eventmask, WLC_E_JOIN); setbit(eventmask, WLC_E_ASSOC_IND); setbit(eventmask, WLC_E_PSK_SUP); setbit(eventmask, WLC_E_LINK); setbit(eventmask, WLC_E_NDIS_LINK); setbit(eventmask, WLC_E_MIC_ERROR); setbit(eventmask, WLC_E_ASSOC_REQ_IE); setbit(eventmask, WLC_E_ASSOC_RESP_IE); setbit(eventmask, WLC_E_PMKID_CACHE); setbit(eventmask, WLC_E_TXFAIL); setbit(eventmask, WLC_E_JOIN_START); setbit(eventmask, WLC_E_SCAN_COMPLETE); #ifdef WLMEDIA_HTSF setbit(eventmask, WLC_E_HTSFSYNC); #endif /* WLMEDIA_HTSF */ #ifdef PNO_SUPPORT setbit(eventmask, WLC_E_PFN_NET_FOUND); #endif /* PNO_SUPPORT */ /* enable dongle roaming event */ setbit(eventmask, WLC_E_ROAM); #ifdef WL_CFG80211 setbit(eventmask, WLC_E_ESCAN_RESULT); if ((dhd->op_mode & WFD_MASK) == WFD_MASK) { setbit(eventmask, WLC_E_ACTION_FRAME_RX); setbit(eventmask, WLC_E_ACTION_FRAME_COMPLETE); setbit(eventmask, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE); setbit(eventmask, WLC_E_P2P_PROBREQ_MSG); setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE); } #endif /* WL_CFG80211 */ /* Write updated Event mask */ bcm_mkiovar("event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf, sizeof(iovbuf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, sizeof(iovbuf), TRUE, 0)) < 0) { DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret)); goto done; } dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time, sizeof(scan_assoc_time), TRUE, 0); dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time, sizeof(scan_unassoc_time), TRUE, 0); dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time, sizeof(scan_passive_time), 
TRUE, 0); #ifdef ARP_OFFLOAD_SUPPORT /* Set and enable ARP offload feature for STA only */ #if defined(SOFTAP) if (arpoe && !ap_fw_loaded) { #else if (arpoe) { #endif dhd_arp_offload_set(dhd, dhd_arp_mode); dhd_arp_offload_enable(dhd, arpoe); } else { dhd_arp_offload_set(dhd, 0); dhd_arp_offload_enable(dhd, FALSE); } #endif /* ARP_OFFLOAD_SUPPORT */ #ifdef PKT_FILTER_SUPPORT /* Setup defintions for pktfilter , enable in suspend */ dhd->pktfilter_count = 5; /* Setup filter to allow only unicast */ dhd->pktfilter[0] = "100 0 0 0 0x01 0x00"; dhd->pktfilter[1] = NULL; dhd->pktfilter[2] = NULL; dhd->pktfilter[3] = NULL; /* Add filter to pass multicastDNS packet and NOT filter out as Broadcast */ dhd->pktfilter[4] = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB"; #if defined(SOFTAP) if (ap_fw_loaded) { int i; for (i = 0; i < dhd->pktfilter_count; i++) { dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i], 0, dhd_master_mode); } } #endif /* defined(SOFTAP) */ #endif /* PKT_FILTER_SUPPORT */ #if !defined(WL_CFG80211) /* Force STA UP */ if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_UP, (char *)&up, sizeof(up), TRUE, 0)) < 0) { DHD_ERROR(("%s Setting WL UP failed %d\n", __FUNCTION__, ret)); goto done; } #endif /* query for 'ver' to get version info from firmware */ memset(buf, 0, sizeof(buf)); ptr = buf; bcm_mkiovar("ver", (char *)&buf, 4, buf, sizeof(buf)); if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, buf, sizeof(buf), FALSE, 0)) < 0) DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret)); else { bcmstrtok(&ptr, "\n", 0); /* Print fw version info */ DHD_ERROR(("Firmware version = %s\n", buf)); DHD_BLOG(buf, strlen(buf) + 1); DHD_BLOG(dhd_version, strlen(dhd_version) + 1); /* Check and adjust IOCTL response timeout for Manufactring firmware */ if (strstr(buf, MANUFACTRING_FW) != NULL) { dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT * 10); DHD_ERROR(("%s : adjust IOCTL response time for Manufactring Firmware\n", __FUNCTION__)); } } done: return ret; } int dhd_iovar(dhd_pub_t *pub, int ifidx, 
char *name, char *cmd_buf, uint cmd_len, int set)
{
	/* Format the named iovar into a stack buffer with bcm_mkiovar() and
	 * issue it as a WLC_SET_VAR/WLC_GET_VAR ioctl on interface ifidx.
	 * For GET (set == 0) the response is copied back into cmd_buf.
	 * Returns the dhd_wl_ioctl() result.
	 */
	char buf[strlen(name) + 1 + cmd_len];	/* VLA: name + '\0' + payload */
	int len = sizeof(buf);
	wl_ioctl_t ioc;
	int ret;

	len = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);

	memset(&ioc, 0, sizeof(ioc));

	ioc.cmd = set? WLC_SET_VAR : WLC_GET_VAR;
	ioc.buf = buf;
	ioc.len = len;
	ioc.set = TRUE;	/* NOTE(review): always TRUE even for GET; looks like
			 * this should be "ioc.set = set" -- confirm against
			 * dhd_wl_ioctl()'s handling of the .set flag.
			 */

	ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
	if (!set && ret >= 0)
		memcpy(cmd_buf, buf, cmd_len);	/* GET: hand result back to caller */

	return ret;
}

/* Change the MTU of interface ifidx.  The interface must be down, and only
 * sizes within [DHD_MIN_MTU, DHD_MAX_MTU] are accepted.
 * Returns 0 on success, BCME_NOTDOWN if the netdev is running, or
 * BCME_BADARG for an out-of-range size.
 */
int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
{
	struct dhd_info *dhd = dhdp->info;
	struct net_device *dev = NULL;

	ASSERT(dhd && dhd->iflist[ifidx]);

	dev = dhd->iflist[ifidx]->net;
	ASSERT(dev);

	if (netif_running(dev)) {
		DHD_ERROR(("%s: Must be down to change its MTU", dev->name));
		return BCME_NOTDOWN;
	}

#define DHD_MIN_MTU 1500
#define DHD_MAX_MTU 1752

	if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
		DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
		return BCME_BADARG;
	}

	dev->mtu = new_mtu;
	return 0;
}

#ifdef ARP_OFFLOAD_SUPPORT
/* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add)
{
	u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
	int i;
	int ret;

	bzero(ipv4_buf, sizeof(ipv4_buf));

	/* display what we've got */
	ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf));
	DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
#ifdef AOE_DBG
	dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
#endif
	/* now we saved hoste_ip table, clr it in the dongle AOE */
	dhd_aoe_hostip_clr(dhd_pub);

	if (ret) {
		DHD_ERROR(("%s failed\n", __FUNCTION__));
		return;
	}

	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
		/* add: claim the first free (zero) slot for ipa;
		 * remove: zero out the slot matching ipa
		 */
		if (add && (ipv4_buf[i] == 0)) {
			ipv4_buf[i] = ipa;
			add = FALSE; /* added ipa to local table */
			DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
				__FUNCTION__, i));
		} else if (ipv4_buf[i] == ipa) {
			ipv4_buf[i] = 0;
			DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
				__FUNCTION__,
ipa, i)); } if (ipv4_buf[i] != 0) { /* add back host_ip entries from our local cache */ dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i]); DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n", __FUNCTION__, ipv4_buf[i], i)); } } #ifdef AOE_DBG /* see the resulting hostip table */ dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf)); DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__)); dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */ #endif } static int dhd_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; dhd_info_t *dhd; dhd_pub_t *dhd_pub; if (!ifa) return NOTIFY_DONE; dhd = *(dhd_info_t **)netdev_priv(ifa->ifa_dev->dev); dhd_pub = &dhd->pub; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) if (ifa->ifa_dev->dev->netdev_ops == &dhd_ops_pri) { #else if (ifa->ifa_dev->dev) { #endif switch (event) { case NETDEV_UP: DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n", __FUNCTION__, ifa->ifa_label, ifa->ifa_address)); if (dhd->pub.busstate != DHD_BUS_DATA) { DHD_ERROR(("%s: bus not ready, exit\n", __FUNCTION__)); if (dhd->pend_ipaddr) { DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n", __FUNCTION__, dhd->pend_ipaddr)); } dhd->pend_ipaddr = ifa->ifa_address; break; } #ifdef AOE_IP_ALIAS_SUPPORT if (ifa->ifa_label[strlen(ifa->ifa_label)-2] == 0x3a) { DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n", __FUNCTION__)); aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE); } else aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE); #endif break; case NETDEV_DOWN: DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n", __FUNCTION__, ifa->ifa_label, ifa->ifa_address)); dhd->pend_ipaddr = 0; #ifdef AOE_IP_ALIAS_SUPPORT if (!(ifa->ifa_label[strlen(ifa->ifa_label)-2] == 0x3a)) { DHD_ARPOE(("%s: primary interface is down, AOE clr all\n", __FUNCTION__)); dhd_aoe_hostip_clr(&dhd->pub); dhd_aoe_arp_clr(&dhd->pub); } else aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE); #else 
dhd_aoe_hostip_clr(&dhd->pub); dhd_aoe_arp_clr(&dhd->pub); #endif break; default: DHD_ARPOE(("%s: do noting for [%s] Event: %lu\n", __func__, ifa->ifa_label, event)); break; } } return NOTIFY_DONE; } #endif /* ARP_OFFLOAD_SUPPORT */ int dhd_net_attach(dhd_pub_t *dhdp, int ifidx) { dhd_info_t *dhd = (dhd_info_t *)dhdp->info; struct net_device *net = NULL; int err = 0; uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 }; DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx)); ASSERT(dhd && dhd->iflist[ifidx]); net = dhd->iflist[ifidx]->net; ASSERT(net); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) ASSERT(!net->open); net->get_stats = dhd_get_stats; net->do_ioctl = dhd_ioctl_entry; net->hard_start_xmit = dhd_start_xmit; net->set_mac_address = dhd_set_mac_address; net->set_multicast_list = dhd_set_multicast_list; net->open = net->stop = NULL; #else ASSERT(!net->netdev_ops); net->netdev_ops = &dhd_ops_virt; #endif /* Ok, link into the network layer... */ if (ifidx == 0) { /* * device functions for the primary interface only */ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) net->open = dhd_open; net->stop = dhd_stop; #else net->netdev_ops = &dhd_ops_pri; #endif } else { /* * We have to use the primary MAC for virtual interfaces */ memcpy(temp_addr, dhd->iflist[ifidx]->mac_addr, ETHER_ADDR_LEN); /* * Android sets the locally administered bit to indicate that this is a * portable hotspot. This will not work in simultaneous AP/STA mode, * nor with P2P. Need to set the Donlge's MAC address, and then use that. 
*/ if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr, ETHER_ADDR_LEN)) { DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n", __func__, net->name)); temp_addr[0] |= 0x02; } } net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) net->ethtool_ops = &dhd_ethtool_ops; #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) */ #if defined(WL_WIRELESS_EXT) #if WIRELESS_EXT < 19 net->get_wireless_stats = dhd_get_wireless_stats; #endif /* WIRELESS_EXT < 19 */ #if WIRELESS_EXT > 12 net->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def; #endif /* WIRELESS_EXT > 12 */ #endif /* defined(WL_WIRELESS_EXT) */ dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net); memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN); if ((err = register_netdev(net)) != 0) { DHD_ERROR(("couldn't register the net device, err %d\n", err)); goto fail; } printf("Broadcom Dongle Host Driver: register interface [%s]" " MAC: %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", net->name, net->dev_addr[0], net->dev_addr[1], net->dev_addr[2], net->dev_addr[3], net->dev_addr[4], net->dev_addr[5]); #if defined(SOFTAP) && defined(WL_WIRELESS_EXT) && !defined(WL_CFG80211) wl_iw_iscan_set_scan_broadcast_prep(net, 1); #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) if (ifidx == 0) { up(&dhd_registration_sem); } #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ return 0; fail: #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) net->open = NULL; #else net->netdev_ops = NULL; #endif return err; } void dhd_bus_detach(dhd_pub_t *dhdp) { dhd_info_t *dhd; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); if (dhdp) { dhd = (dhd_info_t *)dhdp->info; if (dhd) { /* * In case of Android cfg80211 driver, the bus is down in dhd_stop, * calling stop again will cuase SD read/write errors. 
*/ if (dhd->pub.busstate != DHD_BUS_DOWN) { /* Stop the protocol module */ dhd_prot_stop(&dhd->pub); /* Stop the bus module */ dhd_bus_stop(dhd->pub.bus, TRUE); } #if defined(OOB_INTR_ONLY) bcmsdh_unregister_oob_intr(); #endif /* defined(OOB_INTR_ONLY) */ } } } void dhd_detach(dhd_pub_t *dhdp) { dhd_info_t *dhd; unsigned long flags; int timer_valid = FALSE; if (!dhdp) return; dhd = (dhd_info_t *)dhdp->info; if (!dhd) return; DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state)); if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) { /* Give sufficient time for threads to start running in case * dhd_attach() has failed */ osl_delay(1000*100); } #ifdef ARP_OFFLOAD_SUPPORT unregister_inetaddr_notifier(&dhd_notifier); #endif /* ARP_OFFLOAD_SUPPORT */ #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) { if (dhd->early_suspend.suspend) unregister_early_suspend(&dhd->early_suspend); } #endif /* defined(CONFIG_HAS_EARLYSUSPEND) */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) cancel_work_sync(&dhd->work_hang); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ #if defined(WL_WIRELESS_EXT) if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) { /* Detatch and unlink in the iw */ wl_iw_detach(); } #endif /* defined(WL_WIRELESS_EXT) */ if (dhd->thr_sysioc_ctl.thr_pid >= 0) { PROC_STOP(&dhd->thr_sysioc_ctl); } /* delete all interfaces, start with virtual */ if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) { int i = 1; dhd_if_t *ifp; /* Cleanup virtual interfaces */ for (i = 1; i < DHD_MAX_IFS; i++) { dhd_net_if_lock_local(dhd); if (dhd->iflist[i]) { dhd->iflist[i]->state = DHD_IF_DEL; dhd->iflist[i]->idx = i; dhd_op_if(dhd->iflist[i]); } dhd_net_if_unlock_local(dhd); } /* delete primary interface 0 */ ifp = dhd->iflist[0]; ASSERT(ifp); #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)) if (ifp->net->open) #else if (ifp->net->netdev_ops == &dhd_ops_pri) #endif { if (ifp->net) { 
unregister_netdev(ifp->net); free_netdev(ifp->net); ifp->net = NULL; } MFREE(dhd->pub.osh, ifp, sizeof(*ifp)); dhd->iflist[0] = NULL; } } /* Clear the watchdog timer */ flags = dhd_os_spin_lock(&dhd->pub); timer_valid = dhd->wd_timer_valid; dhd->wd_timer_valid = FALSE; dhd_os_spin_unlock(&dhd->pub, flags); if (timer_valid) del_timer_sync(&dhd->timer); if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) { #ifdef DHDTHREAD if (dhd->thr_wdt_ctl.thr_pid >= 0) { PROC_STOP(&dhd->thr_wdt_ctl); } if (dhd->thr_dpc_ctl.thr_pid >= 0) { PROC_STOP(&dhd->thr_dpc_ctl); } else #endif /* DHDTHREAD */ tasklet_kill(&dhd->tasklet); } if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) { dhd_bus_detach(dhdp); if (dhdp->prot) dhd_prot_detach(dhdp); } #ifdef WL_CFG80211 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) { wl_cfg80211_detach(NULL); dhd_monitor_uninit(); } #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) unregister_pm_notifier(&dhd_sleep_pm_notifier); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP) */ if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) { #ifdef CONFIG_HAS_WAKELOCK wake_lock_destroy(&dhd->wl_wifi); wake_lock_destroy(&dhd->wl_rxwake); wake_lock_destroy(&dhd->wl_ctrlwake); #endif } } void dhd_free(dhd_pub_t *dhdp) { dhd_info_t *dhd; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); if (dhdp) { dhd = (dhd_info_t *)dhdp->info; if (dhd) MFREE(dhd->pub.osh, dhd, sizeof(*dhd)); } } static void __exit dhd_module_cleanup(void) { DHD_TRACE(("%s: Enter\n", __FUNCTION__)); dhd_bus_unregister(); #if defined(CONFIG_WIFI_CONTROL_FUNC) wl_android_wifictrl_func_del(); #endif /* CONFIG_WIFI_CONTROL_FUNC */ wl_android_exit(); /* Call customer gpio to turn off power with WL_REG_ON signal */ dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF); } static int __init dhd_module_init(void) { int error = 0; DHD_TRACE(("%s: Enter\n", __FUNCTION__)); wl_android_init(); #ifdef DHDTHREAD /* Sanity check on the module 
parameters */ do { /* Both watchdog and DPC as tasklets are ok */ if ((dhd_watchdog_prio < 0) && (dhd_dpc_prio < 0)) break; /* If both watchdog and DPC are threads, TX must be deferred */ if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0) && dhd_deferred_tx) break; DHD_ERROR(("Invalid module parameters.\n")); return -EINVAL; } while (0); #endif /* DHDTHREAD */ /* Call customer gpio to turn on power with WL_REG_ON signal */ dhd_customer_gpio_wlan_ctrl(WLAN_POWER_ON); #if defined(CONFIG_WIFI_CONTROL_FUNC) if (wl_android_wifictrl_func_add() < 0) goto fail_1; #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) sema_init(&dhd_registration_sem, 0); #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ error = dhd_bus_register(); if (!error) printf("\n%s\n", dhd_version); else { DHD_ERROR(("%s: sdio_register_driver failed\n", __FUNCTION__)); goto fail_1; } #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) /* * Wait till MMC sdio_register_driver callback called and made driver attach. 
 * It's needed to make sync up exit from dhd insmod and
 * Kernel MMC sdio device callback registration
 */
	if (down_timeout(&dhd_registration_sem,
		msecs_to_jiffies(DHD_REGISTRATION_TIMEOUT)) != 0) {
		error = -ENODEV;
		DHD_ERROR(("%s: sdio_register_driver timeout\n", __FUNCTION__));
		goto fail_2;
	}
#endif
#if defined(WL_CFG80211)
	wl_android_post_init();
#endif /* defined(WL_CFG80211) */

	return error;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
fail_2:
	dhd_bus_unregister();
#endif
fail_1:
#if defined(CONFIG_WIFI_CONTROL_FUNC)
	wl_android_wifictrl_func_del();
#endif
	/* Call customer gpio to turn off power with WL_REG_ON signal */
	dhd_customer_gpio_wlan_ctrl(WLAN_POWER_OFF);

	return error;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
late_initcall(dhd_module_init);
#else
module_init(dhd_module_init);
#endif

module_exit(dhd_module_cleanup);

/*
 * OS specific functions required to implement DHD driver in OS independent way
 */

/* Acquire the protocol-layer semaphore (may sleep).
 * Returns 1 on success, 0 if the dhd_info backpointer is missing.
 */
int
dhd_os_proto_block(dhd_pub_t *pub)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		down(&dhd->proto_sem);
		return 1;
	}

	return 0;
}

/* Release the semaphore taken by dhd_os_proto_block(). */
int
dhd_os_proto_unblock(dhd_pub_t *pub)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);

	if (dhd) {
		up(&dhd->proto_sem);
		return 1;
	}

	return 0;
}

unsigned int
dhd_os_get_ioctl_resp_timeout(void)
{
	return ((unsigned int)dhd_ioctl_timeout_msec);
}

void
dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
{
	dhd_ioctl_timeout_msec = (int)timeout_msec;
}

/* Block until *condition becomes true or the ioctl response timeout expires.
 * Returns the wait_event_timeout() result: 0 on timeout, otherwise the
 * remaining time in jiffies.
 */
int
dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition, bool *pending)
{
	dhd_info_t * dhd = (dhd_info_t *)(pub->info);
	int timeout = dhd_ioctl_timeout_msec;

	/* Convert timeout in millsecond to jiffies */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27))
	timeout = msecs_to_jiffies(timeout);
#else
	timeout = timeout * HZ / 1000;
#endif

	timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
	return timeout;
}

/* Wake any thread blocked in dhd_os_ioctl_resp_wait(). */
int
dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
{
	dhd_info_t *dhd = (dhd_info_t *)(pub->info);

	if (waitqueue_active(&dhd->ioctl_resp_wait)) {
		wake_up(&dhd->ioctl_resp_wait);
	}
	return 0;
}

/* (Re)arm or stop the dongle watchdog timer.  wdtick == 0 stops the timer;
 * non-zero updates dhd_watchdog_ms and re-arms it.  No-op while the bus is
 * still down (firmware not loaded yet).
 */
void
dhd_os_wd_timer(void *bus, uint wdtick)
{
	dhd_pub_t *pub = bus;
	dhd_info_t *dhd = (dhd_info_t *)pub->info;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	flags = dhd_os_spin_lock(pub);

	/* don't start the wd until fw is loaded */
	if (pub->busstate == DHD_BUS_DOWN) {
		dhd_os_spin_unlock(pub, flags);
		return;
	}

	/* Totally stop the timer */
	if (!wdtick && dhd->wd_timer_valid == TRUE) {
		dhd->wd_timer_valid = FALSE;
		dhd_os_spin_unlock(pub, flags);
#ifdef DHDTHREAD
		del_timer_sync(&dhd->timer);
#else
		del_timer(&dhd->timer);
#endif /* DHDTHREAD */
		return;
	}

	if (wdtick) {
		dhd_watchdog_ms = (uint)wdtick;
		/* Re arm the timer, at last watchdog period */
		mod_timer(&dhd->timer, jiffies + dhd_watchdog_ms * HZ / 1000);
		dhd->wd_timer_valid = TRUE;
	}
	dhd_os_spin_unlock(pub, flags);
}

/* Open a firmware/nvram image file.  Returns an opaque struct file * handle
 * for dhd_os_get_image_block()/dhd_os_close_image(), or NULL on failure.
 */
void *
dhd_os_open_image(char *filename)
{
	struct file *fp;

	fp = filp_open(filename, O_RDONLY, 0);
	/*
	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
	 * Alternative:
	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
	 * ???
	 */
	 if (IS_ERR(fp))
		 fp = NULL;

	 return fp;
}

/* Read up to len bytes of the image into buf, advancing the file position.
 * Returns bytes read, 0 at EOF or for a NULL handle, negative on error.
 */
int
dhd_os_get_image_block(char *buf, int len, void *image)
{
	struct file *fp = (struct file *)image;
	int rdlen;

	if (!image)
		return 0;

	rdlen = kernel_read(fp, fp->f_pos, buf, len);
	if (rdlen > 0)
		fp->f_pos += rdlen;

	return rdlen;
}

void
dhd_os_close_image(void *image)
{
	if (image)
		filp_close((struct file *)image, NULL);
}

/* Take the SDIO bus lock: a semaphore when thread-based operation is in use
 * (may sleep), otherwise a BH-disabling spinlock.
 */
void
dhd_os_sdlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);

#ifdef DHDTHREAD
	if (dhd->threads_only)
		down(&dhd->sdsem);
	else
#endif /* DHDTHREAD */
	spin_lock_bh(&dhd->sdlock);
}

void
dhd_os_sdunlock(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);

#ifdef DHDTHREAD
	if (dhd->threads_only)
		up(&dhd->sdsem);
	else
#endif /* DHDTHREAD */
	spin_unlock_bh(&dhd->sdlock);
}

/* Protect the transmit queue against BH context. */
void
dhd_os_sdlock_txq(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);
	spin_lock_bh(&dhd->txqlock);
}

void
dhd_os_sdunlock_txq(dhd_pub_t *pub)
{
	dhd_info_t *dhd;

	dhd = (dhd_info_t *)(pub->info);
	spin_unlock_bh(&dhd->txqlock);
}

/* RX queue locking is intentionally a no-op in this port. */
void
dhd_os_sdlock_rxq(dhd_pub_t *pub)
{
}

void
dhd_os_sdunlock_rxq(dhd_pub_t *pub)
{
}

/* TX-path aliases of the general SDIO bus lock. */
void
dhd_os_sdtxlock(dhd_pub_t *pub)
{
	dhd_os_sdlock(pub);
}

void
dhd_os_sdtxunlock(dhd_pub_t *pub)
{
	dhd_os_sdunlock(pub);
}

#if defined(CONFIG_DHD_USE_STATIC_BUF)
uint8* dhd_os_prealloc(void *osh, int section, uint size)
{
	return (uint8*)wl_android_prealloc(section, size);
}

void dhd_os_prefree(void *osh, void *addr, uint size)
{
}
#endif /* defined(CONFIG_DHD_USE_STATIC_BUF) */

#if defined(WL_WIRELESS_EXT)
/* Wireless-extensions statistics callback; returns NULL while the dongle
 * is down or when the stats query fails.
 */
struct iw_statistics *
dhd_get_wireless_stats(struct net_device *dev)
{
	int res = 0;
	dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);

	if (!dhd->pub.up) {
		return NULL;
	}

	res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);

	if (res == 0)
		return &dhd->iw.wstats;
	else
		return NULL;
}
#endif /* defined(WL_WIRELESS_EXT) */

static int
dhd_wl_host_event(dhd_info_t *dhd, int *ifidx, void *pktdata,
	wl_event_msg_t *event, void **data)
{
	int bcmerror = 0;

	ASSERT(dhd != NULL);
bcmerror = wl_host_event(&dhd->pub, ifidx, pktdata, event, data); if (bcmerror != BCME_OK) return (bcmerror); #if defined(WL_WIRELESS_EXT) if (event->bsscfgidx == 0) { /* * Wireless ext is on primary interface only */ ASSERT(dhd->iflist[*ifidx] != NULL); ASSERT(dhd->iflist[*ifidx]->net != NULL); if (dhd->iflist[*ifidx]->net) { wl_iw_event(dhd->iflist[*ifidx]->net, event, *data); } } #endif /* defined(WL_WIRELESS_EXT) */ #ifdef WL_CFG80211 if ((ntoh32(event->event_type) == WLC_E_IF) && (((dhd_if_event_t *)*data)->action == WLC_E_IF_ADD)) /* If ADD_IF has been called directly by wl utility then we * should not report this. In case if ADD_IF was called from * CFG stack, then too this event need not be reported back */ return (BCME_OK); if ((wl_cfg80211_is_progress_ifchange() || wl_cfg80211_is_progress_ifadd()) && (*ifidx != 0)) { /* * If IF_ADD/CHANGE operation is going on, * discard any event received on the virtual I/F */ return (BCME_OK); } ASSERT(dhd->iflist[*ifidx] != NULL); ASSERT(dhd->iflist[*ifidx]->net != NULL); if (dhd->iflist[*ifidx]->net) { wl_cfg80211_event(dhd->iflist[*ifidx]->net, event, *data); } #endif /* defined(WL_CFG80211) */ return (bcmerror); } /* send up locally generated event */ void dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data) { switch (ntoh32(event->event_type)) { /* Send up locally generated AMP HCI Events */ case WLC_E_BTA_HCI_EVENT: { struct sk_buff *p, *skb; bcm_event_t *msg; wl_event_msg_t *p_bcm_event; char *ptr; uint32 len; uint32 pktlen; dhd_if_t *ifp; dhd_info_t *dhd; uchar *eth; int ifidx; len = ntoh32(event->datalen); pktlen = sizeof(bcm_event_t) + len + 2; dhd = dhdp->info; ifidx = dhd_ifname2idx(dhd, event->ifname); if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) { ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32))); msg = (bcm_event_t *) PKTDATA(dhdp->osh, p); bcopy(&dhdp->mac, &msg->eth.ether_dhost, ETHER_ADDR_LEN); bcopy(&dhdp->mac, &msg->eth.ether_shost, ETHER_ADDR_LEN); 
ETHER_TOGGLE_LOCALADDR(&msg->eth.ether_shost); msg->eth.ether_type = hton16(ETHER_TYPE_BRCM); /* BCM Vendor specific header... */ msg->bcm_hdr.subtype = hton16(BCMILCP_SUBTYPE_VENDOR_LONG); msg->bcm_hdr.version = BCMILCP_BCM_SUBTYPEHDR_VERSION; bcopy(BRCM_OUI, &msg->bcm_hdr.oui[0], DOT11_OUI_LEN); /* vendor spec header length + pvt data length (private indication * hdr + actual message itself) */ msg->bcm_hdr.length = hton16(BCMILCP_BCM_SUBTYPEHDR_MINLENGTH + BCM_MSG_LEN + sizeof(wl_event_msg_t) + (uint16)len); msg->bcm_hdr.usr_subtype = hton16(BCMILCP_BCM_SUBTYPE_EVENT); PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2)); /* copy wl_event_msg_t into sk_buf */ /* pointer to wl_event_msg_t in sk_buf */ p_bcm_event = &msg->event; bcopy(event, p_bcm_event, sizeof(wl_event_msg_t)); /* copy hci event into sk_buf */ bcopy(data, (p_bcm_event + 1), len); msg->bcm_hdr.length = hton16(sizeof(wl_event_msg_t) + ntoh16(msg->bcm_hdr.length)); PKTSETLEN(dhdp->osh, p, (sizeof(bcm_event_t) + len + 2)); ptr = (char *)(msg + 1); /* Last 2 bytes of the message are 0x00 0x00 to signal that there * are no ethertypes which are following this */ ptr[len+0] = 0x00; ptr[len+1] = 0x00; skb = PKTTONATIVE(dhdp->osh, p); eth = skb->data; len = skb->len; ifp = dhd->iflist[ifidx]; if (ifp == NULL) ifp = dhd->iflist[0]; ASSERT(ifp); skb->dev = ifp->net; skb->protocol = eth_type_trans(skb, skb->dev); skb->data = eth; skb->len = len; /* Strip header, count, deliver upward */ skb_pull(skb, ETH_HLEN); /* Send the packet */ if (in_interrupt()) { netif_rx(skb); } else { netif_rx_ni(skb); } } else { /* Could not allocate a sk_buf */ DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__)); } break; } /* case WLC_E_BTA_HCI_EVENT */ default: break; } } void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar) { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) struct dhd_info *dhdinfo = dhd->info; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) int timeout = msecs_to_jiffies(2000); #else int 
timeout = 2 * HZ; #endif dhd_os_sdunlock(dhd); wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout); dhd_os_sdlock(dhd); #endif return; } void dhd_wait_event_wakeup(dhd_pub_t *dhd) { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) struct dhd_info *dhdinfo = dhd->info; if (waitqueue_active(&dhdinfo->ctrl_wait)) wake_up(&dhdinfo->ctrl_wait); #endif return; } int dhd_dev_reset(struct net_device *dev, uint8 flag) { int ret; dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); ret = dhd_bus_devreset(&dhd->pub, flag); if (ret) { DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret)); return ret; } return ret; } int net_os_set_suspend_disable(struct net_device *dev, int val) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); int ret = 0; if (dhd) { ret = dhd->pub.suspend_disable_flag; dhd->pub.suspend_disable_flag = val; } return ret; } int net_os_set_suspend(struct net_device *dev, int val, int force) { int ret = 0; dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); if (dhd) { #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) ret = dhd_set_suspend(val, &dhd->pub); #else ret = dhd_suspend_resume_helper(dhd, val, force); #endif } return ret; } int net_os_set_dtim_skip(struct net_device *dev, int val) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); if (dhd) dhd->pub.dtim_skip = val; return 0; } int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); char *filterp = NULL; int ret = 0; if (!dhd || (num == DHD_UNICAST_FILTER_NUM) || (num == DHD_MDNS_FILTER_NUM)) return ret; if (num >= dhd->pub.pktfilter_count) return -EINVAL; if (add_remove) { switch (num) { case DHD_BROADCAST_FILTER_NUM: filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF"; break; case DHD_MULTICAST4_FILTER_NUM: filterp = "102 0 0 0 0xFFFFFF 0x01005E"; break; case DHD_MULTICAST6_FILTER_NUM: filterp = "103 0 0 0 0xFFFF 0x3333"; break; default: return -EINVAL; } } 
dhd->pub.pktfilter[num] = filterp; return ret; } int dhd_os_set_packet_filter(dhd_pub_t *dhdp, int val) { int ret = 0; /* Packet filtering is set only if we still in early-suspend and * we need either to turn it ON or turn it OFF * We can always turn it OFF in case of early-suspend, but we turn it * back ON only if suspend_disable_flag was not set */ if (dhdp && dhdp->up) { if (dhdp->in_suspend) { if (!val || (val && !dhdp->suspend_disable_flag)) dhd_set_packet_filter(val, dhdp); } } return ret; } int net_os_set_packet_filter(struct net_device *dev, int val) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); return dhd_os_set_packet_filter(&dhd->pub, val); } int dhd_dev_init_ioctl(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); return dhd_preinit_ioctls(&dhd->pub); } #ifdef PNO_SUPPORT /* Linux wrapper to call common dhd_pno_clean */ int dhd_dev_pno_reset(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); return (dhd_pno_clean(&dhd->pub)); } /* Linux wrapper to call common dhd_pno_enable */ int dhd_dev_pno_enable(struct net_device *dev, int pfn_enabled) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); return (dhd_pno_enable(&dhd->pub, pfn_enabled)); } /* Linux wrapper to call common dhd_pno_set */ int dhd_dev_pno_set(struct net_device *dev, wlc_ssid_t* ssids_local, int nssid, ushort scan_fr, int pno_repeat, int pno_freq_expo_max) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); return (dhd_pno_set(&dhd->pub, ssids_local, nssid, scan_fr, pno_repeat, pno_freq_expo_max)); } /* Linux wrapper to call common dhd_pno_set_ex */ int dhd_dev_pno_set_ex(struct net_device *dev, wl_pfn_t* ssidnet, int nssid, ushort pno_interval, int pno_repeat, int pno_expo_max, int pno_lost_time) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); return (dhd_pno_set_ex(&dhd->pub, ssidnet, nssid, pno_interval, pno_repeat, pno_expo_max, pno_lost_time)); } /* Linux wrapper to get pno status */ int 
dhd_dev_get_pno_status(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); return (dhd_pno_get_status(&dhd->pub)); } #endif /* PNO_SUPPORT */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) static void dhd_hang_process(struct work_struct *work) { dhd_info_t *dhd; struct net_device *dev; dhd = (dhd_info_t *)container_of(work, dhd_info_t, work_hang); dev = dhd->iflist[0]->net; if (dev) { rtnl_lock(); dev_close(dev); rtnl_unlock(); #if defined(WL_WIRELESS_EXT) wl_iw_send_priv_event(dev, "HANG"); #endif #if defined(WL_CFG80211) wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED); #endif } } int net_os_send_hang_message(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); int ret = 0; if (dhd) { if (!dhd->pub.hang_was_sent) { dhd->pub.hang_was_sent = 1; schedule_work(&dhd->work_hang); } } return ret; } #endif void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); if (dhd && dhd->pub.up) memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t)); } void dhd_net_if_lock(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); dhd_net_if_lock_local(dhd); } void dhd_net_if_unlock(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); dhd_net_if_unlock_local(dhd); } static void dhd_net_if_lock_local(dhd_info_t *dhd) { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) if (dhd) mutex_lock(&dhd->dhd_net_if_mutex); #endif } static void dhd_net_if_unlock_local(dhd_info_t *dhd) { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) if (dhd) mutex_unlock(&dhd->dhd_net_if_mutex); #endif } static void dhd_suspend_lock(dhd_pub_t *pub) { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) dhd_info_t *dhd = (dhd_info_t *)(pub->info); if (dhd) mutex_lock(&dhd->dhd_suspend_mutex); #endif } static void dhd_suspend_unlock(dhd_pub_t *pub) { #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)) dhd_info_t *dhd = (dhd_info_t 
*)(pub->info); if (dhd) mutex_unlock(&dhd->dhd_suspend_mutex); #endif } unsigned long dhd_os_spin_lock(dhd_pub_t *pub) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); unsigned long flags = 0; if (dhd) spin_lock_irqsave(&dhd->dhd_lock, flags); return flags; } void dhd_os_spin_unlock(dhd_pub_t *pub, unsigned long flags) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); if (dhd) spin_unlock_irqrestore(&dhd->dhd_lock, flags); } static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd) { return (atomic_read(&dhd->pend_8021x_cnt)); } #define MAX_WAIT_FOR_8021X_TX 10 int dhd_wait_pend8021x(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); int timeout = 10 * HZ / 1000; int ntimes = MAX_WAIT_FOR_8021X_TX; int pend = dhd_get_pend_8021x_cnt(dhd); while (ntimes && pend) { if (pend) { set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(timeout); set_current_state(TASK_RUNNING); ntimes--; } pend = dhd_get_pend_8021x_cnt(dhd); } return pend; } #ifdef DHD_DEBUG int write_to_file(dhd_pub_t *dhd, uint8 *buf, int size) { int ret = 0; struct file *fp; mm_segment_t old_fs; loff_t pos = 0; /* change to KERNEL_DS address limit */ old_fs = get_fs(); set_fs(KERNEL_DS); /* open file to write */ fp = filp_open("/tmp/mem_dump", O_WRONLY|O_CREAT, 0640); if (!fp) { printf("%s: open file error\n", __FUNCTION__); ret = -1; goto exit; } /* Write buf to file */ fp->f_op->write(fp, buf, size, &pos); exit: /* free buf before return */ MFREE(dhd->osh, buf, size); /* close file before return */ if (fp) filp_close(fp, current->files); /* restore previous address limit */ set_fs(old_fs); return ret; } #endif /* DHD_DEBUG */ int dhd_os_wake_lock_timeout(dhd_pub_t *pub) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); unsigned long flags; int ret = 0; if (dhd) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ? 
dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable; #ifdef CONFIG_HAS_WAKELOCK if (dhd->wakelock_rx_timeout_enable) wake_lock_timeout(&dhd->wl_rxwake, msecs_to_jiffies(dhd->wakelock_rx_timeout_enable)); if (dhd->wakelock_ctrl_timeout_enable) wake_lock_timeout(&dhd->wl_ctrlwake, msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable)); #endif dhd->wakelock_rx_timeout_enable = 0; dhd->wakelock_ctrl_timeout_enable = 0; spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); } return ret; } int net_os_wake_lock_timeout(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); int ret = 0; if (dhd) ret = dhd_os_wake_lock_timeout(&dhd->pub); return ret; } int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); unsigned long flags; if (dhd) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); if (val > dhd->wakelock_rx_timeout_enable) dhd->wakelock_rx_timeout_enable = val; spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); } return 0; } int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); unsigned long flags; if (dhd) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); if (val > dhd->wakelock_ctrl_timeout_enable) dhd->wakelock_ctrl_timeout_enable = val; spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); } return 0; } int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); int ret = 0; if (dhd) ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val); return ret; } int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); int ret = 0; if (dhd) ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val); return ret; } int dhd_os_wake_lock(dhd_pub_t *pub) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); unsigned long flags; int ret = 0; if (dhd) { 
spin_lock_irqsave(&dhd->wakelock_spinlock, flags); #ifdef CONFIG_HAS_WAKELOCK if (!dhd->wakelock_counter) wake_lock(&dhd->wl_wifi); #endif dhd->wakelock_counter++; ret = dhd->wakelock_counter; spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); } return ret; } int net_os_wake_lock(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); int ret = 0; if (dhd) ret = dhd_os_wake_lock(&dhd->pub); return ret; } int dhd_os_wake_unlock(dhd_pub_t *pub) { dhd_info_t *dhd = (dhd_info_t *)(pub->info); unsigned long flags; int ret = 0; dhd_os_wake_lock_timeout(pub); if (dhd) { spin_lock_irqsave(&dhd->wakelock_spinlock, flags); if (dhd->wakelock_counter) { dhd->wakelock_counter--; #ifdef CONFIG_HAS_WAKELOCK if (!dhd->wakelock_counter) wake_unlock(&dhd->wl_wifi); #endif ret = dhd->wakelock_counter; } spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags); } return ret; } int dhd_os_check_wakelock(void *dhdp) { #ifdef CONFIG_HAS_WAKELOCK dhd_pub_t *pub = (dhd_pub_t *)dhdp; dhd_info_t *dhd; if (!pub) return 0; dhd = (dhd_info_t *)(pub->info); if (dhd && wake_lock_active(&dhd->wl_wifi)) return 1; #endif return 0; } int net_os_wake_unlock(struct net_device *dev) { dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev); int ret = 0; if (dhd) ret = dhd_os_wake_unlock(&dhd->pub); return ret; } int dhd_os_check_if_up(void *dhdp) { dhd_pub_t *pub = (dhd_pub_t *)dhdp; if (!pub) return 0; return pub->up; } int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd) { int ifidx; int ret = 0; dhd_info_t *dhd = NULL; if (!net || !netdev_priv(net)) { DHD_ERROR(("%s invalid parameter\n", __FUNCTION__)); return -EINVAL; } dhd = *(dhd_info_t **)netdev_priv(net); ifidx = dhd_net2idx(dhd, net); if (ifidx == DHD_BAD_IF) { DHD_ERROR(("%s bad ifidx\n", __FUNCTION__)); return -ENODEV; } DHD_OS_WAKE_LOCK(&dhd->pub); ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len); dhd_check_hang(net, &dhd->pub, ret); DHD_OS_WAKE_UNLOCK(&dhd->pub); return ret; } bool 
dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret) { struct net_device *net; net = dhd_idx2net(dhdp, ifidx); return dhd_check_hang(net, dhdp, ret); } #ifdef PROP_TXSTATUS extern int dhd_wlfc_interface_entry_update(void* state, ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea); extern int dhd_wlfc_FIFOcreditmap_update(void* state, uint8* credits); int dhd_wlfc_interface_event(struct dhd_info *dhd, ewlfc_mac_entry_action_t action, uint8 ifid, uint8 iftype, uint8* ea) { if (dhd->pub.wlfc_state == NULL) return BCME_OK; return dhd_wlfc_interface_entry_update(dhd->pub.wlfc_state, action, ifid, iftype, ea); } int dhd_wlfc_FIFOcreditmap_event(struct dhd_info *dhd, uint8* event_data) { if (dhd->pub.wlfc_state == NULL) return BCME_OK; return dhd_wlfc_FIFOcreditmap_update(dhd->pub.wlfc_state, event_data); } int dhd_wlfc_event(struct dhd_info *dhd) { return dhd_wlfc_enable(&dhd->pub); } #endif /* PROP_TXSTATUS */ #ifdef BCMDBGFS #include <linux/debugfs.h> extern uint32 dhd_readregl(void *bp, uint32 addr); extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data); typedef struct dhd_dbgfs { struct dentry *debugfs_dir; struct dentry *debugfs_mem; dhd_pub_t *dhdp; uint32 size; } dhd_dbgfs_t; dhd_dbgfs_t g_dbgfs; static int dhd_dbg_state_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static ssize_t dhd_dbg_state_read(struct file *file, char __user *ubuf, size_t count, loff_t *ppos) { ssize_t rval; uint32 tmp; loff_t pos = *ppos; size_t ret; if (pos < 0) return -EINVAL; if (pos >= g_dbgfs.size || !count) return 0; if (count > g_dbgfs.size - pos) count = g_dbgfs.size - pos; /* Basically enforce aligned 4 byte reads. 
It's up to the user to work out the details */ tmp = dhd_readregl(g_dbgfs.dhdp->bus, file->f_pos & (~3)); ret = copy_to_user(ubuf, &tmp, 4); if (ret == count) return -EFAULT; count -= ret; *ppos = pos + count; rval = count; return rval; } static ssize_t dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { loff_t pos = *ppos; size_t ret; uint32 buf; if (pos < 0) return -EINVAL; if (pos >= g_dbgfs.size || !count) return 0; if (count > g_dbgfs.size - pos) count = g_dbgfs.size - pos; ret = copy_from_user(&buf, ubuf, sizeof(uint32)); if (ret == count) return -EFAULT; /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */ dhd_writeregl(g_dbgfs.dhdp->bus, file->f_pos & (~3), buf); return count; } loff_t dhd_debugfs_lseek(struct file *file, loff_t off, int whence) { loff_t pos = -1; switch (whence) { case 0: pos = off; break; case 1: pos = file->f_pos + off; break; case 2: pos = g_dbgfs.size - off; } return (pos < 0 || pos > g_dbgfs.size) ? 
-EINVAL : (file->f_pos = pos); } static const struct file_operations dhd_dbg_state_ops = { .read = dhd_dbg_state_read, .write = dhd_debugfs_write, .open = dhd_dbg_state_open, .llseek = dhd_debugfs_lseek }; static void dhd_dbg_create(void) { if (g_dbgfs.debugfs_dir) { g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir, NULL, &dhd_dbg_state_ops); } } void dhd_dbg_init(dhd_pub_t *dhdp) { int err; g_dbgfs.dhdp = dhdp; g_dbgfs.size = 0x20000000; /* Allow access to various cores regs */ g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0); if (IS_ERR(g_dbgfs.debugfs_dir)) { err = PTR_ERR(g_dbgfs.debugfs_dir); g_dbgfs.debugfs_dir = NULL; return; } dhd_dbg_create(); return; } void dhd_dbg_remove(void) { debugfs_remove(g_dbgfs.debugfs_mem); debugfs_remove(g_dbgfs.debugfs_dir); bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs)); } #endif /* ifdef BCMDBGFS */ #ifdef WLMEDIA_HTSF static void dhd_htsf_addtxts(dhd_pub_t *dhdp, void *pktbuf) { dhd_info_t *dhd = (dhd_info_t *)(dhdp->info); struct sk_buff *skb; uint32 htsf = 0; uint16 dport = 0, oldmagic = 0xACAC; char *p1; htsfts_t ts; /* timestamp packet */ p1 = (char*) PKTDATA(dhdp->osh, pktbuf); if (PKTLEN(dhdp->osh, pktbuf) > HTSF_MINLEN) { /* memcpy(&proto, p1+26, 4); */ memcpy(&dport, p1+40, 2); /* proto = ((ntoh32(proto))>> 16) & 0xFF; */ dport = ntoh16(dport); } /* timestamp only if icmp or udb iperf with port 5555 */ /* if (proto == 17 && dport == tsport) { */ if (dport >= tsport && dport <= tsport + 20) { skb = (struct sk_buff *) pktbuf; htsf = dhd_get_htsf(dhd, 0); memset(skb->data + 44, 0, 2); /* clear checksum */ memcpy(skb->data+82, &oldmagic, 2); memcpy(skb->data+84, &htsf, 4); memset(&ts, 0, sizeof(htsfts_t)); ts.magic = HTSFMAGIC; ts.prio = PKTPRIO(pktbuf); ts.seqnum = htsf_seqnum++; ts.c10 = get_cycles(); ts.t10 = htsf; ts.endmagic = HTSFENDMAGIC; memcpy(skb->data + HTSF_HOSTOFFSET, &ts, sizeof(ts)); } } static void dhd_dump_htsfhisto(histo_t *his, char *s) { int pktcnt = 0, curval = 0, i; 
for (i = 0; i < (NUMBIN-2); i++) { curval += 500; printf("%d ", his->bin[i]); pktcnt += his->bin[i]; } printf(" max: %d TotPkt: %d neg: %d [%s]\n", his->bin[NUMBIN-2], pktcnt, his->bin[NUMBIN-1], s); } static void sorttobin(int value, histo_t *histo) { int i, binval = 0; if (value < 0) { histo->bin[NUMBIN-1]++; return; } if (value > histo->bin[NUMBIN-2]) /* store the max value */ histo->bin[NUMBIN-2] = value; for (i = 0; i < (NUMBIN-2); i++) { binval += 500; /* 500m s bins */ if (value <= binval) { histo->bin[i]++; return; } } histo->bin[NUMBIN-3]++; } static void dhd_htsf_addrxts(dhd_pub_t *dhdp, void *pktbuf) { dhd_info_t *dhd = (dhd_info_t *)dhdp->info; struct sk_buff *skb; char *p1; uint16 old_magic; int d1, d2, d3, end2end; htsfts_t *htsf_ts; uint32 htsf; skb = PKTTONATIVE(dhdp->osh, pktbuf); p1 = (char*)PKTDATA(dhdp->osh, pktbuf); if (PKTLEN(osh, pktbuf) > HTSF_MINLEN) { memcpy(&old_magic, p1+78, 2); htsf_ts = (htsfts_t*) (p1 + HTSF_HOSTOFFSET - 4); } else return; if (htsf_ts->magic == HTSFMAGIC) { htsf_ts->tE0 = dhd_get_htsf(dhd, 0); htsf_ts->cE0 = get_cycles(); } if (old_magic == 0xACAC) { tspktcnt++; htsf = dhd_get_htsf(dhd, 0); memcpy(skb->data+92, &htsf, sizeof(uint32)); memcpy(&ts[tsidx].t1, skb->data+80, 16); d1 = ts[tsidx].t2 - ts[tsidx].t1; d2 = ts[tsidx].t3 - ts[tsidx].t2; d3 = ts[tsidx].t4 - ts[tsidx].t3; end2end = ts[tsidx].t4 - ts[tsidx].t1; sorttobin(d1, &vi_d1); sorttobin(d2, &vi_d2); sorttobin(d3, &vi_d3); sorttobin(end2end, &vi_d4); if (end2end > 0 && end2end > maxdelay) { maxdelay = end2end; maxdelaypktno = tspktcnt; memcpy(&maxdelayts, &ts[tsidx], 16); } if (++tsidx >= TSMAX) tsidx = 0; } } uint32 dhd_get_htsf(dhd_info_t *dhd, int ifidx) { uint32 htsf = 0, cur_cycle, delta, delta_us; uint32 factor, baseval, baseval2; cycles_t t; t = get_cycles(); cur_cycle = t; if (cur_cycle > dhd->htsf.last_cycle) delta = cur_cycle - dhd->htsf.last_cycle; else { delta = cur_cycle + (0xFFFFFFFF - dhd->htsf.last_cycle); } delta = delta >> 4; if 
(dhd->htsf.coef) { /* times ten to get the first digit */ factor = (dhd->htsf.coef*10 + dhd->htsf.coefdec1); baseval = (delta*10)/factor; baseval2 = (delta*10)/(factor+1); delta_us = (baseval - (((baseval - baseval2) * dhd->htsf.coefdec2)) / 10); htsf = (delta_us << 4) + dhd->htsf.last_tsf + HTSF_BUS_DELAY; } else { DHD_ERROR(("-------dhd->htsf.coef = 0 -------\n")); } return htsf; } static void dhd_dump_latency(void) { int i, max = 0; int d1, d2, d3, d4, d5; printf("T1 T2 T3 T4 d1 d2 t4-t1 i \n"); for (i = 0; i < TSMAX; i++) { d1 = ts[i].t2 - ts[i].t1; d2 = ts[i].t3 - ts[i].t2; d3 = ts[i].t4 - ts[i].t3; d4 = ts[i].t4 - ts[i].t1; d5 = ts[max].t4-ts[max].t1; if (d4 > d5 && d4 > 0) { max = i; } printf("%08X %08X %08X %08X \t%d %d %d %d i=%d\n", ts[i].t1, ts[i].t2, ts[i].t3, ts[i].t4, d1, d2, d3, d4, i); } printf("current idx = %d \n", tsidx); printf("Highest latency %d pkt no.%d total=%d\n", maxdelay, maxdelaypktno, tspktcnt); printf("%08X %08X %08X %08X \t%d %d %d %d\n", maxdelayts.t1, maxdelayts.t2, maxdelayts.t3, maxdelayts.t4, maxdelayts.t2 - maxdelayts.t1, maxdelayts.t3 - maxdelayts.t2, maxdelayts.t4 - maxdelayts.t3, maxdelayts.t4 - maxdelayts.t1); } static int dhd_ioctl_htsf_get(dhd_info_t *dhd, int ifidx) { wl_ioctl_t ioc; char buf[32]; int ret; uint32 s1, s2; struct tsf { uint32 low; uint32 high; } tsf_buf; memset(&ioc, 0, sizeof(ioc)); memset(&tsf_buf, 0, sizeof(tsf_buf)); ioc.cmd = WLC_GET_VAR; ioc.buf = buf; ioc.len = (uint)sizeof(buf); ioc.set = FALSE; strcpy(buf, "tsf"); s1 = dhd_get_htsf(dhd, 0); if ((ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len)) < 0) { if (ret == -EIO) { DHD_ERROR(("%s: tsf is not supported by device\n", dhd_ifname(&dhd->pub, ifidx))); return -EOPNOTSUPP; } return ret; } s2 = dhd_get_htsf(dhd, 0); memcpy(&tsf_buf, buf, sizeof(tsf_buf)); printf(" TSF_h=%04X lo=%08X Calc:htsf=%08X, coef=%d.%d%d delta=%d ", tsf_buf.high, tsf_buf.low, s2, dhd->htsf.coef, dhd->htsf.coefdec1, dhd->htsf.coefdec2, s2-tsf_buf.low); 
printf("lasttsf=%08X lastcycle=%08X\n", dhd->htsf.last_tsf, dhd->htsf.last_cycle); return 0; } void htsf_update(dhd_info_t *dhd, void *data) { static ulong cur_cycle = 0, prev_cycle = 0; uint32 htsf, tsf_delta = 0; uint32 hfactor = 0, cyc_delta, dec1 = 0, dec2, dec3, tmp; ulong b, a; cycles_t t; /* cycles_t in inlcude/mips/timex.h */ t = get_cycles(); prev_cycle = cur_cycle; cur_cycle = t; if (cur_cycle > prev_cycle) cyc_delta = cur_cycle - prev_cycle; else { b = cur_cycle; a = prev_cycle; cyc_delta = cur_cycle + (0xFFFFFFFF - prev_cycle); } if (data == NULL) printf(" tsf update ata point er is null \n"); memcpy(&prev_tsf, &cur_tsf, sizeof(tsf_t)); memcpy(&cur_tsf, data, sizeof(tsf_t)); if (cur_tsf.low == 0) { DHD_INFO((" ---- 0 TSF, do not update, return\n")); return; } if (cur_tsf.low > prev_tsf.low) tsf_delta = (cur_tsf.low - prev_tsf.low); else { DHD_INFO((" ---- tsf low is smaller cur_tsf= %08X, prev_tsf=%08X, \n", cur_tsf.low, prev_tsf.low)); if (cur_tsf.high > prev_tsf.high) { tsf_delta = cur_tsf.low + (0xFFFFFFFF - prev_tsf.low); DHD_INFO((" ---- Wrap around tsf coutner adjusted TSF=%08X\n", tsf_delta)); } else return; /* do not update */ } if (tsf_delta) { hfactor = cyc_delta / tsf_delta; tmp = (cyc_delta - (hfactor * tsf_delta))*10; dec1 = tmp/tsf_delta; dec2 = ((tmp - dec1*tsf_delta)*10) / tsf_delta; tmp = (tmp - (dec1*tsf_delta))*10; dec3 = ((tmp - dec2*tsf_delta)*10) / tsf_delta; if (dec3 > 4) { if (dec2 == 9) { dec2 = 0; if (dec1 == 9) { dec1 = 0; hfactor++; } else { dec1++; } } else dec2++; } } if (hfactor) { htsf = ((cyc_delta * 10) / (hfactor*10+dec1)) + prev_tsf.low; dhd->htsf.coef = hfactor; dhd->htsf.last_cycle = cur_cycle; dhd->htsf.last_tsf = cur_tsf.low; dhd->htsf.coefdec1 = dec1; dhd->htsf.coefdec2 = dec2; } else { htsf = prev_tsf.low; } } #endif /* WLMEDIA_HTSF */
gpl-2.0
chijure/android_kernel_huawei_y210
Documentation/filesystems/configfs/configfs_example_explicit.c
306
12621
/* * vim: noexpandtab ts=8 sts=0 sw=8: * * configfs_example_explicit.c - This file is a demonstration module * containing a number of configfs subsystems. It explicitly defines * each structure without using the helper macros defined in * configfs.h. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * * Based on sysfs: * sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel * * configfs Copyright (C) 2005 Oracle. All rights reserved. */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/configfs.h> /* * 01-childless * * This first example is a childless subsystem. It cannot create * any config_items. It just has attributes. * * Note that we are enclosing the configfs_subsystem inside a container. * This is not necessary if a subsystem has no attributes directly * on the subsystem. See the next example, 02-simple-children, for * such a subsystem. */ struct childless { struct configfs_subsystem subsys; int showme; int storeme; }; struct childless_attribute { struct configfs_attribute attr; ssize_t (*show)(struct childless *, char *); ssize_t (*store)(struct childless *, const char *, size_t); }; static inline struct childless *to_childless(struct config_item *item) { return item ? 
container_of(to_configfs_subsystem(to_config_group(item)), struct childless, subsys) : NULL; } static ssize_t childless_showme_read(struct childless *childless, char *page) { ssize_t pos; pos = sprintf(page, "%d\n", childless->showme); childless->showme++; return pos; } static ssize_t childless_storeme_read(struct childless *childless, char *page) { return sprintf(page, "%d\n", childless->storeme); } static ssize_t childless_storeme_write(struct childless *childless, const char *page, size_t count) { unsigned long tmp; char *p = (char *) page; tmp = simple_strtoul(p, &p, 10); if ((*p != '\0') && (*p != '\n')) return -EINVAL; if (tmp > INT_MAX) return -ERANGE; childless->storeme = tmp; return count; } static ssize_t childless_description_read(struct childless *childless, char *page) { return sprintf(page, "[01-childless]\n" "\n" "The childless subsystem is the simplest possible subsystem in\n" "configfs. It does not support the creation of child config_items.\n" "It only has a few attributes. 
In fact, it isn't much different\n" "than a directory in /proc.\n"); } static struct childless_attribute childless_attr_showme = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "showme", .ca_mode = S_IRUGO }, .show = childless_showme_read, }; static struct childless_attribute childless_attr_storeme = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "storeme", .ca_mode = S_IRUGO | S_IWUSR }, .show = childless_storeme_read, .store = childless_storeme_write, }; static struct childless_attribute childless_attr_description = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "description", .ca_mode = S_IRUGO }, .show = childless_description_read, }; static struct configfs_attribute *childless_attrs[] = { &childless_attr_showme.attr, &childless_attr_storeme.attr, &childless_attr_description.attr, NULL, }; static ssize_t childless_attr_show(struct config_item *item, struct configfs_attribute *attr, char *page) { struct childless *childless = to_childless(item); struct childless_attribute *childless_attr = container_of(attr, struct childless_attribute, attr); ssize_t ret = 0; if (childless_attr->show) ret = childless_attr->show(childless, page); return ret; } static ssize_t childless_attr_store(struct config_item *item, struct configfs_attribute *attr, const char *page, size_t count) { struct childless *childless = to_childless(item); struct childless_attribute *childless_attr = container_of(attr, struct childless_attribute, attr); ssize_t ret = -EINVAL; if (childless_attr->store) ret = childless_attr->store(childless, page, count); return ret; } static struct configfs_item_operations childless_item_ops = { .show_attribute = childless_attr_show, .store_attribute = childless_attr_store, }; static struct config_item_type childless_type = { .ct_item_ops = &childless_item_ops, .ct_attrs = childless_attrs, .ct_owner = THIS_MODULE, }; static struct childless childless_subsys = { .subsys = { .su_group = { .cg_item = { .ci_namebuf = "01-childless", .ci_type = &childless_type, }, }, }, 
}; /* ----------------------------------------------------------------- */ /* * 02-simple-children * * This example merely has a simple one-attribute child. Note that * there is no extra attribute structure, as the child's attribute is * known from the get-go. Also, there is no container for the * subsystem, as it has no attributes of its own. */ struct simple_child { struct config_item item; int storeme; }; static inline struct simple_child *to_simple_child(struct config_item *item) { return item ? container_of(item, struct simple_child, item) : NULL; } static struct configfs_attribute simple_child_attr_storeme = { .ca_owner = THIS_MODULE, .ca_name = "storeme", .ca_mode = S_IRUGO | S_IWUSR, }; static struct configfs_attribute *simple_child_attrs[] = { &simple_child_attr_storeme, NULL, }; static ssize_t simple_child_attr_show(struct config_item *item, struct configfs_attribute *attr, char *page) { ssize_t count; struct simple_child *simple_child = to_simple_child(item); count = sprintf(page, "%d\n", simple_child->storeme); return count; } static ssize_t simple_child_attr_store(struct config_item *item, struct configfs_attribute *attr, const char *page, size_t count) { struct simple_child *simple_child = to_simple_child(item); unsigned long tmp; char *p = (char *) page; tmp = simple_strtoul(p, &p, 10); if (!p || (*p && (*p != '\n'))) return -EINVAL; if (tmp > INT_MAX) return -ERANGE; simple_child->storeme = tmp; return count; } static void simple_child_release(struct config_item *item) { kfree(to_simple_child(item)); } static struct configfs_item_operations simple_child_item_ops = { .release = simple_child_release, .show_attribute = simple_child_attr_show, .store_attribute = simple_child_attr_store, }; static struct config_item_type simple_child_type = { .ct_item_ops = &simple_child_item_ops, .ct_attrs = simple_child_attrs, .ct_owner = THIS_MODULE, }; struct simple_children { struct config_group group; }; static inline struct simple_children 
*to_simple_children(struct config_item *item) { return item ? container_of(to_config_group(item), struct simple_children, group) : NULL; } static struct config_item *simple_children_make_item(struct config_group *group, const char *name) { struct simple_child *simple_child; simple_child = kzalloc(sizeof(struct simple_child), GFP_KERNEL); if (!simple_child) return ERR_PTR(-ENOMEM); config_item_init_type_name(&simple_child->item, name, &simple_child_type); simple_child->storeme = 0; return &simple_child->item; } static struct configfs_attribute simple_children_attr_description = { .ca_owner = THIS_MODULE, .ca_name = "description", .ca_mode = S_IRUGO, }; static struct configfs_attribute *simple_children_attrs[] = { &simple_children_attr_description, NULL, }; static ssize_t simple_children_attr_show(struct config_item *item, struct configfs_attribute *attr, char *page) { return sprintf(page, "[02-simple-children]\n" "\n" "This subsystem allows the creation of child config_items. These\n" "items have only one attribute that is readable and writeable.\n"); } static void simple_children_release(struct config_item *item) { kfree(to_simple_children(item)); } static struct configfs_item_operations simple_children_item_ops = { .release = simple_children_release, .show_attribute = simple_children_attr_show, }; /* * Note that, since no extra work is required on ->drop_item(), * no ->drop_item() is provided. 
*/ static struct configfs_group_operations simple_children_group_ops = { .make_item = simple_children_make_item, }; static struct config_item_type simple_children_type = { .ct_item_ops = &simple_children_item_ops, .ct_group_ops = &simple_children_group_ops, .ct_attrs = simple_children_attrs, .ct_owner = THIS_MODULE, }; static struct configfs_subsystem simple_children_subsys = { .su_group = { .cg_item = { .ci_namebuf = "02-simple-children", .ci_type = &simple_children_type, }, }, }; /* ----------------------------------------------------------------- */ /* * 03-group-children * * This example reuses the simple_children group from above. However, * the simple_children group is not the subsystem itself, it is a * child of the subsystem. Creation of a group in the subsystem creates * a new simple_children group. That group can then have simple_child * children of its own. */ static struct config_group *group_children_make_group(struct config_group *group, const char *name) { struct simple_children *simple_children; simple_children = kzalloc(sizeof(struct simple_children), GFP_KERNEL); if (!simple_children) return ERR_PTR(-ENOMEM); config_group_init_type_name(&simple_children->group, name, &simple_children_type); return &simple_children->group; } static struct configfs_attribute group_children_attr_description = { .ca_owner = THIS_MODULE, .ca_name = "description", .ca_mode = S_IRUGO, }; static struct configfs_attribute *group_children_attrs[] = { &group_children_attr_description, NULL, }; static ssize_t group_children_attr_show(struct config_item *item, struct configfs_attribute *attr, char *page) { return sprintf(page, "[03-group-children]\n" "\n" "This subsystem allows the creation of child config_groups. 
These\n" "groups are like the subsystem simple-children.\n"); } static struct configfs_item_operations group_children_item_ops = { .show_attribute = group_children_attr_show, }; /* * Note that, since no extra work is required on ->drop_item(), * no ->drop_item() is provided. */ static struct configfs_group_operations group_children_group_ops = { .make_group = group_children_make_group, }; static struct config_item_type group_children_type = { .ct_item_ops = &group_children_item_ops, .ct_group_ops = &group_children_group_ops, .ct_attrs = group_children_attrs, .ct_owner = THIS_MODULE, }; static struct configfs_subsystem group_children_subsys = { .su_group = { .cg_item = { .ci_namebuf = "03-group-children", .ci_type = &group_children_type, }, }, }; /* ----------------------------------------------------------------- */ /* * We're now done with our subsystem definitions. * For convenience in this module, here's a list of them all. It * allows the init function to easily register them. Most modules * will only have one subsystem, and will only call register_subsystem * on it directly. 
*/ static struct configfs_subsystem *example_subsys[] = { &childless_subsys.subsys, &simple_children_subsys, &group_children_subsys, NULL, }; static int __init configfs_example_init(void) { int ret; int i; struct configfs_subsystem *subsys; for (i = 0; example_subsys[i]; i++) { subsys = example_subsys[i]; config_group_init(&subsys->su_group); mutex_init(&subsys->su_mutex); ret = configfs_register_subsystem(subsys); if (ret) { printk(KERN_ERR "Error %d while registering subsystem %s\n", ret, subsys->su_group.cg_item.ci_namebuf); goto out_unregister; } } return 0; out_unregister: for (; i >= 0; i--) { configfs_unregister_subsystem(example_subsys[i]); } return ret; } static void __exit configfs_example_exit(void) { int i; for (i = 0; example_subsys[i]; i++) { configfs_unregister_subsystem(example_subsys[i]); } } module_init(configfs_example_init); module_exit(configfs_example_exit); MODULE_LICENSE("GPL");
gpl-2.0
minz1/xosp_kernel_xiaomi_msm8956
arch/arm/mach-pxa/mainstone.c
2098
17440
/* * linux/arch/arm/mach-pxa/mainstone.c * * Support for the Intel HCDDBBVA0 Development Platform. * (go figure how they came up with such name...) * * Author: Nicolas Pitre * Created: Nov 05, 2002 * Copyright: MontaVista Software Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/syscore_ops.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/bitops.h> #include <linux/fb.h> #include <linux/ioport.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <linux/pwm_backlight.h> #include <linux/smc91x.h> #include <linux/i2c/pxa-i2c.h> #include <linux/slab.h> #include <linux/leds.h> #include <asm/types.h> #include <asm/setup.h> #include <asm/memory.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <asm/irq.h> #include <asm/sizes.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <asm/mach/flash.h> #include <mach/pxa27x.h> #include <mach/mainstone.h> #include <mach/audio.h> #include <linux/platform_data/video-pxafb.h> #include <linux/platform_data/mmc-pxamci.h> #include <linux/platform_data/irda-pxaficp.h> #include <linux/platform_data/usb-ohci-pxa27x.h> #include <linux/platform_data/keypad-pxa27x.h> #include <mach/smemc.h> #include "generic.h" #include "devices.h" static unsigned long mainstone_pin_config[] = { /* Chip Select */ GPIO15_nCS_1, /* LCD - 16bpp Active TFT */ GPIOxx_LCD_TFT_16BPP, GPIO16_PWM0_OUT, /* Backlight */ /* MMC */ GPIO32_MMC_CLK, GPIO112_MMC_CMD, GPIO92_MMC_DAT_0, GPIO109_MMC_DAT_1, GPIO110_MMC_DAT_2, GPIO111_MMC_DAT_3, /* USB Host Port 1 */ GPIO88_USBH1_PWR, GPIO89_USBH1_PEN, /* PC Card */ GPIO48_nPOE, GPIO49_nPWE, GPIO50_nPIOR, GPIO51_nPIOW, GPIO85_nPCE_1, 
GPIO54_nPCE_2, GPIO79_PSKTSEL, GPIO55_nPREG, GPIO56_nPWAIT, GPIO57_nIOIS16, /* AC97 */ GPIO28_AC97_BITCLK, GPIO29_AC97_SDATA_IN_0, GPIO30_AC97_SDATA_OUT, GPIO31_AC97_SYNC, GPIO45_AC97_SYSCLK, /* Keypad */ GPIO93_KP_DKIN_0, GPIO94_KP_DKIN_1, GPIO95_KP_DKIN_2, GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH, GPIO101_KP_MKIN_1 | WAKEUP_ON_LEVEL_HIGH, GPIO102_KP_MKIN_2 | WAKEUP_ON_LEVEL_HIGH, GPIO97_KP_MKIN_3 | WAKEUP_ON_LEVEL_HIGH, GPIO98_KP_MKIN_4 | WAKEUP_ON_LEVEL_HIGH, GPIO99_KP_MKIN_5 | WAKEUP_ON_LEVEL_HIGH, GPIO103_KP_MKOUT_0, GPIO104_KP_MKOUT_1, GPIO105_KP_MKOUT_2, GPIO106_KP_MKOUT_3, GPIO107_KP_MKOUT_4, GPIO108_KP_MKOUT_5, GPIO96_KP_MKOUT_6, /* I2C */ GPIO117_I2C_SCL, GPIO118_I2C_SDA, /* GPIO */ GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH, }; static unsigned long mainstone_irq_enabled; static void mainstone_mask_irq(struct irq_data *d) { int mainstone_irq = (d->irq - MAINSTONE_IRQ(0)); MST_INTMSKENA = (mainstone_irq_enabled &= ~(1 << mainstone_irq)); } static void mainstone_unmask_irq(struct irq_data *d) { int mainstone_irq = (d->irq - MAINSTONE_IRQ(0)); /* the irq can be acknowledged only if deasserted, so it's done here */ MST_INTSETCLR &= ~(1 << mainstone_irq); MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq)); } static struct irq_chip mainstone_irq_chip = { .name = "FPGA", .irq_ack = mainstone_mask_irq, .irq_mask = mainstone_mask_irq, .irq_unmask = mainstone_unmask_irq, }; static void mainstone_irq_handler(unsigned int irq, struct irq_desc *desc) { unsigned long pending = MST_INTSETCLR & mainstone_irq_enabled; do { /* clear useless edge notification */ desc->irq_data.chip->irq_ack(&desc->irq_data); if (likely(pending)) { irq = MAINSTONE_IRQ(0) + __ffs(pending); generic_handle_irq(irq); } pending = MST_INTSETCLR & mainstone_irq_enabled; } while (pending); } static void __init mainstone_init_irq(void) { int irq; pxa27x_init_irq(); /* setup extra Mainstone irqs */ for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) { irq_set_chip_and_handler(irq, 
&mainstone_irq_chip, handle_level_irq); if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14)) set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN); else set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } set_irq_flags(MAINSTONE_IRQ(8), 0); set_irq_flags(MAINSTONE_IRQ(12), 0); MST_INTMSKENA = 0; MST_INTSETCLR = 0; irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), mainstone_irq_handler); irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING); } #ifdef CONFIG_PM static void mainstone_irq_resume(void) { MST_INTMSKENA = mainstone_irq_enabled; } static struct syscore_ops mainstone_irq_syscore_ops = { .resume = mainstone_irq_resume, }; static int __init mainstone_irq_device_init(void) { if (machine_is_mainstone()) register_syscore_ops(&mainstone_irq_syscore_ops); return 0; } device_initcall(mainstone_irq_device_init); #endif static struct resource smc91x_resources[] = { [0] = { .start = (MST_ETH_PHYS + 0x300), .end = (MST_ETH_PHYS + 0xfffff), .flags = IORESOURCE_MEM, }, [1] = { .start = MAINSTONE_IRQ(3), .end = MAINSTONE_IRQ(3), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, } }; static struct smc91x_platdata mainstone_smc91x_info = { .flags = SMC91X_USE_8BIT | SMC91X_USE_16BIT | SMC91X_USE_32BIT | SMC91X_NOWAIT | SMC91X_USE_DMA, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &mainstone_smc91x_info, }, }; static int mst_audio_startup(struct snd_pcm_substream *substream, void *priv) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) MST_MSCWR2 &= ~MST_MSCWR2_AC97_SPKROFF; return 0; } static void mst_audio_shutdown(struct snd_pcm_substream *substream, void *priv) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) MST_MSCWR2 |= MST_MSCWR2_AC97_SPKROFF; } static long mst_audio_suspend_mask; static void mst_audio_suspend(void *priv) { mst_audio_suspend_mask = MST_MSCWR2; MST_MSCWR2 |= MST_MSCWR2_AC97_SPKROFF; } static void 
mst_audio_resume(void *priv) { MST_MSCWR2 &= mst_audio_suspend_mask | ~MST_MSCWR2_AC97_SPKROFF; } static pxa2xx_audio_ops_t mst_audio_ops = { .startup = mst_audio_startup, .shutdown = mst_audio_shutdown, .suspend = mst_audio_suspend, .resume = mst_audio_resume, }; static struct resource flash_resources[] = { [0] = { .start = PXA_CS0_PHYS, .end = PXA_CS0_PHYS + SZ_64M - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = PXA_CS1_PHYS, .end = PXA_CS1_PHYS + SZ_64M - 1, .flags = IORESOURCE_MEM, }, }; static struct mtd_partition mainstoneflash0_partitions[] = { { .name = "Bootloader", .size = 0x00040000, .offset = 0, .mask_flags = MTD_WRITEABLE /* force read-only */ },{ .name = "Kernel", .size = 0x00400000, .offset = 0x00040000, },{ .name = "Filesystem", .size = MTDPART_SIZ_FULL, .offset = 0x00440000 } }; static struct flash_platform_data mst_flash_data[2] = { { .map_name = "cfi_probe", .parts = mainstoneflash0_partitions, .nr_parts = ARRAY_SIZE(mainstoneflash0_partitions), }, { .map_name = "cfi_probe", .parts = NULL, .nr_parts = 0, } }; static struct platform_device mst_flash_device[2] = { { .name = "pxa2xx-flash", .id = 0, .dev = { .platform_data = &mst_flash_data[0], }, .resource = &flash_resources[0], .num_resources = 1, }, { .name = "pxa2xx-flash", .id = 1, .dev = { .platform_data = &mst_flash_data[1], }, .resource = &flash_resources[1], .num_resources = 1, }, }; #if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE) static struct platform_pwm_backlight_data mainstone_backlight_data = { .pwm_id = 0, .max_brightness = 1023, .dft_brightness = 1023, .pwm_period_ns = 78770, }; static struct platform_device mainstone_backlight_device = { .name = "pwm-backlight", .dev = { .parent = &pxa27x_device_pwm0.dev, .platform_data = &mainstone_backlight_data, }, }; static void __init mainstone_backlight_register(void) { int ret = platform_device_register(&mainstone_backlight_device); if (ret) printk(KERN_ERR "mainstone: failed to register backlight device: %d\n", ret); } #else 
#define mainstone_backlight_register() do { } while (0) #endif static struct pxafb_mode_info toshiba_ltm04c380k_mode = { .pixclock = 50000, .xres = 640, .yres = 480, .bpp = 16, .hsync_len = 1, .left_margin = 0x9f, .right_margin = 1, .vsync_len = 44, .upper_margin = 0, .lower_margin = 0, .sync = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, }; static struct pxafb_mode_info toshiba_ltm035a776c_mode = { .pixclock = 110000, .xres = 240, .yres = 320, .bpp = 16, .hsync_len = 4, .left_margin = 8, .right_margin = 20, .vsync_len = 3, .upper_margin = 1, .lower_margin = 10, .sync = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, }; static struct pxafb_mach_info mainstone_pxafb_info = { .num_modes = 1, .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL, }; static int mainstone_mci_init(struct device *dev, irq_handler_t mstone_detect_int, void *data) { int err; /* make sure SD/Memory Stick multiplexer's signals * are routed to MMC controller */ MST_MSCWR1 &= ~MST_MSCWR1_MS_SEL; err = request_irq(MAINSTONE_MMC_IRQ, mstone_detect_int, IRQF_DISABLED, "MMC card detect", data); if (err) printk(KERN_ERR "mainstone_mci_init: MMC/SD: can't request MMC card detect IRQ\n"); return err; } static void mainstone_mci_setpower(struct device *dev, unsigned int vdd) { struct pxamci_platform_data* p_d = dev->platform_data; if (( 1 << vdd) & p_d->ocr_mask) { printk(KERN_DEBUG "%s: on\n", __func__); MST_MSCWR1 |= MST_MSCWR1_MMC_ON; MST_MSCWR1 &= ~MST_MSCWR1_MS_SEL; } else { printk(KERN_DEBUG "%s: off\n", __func__); MST_MSCWR1 &= ~MST_MSCWR1_MMC_ON; } } static void mainstone_mci_exit(struct device *dev, void *data) { free_irq(MAINSTONE_MMC_IRQ, data); } static struct pxamci_platform_data mainstone_mci_platform_data = { .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, .init = mainstone_mci_init, .setpower = mainstone_mci_setpower, .exit = mainstone_mci_exit, .gpio_card_detect = -1, .gpio_card_ro = -1, .gpio_power = -1, }; static void mainstone_irda_transceiver_mode(struct device *dev, int mode) { unsigned long 
flags; local_irq_save(flags); if (mode & IR_SIRMODE) { MST_MSCWR1 &= ~MST_MSCWR1_IRDA_FIR; } else if (mode & IR_FIRMODE) { MST_MSCWR1 |= MST_MSCWR1_IRDA_FIR; } pxa2xx_transceiver_mode(dev, mode); if (mode & IR_OFF) { MST_MSCWR1 = (MST_MSCWR1 & ~MST_MSCWR1_IRDA_MASK) | MST_MSCWR1_IRDA_OFF; } else { MST_MSCWR1 = (MST_MSCWR1 & ~MST_MSCWR1_IRDA_MASK) | MST_MSCWR1_IRDA_FULL; } local_irq_restore(flags); } static struct pxaficp_platform_data mainstone_ficp_platform_data = { .gpio_pwdown = -1, .transceiver_cap = IR_SIRMODE | IR_FIRMODE | IR_OFF, .transceiver_mode = mainstone_irda_transceiver_mode, }; static struct gpio_keys_button gpio_keys_button[] = { [0] = { .desc = "wakeup", .code = KEY_SUSPEND, .type = EV_KEY, .gpio = 1, .wakeup = 1, }, }; static struct gpio_keys_platform_data mainstone_gpio_keys = { .buttons = gpio_keys_button, .nbuttons = 1, }; static struct platform_device mst_gpio_keys_device = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &mainstone_gpio_keys, }, }; static struct platform_device *platform_devices[] __initdata = { &smc91x_device, &mst_flash_device[0], &mst_flash_device[1], &mst_gpio_keys_device, }; static struct pxaohci_platform_data mainstone_ohci_platform_data = { .port_mode = PMM_PERPORT_MODE, .flags = ENABLE_PORT_ALL | POWER_CONTROL_LOW | POWER_SENSE_LOW, }; #if defined(CONFIG_KEYBOARD_PXA27x) || defined(CONFIG_KEYBOARD_PXA27x_MODULE) static unsigned int mainstone_matrix_keys[] = { KEY(0, 0, KEY_A), KEY(1, 0, KEY_B), KEY(2, 0, KEY_C), KEY(3, 0, KEY_D), KEY(4, 0, KEY_E), KEY(5, 0, KEY_F), KEY(0, 1, KEY_G), KEY(1, 1, KEY_H), KEY(2, 1, KEY_I), KEY(3, 1, KEY_J), KEY(4, 1, KEY_K), KEY(5, 1, KEY_L), KEY(0, 2, KEY_M), KEY(1, 2, KEY_N), KEY(2, 2, KEY_O), KEY(3, 2, KEY_P), KEY(4, 2, KEY_Q), KEY(5, 2, KEY_R), KEY(0, 3, KEY_S), KEY(1, 3, KEY_T), KEY(2, 3, KEY_U), KEY(3, 3, KEY_V), KEY(4, 3, KEY_W), KEY(5, 3, KEY_X), KEY(2, 4, KEY_Y), KEY(3, 4, KEY_Z), KEY(0, 4, KEY_DOT), /* . 
*/ KEY(1, 4, KEY_CLOSE), /* @ */ KEY(4, 4, KEY_SLASH), KEY(5, 4, KEY_BACKSLASH), KEY(0, 5, KEY_HOME), KEY(1, 5, KEY_LEFTSHIFT), KEY(2, 5, KEY_SPACE), KEY(3, 5, KEY_SPACE), KEY(4, 5, KEY_ENTER), KEY(5, 5, KEY_BACKSPACE), KEY(0, 6, KEY_UP), KEY(1, 6, KEY_DOWN), KEY(2, 6, KEY_LEFT), KEY(3, 6, KEY_RIGHT), KEY(4, 6, KEY_SELECT), }; struct pxa27x_keypad_platform_data mainstone_keypad_info = { .matrix_key_rows = 6, .matrix_key_cols = 7, .matrix_key_map = mainstone_matrix_keys, .matrix_key_map_size = ARRAY_SIZE(mainstone_matrix_keys), .enable_rotary0 = 1, .rotary0_up_key = KEY_UP, .rotary0_down_key = KEY_DOWN, .debounce_interval = 30, }; static void __init mainstone_init_keypad(void) { pxa_set_keypad_info(&mainstone_keypad_info); } #else static inline void mainstone_init_keypad(void) {} #endif static void __init mainstone_init(void) { int SW7 = 0; /* FIXME: get from SCR (Mst doc section 3.2.1.1) */ pxa2xx_mfp_config(ARRAY_AND_SIZE(mainstone_pin_config)); pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); mst_flash_data[0].width = (__raw_readl(BOOT_DEF) & 1) ? 
2 : 4; mst_flash_data[1].width = 4; /* Compensate for SW7 which swaps the flash banks */ mst_flash_data[SW7].name = "processor-flash"; mst_flash_data[SW7 ^ 1].name = "mainboard-flash"; printk(KERN_NOTICE "Mainstone configured to boot from %s\n", mst_flash_data[0].name); /* system bus arbiter setting * - Core_Park * - LCD_wt:DMA_wt:CORE_Wt = 2:3:4 */ ARB_CNTRL = ARB_CORE_PARK | 0x234; platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices)); /* reading Mainstone's "Virtual Configuration Register" might be handy to select LCD type here */ if (0) mainstone_pxafb_info.modes = &toshiba_ltm04c380k_mode; else mainstone_pxafb_info.modes = &toshiba_ltm035a776c_mode; pxa_set_fb_info(NULL, &mainstone_pxafb_info); mainstone_backlight_register(); pxa_set_mci_info(&mainstone_mci_platform_data); pxa_set_ficp_info(&mainstone_ficp_platform_data); pxa_set_ohci_info(&mainstone_ohci_platform_data); pxa_set_i2c_info(NULL); pxa_set_ac97_info(&mst_audio_ops); mainstone_init_keypad(); } static struct map_desc mainstone_io_desc[] __initdata = { { /* CPLD */ .virtual = MST_FPGA_VIRT, .pfn = __phys_to_pfn(MST_FPGA_PHYS), .length = 0x00100000, .type = MT_DEVICE } }; static void __init mainstone_map_io(void) { pxa27x_map_io(); iotable_init(mainstone_io_desc, ARRAY_SIZE(mainstone_io_desc)); /* for use I SRAM as framebuffer. */ PSLR |= 0xF04; PCFR = 0x66; } /* * Driver for the 8 discrete LEDs available for general use: * Note: bits [15-8] are used to enable/blank the 8 7 segment hex displays * so be sure to not monkey with them here. */ #if defined(CONFIG_NEW_LEDS) && defined(CONFIG_LEDS_CLASS) struct mainstone_led { struct led_classdev cdev; u8 mask; }; /* * The triggers lines up below will only be used if the * LED triggers are compiled in. 
*/ static const struct { const char *name; const char *trigger; } mainstone_leds[] = { { "mainstone:D28", "default-on", }, { "mainstone:D27", "cpu0", }, { "mainstone:D26", "heartbeat" }, { "mainstone:D25", }, { "mainstone:D24", }, { "mainstone:D23", }, { "mainstone:D22", }, { "mainstone:D21", }, }; static void mainstone_led_set(struct led_classdev *cdev, enum led_brightness b) { struct mainstone_led *led = container_of(cdev, struct mainstone_led, cdev); u32 reg = MST_LEDCTRL; if (b != LED_OFF) reg |= led->mask; else reg &= ~led->mask; MST_LEDCTRL = reg; } static enum led_brightness mainstone_led_get(struct led_classdev *cdev) { struct mainstone_led *led = container_of(cdev, struct mainstone_led, cdev); u32 reg = MST_LEDCTRL; return (reg & led->mask) ? LED_FULL : LED_OFF; } static int __init mainstone_leds_init(void) { int i; if (!machine_is_mainstone()) return -ENODEV; /* All ON */ MST_LEDCTRL |= 0xff; for (i = 0; i < ARRAY_SIZE(mainstone_leds); i++) { struct mainstone_led *led; led = kzalloc(sizeof(*led), GFP_KERNEL); if (!led) break; led->cdev.name = mainstone_leds[i].name; led->cdev.brightness_set = mainstone_led_set; led->cdev.brightness_get = mainstone_led_get; led->cdev.default_trigger = mainstone_leds[i].trigger; led->mask = BIT(i); if (led_classdev_register(NULL, &led->cdev) < 0) { kfree(led); break; } } return 0; } /* * Since we may have triggers on any subsystem, defer registration * until after subsystem_init. */ fs_initcall(mainstone_leds_init); #endif MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)") /* Maintainer: MontaVista Software Inc. */ .atag_offset = 0x100, /* BLOB boot parameter setting */ .map_io = mainstone_map_io, .nr_irqs = MAINSTONE_NR_IRQS, .init_irq = mainstone_init_irq, .handle_irq = pxa27x_handle_irq, .init_time = pxa_timer_init, .init_machine = mainstone_init, .restart = pxa_restart, MACHINE_END
gpl-2.0
Infusion-OS/android_kernel_lge_gee
arch/arm/mach-msm/msm_bus/msm_bus_dbg.c
2610
18727
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "AXI: %s(): " fmt, __func__ #include <linux/kernel.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/string.h> #include <linux/uaccess.h> #include <linux/hrtimer.h> #include <mach/msm_bus_board.h> #include <mach/msm_bus.h> #include "msm_bus_core.h" #define MAX_BUFF_SIZE 4096 #define FILL_LIMIT 128 static struct dentry *clients; static struct dentry *dir; static DEFINE_MUTEX(msm_bus_dbg_fablist_lock); struct msm_bus_dbg_state { uint32_t cl; uint8_t enable; uint8_t current_index; } clstate; struct msm_bus_cldata { const struct msm_bus_scale_pdata *pdata; int index; uint32_t clid; int size; struct dentry *file; struct list_head list; char buffer[MAX_BUFF_SIZE]; }; struct msm_bus_fab_list { const char *name; int size; struct dentry *file; struct list_head list; char buffer[MAX_BUFF_SIZE]; }; LIST_HEAD(fabdata_list); LIST_HEAD(cl_list); /** * The following structures and funtions are used for * the test-client which can be created at run-time. 
*/ static struct msm_bus_vectors init_vectors[1]; static struct msm_bus_vectors current_vectors[1]; static struct msm_bus_vectors requested_vectors[1]; static struct msm_bus_paths shell_client_usecases[] = { { .num_paths = ARRAY_SIZE(init_vectors), .vectors = init_vectors, }, { .num_paths = ARRAY_SIZE(current_vectors), .vectors = current_vectors, }, { .num_paths = ARRAY_SIZE(requested_vectors), .vectors = requested_vectors, }, }; static struct msm_bus_scale_pdata shell_client = { .usecase = shell_client_usecases, .num_usecases = ARRAY_SIZE(shell_client_usecases), .name = "test-client", }; static void msm_bus_dbg_init_vectors(void) { init_vectors[0].src = -1; init_vectors[0].dst = -1; init_vectors[0].ab = 0; init_vectors[0].ib = 0; current_vectors[0].src = -1; current_vectors[0].dst = -1; current_vectors[0].ab = 0; current_vectors[0].ib = 0; requested_vectors[0].src = -1; requested_vectors[0].dst = -1; requested_vectors[0].ab = 0; requested_vectors[0].ib = 0; clstate.enable = 0; clstate.current_index = 0; } static int msm_bus_dbg_update_cl_request(uint32_t cl) { int ret = 0; if (clstate.current_index < 2) clstate.current_index = 2; else { clstate.current_index = 1; current_vectors[0].ab = requested_vectors[0].ab; current_vectors[0].ib = requested_vectors[0].ib; } if (clstate.enable) { MSM_BUS_DBG("Updating request for shell client, index: %d\n", clstate.current_index); ret = msm_bus_scale_client_update_request(clstate.cl, clstate.current_index); } else MSM_BUS_DBG("Enable bit not set. Skipping update request\n"); return ret; } static void msm_bus_dbg_unregister_client(uint32_t cl) { MSM_BUS_DBG("Unregistering shell client\n"); msm_bus_scale_unregister_client(clstate.cl); clstate.cl = 0; } static uint32_t msm_bus_dbg_register_client(void) { int ret = 0; if (init_vectors[0].src != requested_vectors[0].src) { MSM_BUS_DBG("Shell client master changed. 
Unregistering\n"); msm_bus_dbg_unregister_client(clstate.cl); } if (init_vectors[0].dst != requested_vectors[0].dst) { MSM_BUS_DBG("Shell client slave changed. Unregistering\n"); msm_bus_dbg_unregister_client(clstate.cl); } if (!clstate.enable) { MSM_BUS_DBG("Enable bit not set, skipping registration: cl " "%d\n", clstate.cl); return 0; } if (clstate.cl) { MSM_BUS_DBG("Client registered, skipping registration\n"); return 0; } current_vectors[0].src = init_vectors[0].src; requested_vectors[0].src = init_vectors[0].src; current_vectors[0].dst = init_vectors[0].dst; requested_vectors[0].dst = init_vectors[0].dst; MSM_BUS_DBG("Registering shell client\n"); ret = msm_bus_scale_register_client(&shell_client); return ret; } static int msm_bus_dbg_mas_get(void *data, u64 *val) { *val = init_vectors[0].src; MSM_BUS_DBG("Get master: %llu\n", *val); return 0; } static int msm_bus_dbg_mas_set(void *data, u64 val) { init_vectors[0].src = val; MSM_BUS_DBG("Set master: %llu\n", val); clstate.cl = msm_bus_dbg_register_client(); return 0; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_mas_fops, msm_bus_dbg_mas_get, msm_bus_dbg_mas_set, "%llu\n"); static int msm_bus_dbg_slv_get(void *data, u64 *val) { *val = init_vectors[0].dst; MSM_BUS_DBG("Get slave: %llu\n", *val); return 0; } static int msm_bus_dbg_slv_set(void *data, u64 val) { init_vectors[0].dst = val; MSM_BUS_DBG("Set slave: %llu\n", val); clstate.cl = msm_bus_dbg_register_client(); return 0; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_slv_fops, msm_bus_dbg_slv_get, msm_bus_dbg_slv_set, "%llu\n"); static int msm_bus_dbg_ab_get(void *data, u64 *val) { *val = requested_vectors[0].ab; MSM_BUS_DBG("Get ab: %llu\n", *val); return 0; } static int msm_bus_dbg_ab_set(void *data, u64 val) { requested_vectors[0].ab = val; MSM_BUS_DBG("Set ab: %llu\n", val); return 0; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_ab_fops, msm_bus_dbg_ab_get, msm_bus_dbg_ab_set, "%llu\n"); static int msm_bus_dbg_ib_get(void *data, u64 *val) { *val = 
requested_vectors[0].ib; MSM_BUS_DBG("Get ib: %llu\n", *val); return 0; } static int msm_bus_dbg_ib_set(void *data, u64 val) { requested_vectors[0].ib = val; MSM_BUS_DBG("Set ib: %llu\n", val); return 0; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_ib_fops, msm_bus_dbg_ib_get, msm_bus_dbg_ib_set, "%llu\n"); static int msm_bus_dbg_en_get(void *data, u64 *val) { *val = clstate.enable; MSM_BUS_DBG("Get enable: %llu\n", *val); return 0; } static int msm_bus_dbg_en_set(void *data, u64 val) { int ret = 0; clstate.enable = val; if (clstate.enable) { if (!clstate.cl) { MSM_BUS_DBG("client: %u\n", clstate.cl); clstate.cl = msm_bus_dbg_register_client(); if (clstate.cl) ret = msm_bus_dbg_update_cl_request(clstate.cl); } else { MSM_BUS_DBG("update request for cl: %u\n", clstate.cl); ret = msm_bus_dbg_update_cl_request(clstate.cl); } } MSM_BUS_DBG("Set enable: %llu\n", val); return ret; } DEFINE_SIMPLE_ATTRIBUTE(shell_client_en_fops, msm_bus_dbg_en_get, msm_bus_dbg_en_set, "%llu\n"); /** * The following funtions are used for viewing the client data * and changing the client request at run-time */ static ssize_t client_data_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { int bsize = 0; uint32_t cl = (uint32_t)file->private_data; struct msm_bus_cldata *cldata = NULL; list_for_each_entry(cldata, &cl_list, list) { if (cldata->clid == cl) break; } bsize = cldata->size; return simple_read_from_buffer(buf, count, ppos, cldata->buffer, bsize); } static int client_data_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return 0; } static const struct file_operations client_data_fops = { .open = client_data_open, .read = client_data_read, }; struct dentry *msm_bus_dbg_create(const char *name, mode_t mode, struct dentry *dent, uint32_t clid) { if (dent == NULL) { MSM_BUS_DBG("debugfs not ready yet\n"); return NULL; } return debugfs_create_file(name, mode, dent, (void *)clid, &client_data_fops); } static int 
msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata, int index, uint32_t clid, struct dentry *file) { struct msm_bus_cldata *cldata; cldata = kmalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL); if (!cldata) { MSM_BUS_DBG("Failed to allocate memory for client data\n"); return -ENOMEM; } cldata->pdata = pdata; cldata->index = index; cldata->clid = clid; cldata->file = file; cldata->size = 0; list_add_tail(&cldata->list, &cl_list); return 0; } static void msm_bus_dbg_free_client(uint32_t clid) { struct msm_bus_cldata *cldata = NULL; list_for_each_entry(cldata, &cl_list, list) { if (cldata->clid == clid) { debugfs_remove(cldata->file); list_del(&cldata->list); kfree(cldata); break; } } } static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata, int index, uint32_t clid) { int i = 0, j; char *buf = NULL; struct msm_bus_cldata *cldata = NULL; struct timespec ts; list_for_each_entry(cldata, &cl_list, list) { if (cldata->clid == clid) break; } if (cldata->file == NULL) { if (pdata->name == NULL) { MSM_BUS_DBG("Client doesn't have a name\n"); return -EINVAL; } cldata->file = msm_bus_dbg_create(pdata->name, S_IRUGO, clients, clid); } if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT)) i = cldata->size; else { i = 0; cldata->size = 0; } buf = cldata->buffer; ts = ktime_to_timespec(ktime_get()); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n", (int)ts.tv_sec, (int)ts.tv_nsec); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "curr : %d\n", index); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "masters: "); for (j = 0; j < pdata->usecase->num_paths; j++) i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", pdata->usecase[index].vectors[j].src); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslaves : "); for (j = 0; j < pdata->usecase->num_paths; j++) i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", pdata->usecase[index].vectors[j].dst); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab : "); for (j = 0; j < pdata->usecase->num_paths; j++) i += 
scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", pdata->usecase[index].vectors[j].ab); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib : "); for (j = 0; j < pdata->usecase->num_paths; j++) i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", pdata->usecase[index].vectors[j].ib); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n"); cldata->size = i; return i; } static int msm_bus_dbg_update_request(struct msm_bus_cldata *cldata, int index) { int ret = 0; if ((index < 0) || (index > cldata->pdata->num_usecases)) { MSM_BUS_DBG("Invalid index!\n"); return -EINVAL; } ret = msm_bus_scale_client_update_request(cldata->clid, index); return ret; } static ssize_t msm_bus_dbg_update_request_write(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct msm_bus_cldata *cldata; unsigned long index = 0; int ret = 0; char *chid; char *buf = kmalloc((sizeof(char) * (cnt + 1)), GFP_KERNEL); if (!buf || IS_ERR(buf)) { MSM_BUS_ERR("Memory allocation for buffer failed\n"); return -ENOMEM; } if (cnt == 0) return 0; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = '\0'; chid = buf; MSM_BUS_DBG("buffer: %s\n size: %d\n", buf, sizeof(ubuf)); list_for_each_entry(cldata, &cl_list, list) { if (strstr(chid, cldata->pdata->name)) { cldata = cldata; strsep(&chid, " "); if (chid) { ret = strict_strtoul(chid, 10, &index); if (ret) { MSM_BUS_DBG("Index conversion" " failed\n"); return -EFAULT; } } else MSM_BUS_DBG("Error parsing input. 
Index not" " found\n"); break; } } msm_bus_dbg_update_request(cldata, index); kfree(buf); return cnt; } /** * The following funtions are used for viewing the commit data * for each fabric */ static ssize_t fabric_data_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct msm_bus_fab_list *fablist = NULL; int bsize = 0; ssize_t ret; const char *name = file->private_data; mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry(fablist, &fabdata_list, list) { if (strcmp(fablist->name, name) == 0) break; } bsize = fablist->size; ret = simple_read_from_buffer(buf, count, ppos, fablist->buffer, bsize); mutex_unlock(&msm_bus_dbg_fablist_lock); return ret; } static const struct file_operations fabric_data_fops = { .open = client_data_open, .read = fabric_data_read, }; static int msm_bus_dbg_record_fabric(const char *fabname, struct dentry *file) { struct msm_bus_fab_list *fablist; int ret = 0; mutex_lock(&msm_bus_dbg_fablist_lock); fablist = kmalloc(sizeof(struct msm_bus_fab_list), GFP_KERNEL); if (!fablist) { MSM_BUS_DBG("Failed to allocate memory for commit data\n"); ret = -ENOMEM; goto err; } fablist->name = fabname; fablist->size = 0; list_add_tail(&fablist->list, &fabdata_list); err: mutex_unlock(&msm_bus_dbg_fablist_lock); return ret; } static void msm_bus_dbg_free_fabric(const char *fabname) { struct msm_bus_fab_list *fablist = NULL; mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry(fablist, &fabdata_list, list) { if (strcmp(fablist->name, fabname) == 0) { debugfs_remove(fablist->file); list_del(&fablist->list); kfree(fablist); break; } } mutex_unlock(&msm_bus_dbg_fablist_lock); } static int msm_bus_dbg_fill_fab_buffer(const char *fabname, void *cdata, int nmasters, int nslaves, int ntslaves) { int i; char *buf = NULL; struct msm_bus_fab_list *fablist = NULL; struct timespec ts; mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry(fablist, &fabdata_list, list) { if (strcmp(fablist->name, fabname) == 0) break; } if 
(fablist->file == NULL) { MSM_BUS_DBG("Fabric dbg entry does not exist\n"); mutex_unlock(&msm_bus_dbg_fablist_lock); return -EFAULT; } if (fablist->size < MAX_BUFF_SIZE - 256) i = fablist->size; else { i = 0; fablist->size = 0; } buf = fablist->buffer; mutex_unlock(&msm_bus_dbg_fablist_lock); ts = ktime_to_timespec(ktime_get()); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n", (int)ts.tv_sec, (int)ts.tv_nsec); msm_bus_rpm_fill_cdata_buffer(&i, buf, MAX_BUFF_SIZE, cdata, nmasters, nslaves, ntslaves); i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n"); mutex_lock(&msm_bus_dbg_fablist_lock); fablist->size = i; mutex_unlock(&msm_bus_dbg_fablist_lock); return 0; } static const struct file_operations msm_bus_dbg_update_request_fops = { .open = client_data_open, .write = msm_bus_dbg_update_request_write, }; /** * msm_bus_dbg_client_data() - Add debug data for clients * @pdata: Platform data of the client * @index: The current index or operation to be performed * @clid: Client handle obtained during registration */ void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index, uint32_t clid) { struct dentry *file = NULL; if (index == MSM_BUS_DBG_REGISTER) { msm_bus_dbg_record_client(pdata, index, clid, file); if (!pdata->name) { MSM_BUS_DBG("Cannot create debugfs entry. 
Null name\n"); return; } } else if (index == MSM_BUS_DBG_UNREGISTER) { msm_bus_dbg_free_client(clid); MSM_BUS_DBG("Client %d unregistered\n", clid); } else msm_bus_dbg_fill_cl_buffer(pdata, index, clid); } EXPORT_SYMBOL(msm_bus_dbg_client_data); /** * msm_bus_dbg_commit_data() - Add commit data from fabrics * @fabname: Fabric name specified in platform data * @cdata: Commit Data * @nmasters: Number of masters attached to fabric * @nslaves: Number of slaves attached to fabric * @ntslaves: Number of tiered slaves attached to fabric * @op: Operation to be performed */ void msm_bus_dbg_commit_data(const char *fabname, void *cdata, int nmasters, int nslaves, int ntslaves, int op) { struct dentry *file = NULL; if (op == MSM_BUS_DBG_REGISTER) msm_bus_dbg_record_fabric(fabname, file); else if (op == MSM_BUS_DBG_UNREGISTER) msm_bus_dbg_free_fabric(fabname); else msm_bus_dbg_fill_fab_buffer(fabname, cdata, nmasters, nslaves, ntslaves); } EXPORT_SYMBOL(msm_bus_dbg_commit_data); static int __init msm_bus_debugfs_init(void) { struct dentry *commit, *shell_client; struct msm_bus_fab_list *fablist; struct msm_bus_cldata *cldata = NULL; uint64_t val = 0; dir = debugfs_create_dir("msm-bus-dbg", NULL); if ((!dir) || IS_ERR(dir)) { MSM_BUS_ERR("Couldn't create msm-bus-dbg\n"); goto err; } clients = debugfs_create_dir("client-data", dir); if ((!dir) || IS_ERR(dir)) { MSM_BUS_ERR("Couldn't create clients\n"); goto err; } shell_client = debugfs_create_dir("shell-client", dir); if ((!dir) || IS_ERR(dir)) { MSM_BUS_ERR("Couldn't create clients\n"); goto err; } commit = debugfs_create_dir("commit-data", dir); if ((!dir) || IS_ERR(dir)) { MSM_BUS_ERR("Couldn't create commit\n"); goto err; } if (debugfs_create_file("update_request", S_IRUGO | S_IWUSR, shell_client, &val, &shell_client_en_fops) == NULL) goto err; if (debugfs_create_file("ib", S_IRUGO | S_IWUSR, shell_client, &val, &shell_client_ib_fops) == NULL) goto err; if (debugfs_create_file("ab", S_IRUGO | S_IWUSR, shell_client, &val, 
&shell_client_ab_fops) == NULL) goto err; if (debugfs_create_file("slv", S_IRUGO | S_IWUSR, shell_client, &val, &shell_client_slv_fops) == NULL) goto err; if (debugfs_create_file("mas", S_IRUGO | S_IWUSR, shell_client, &val, &shell_client_mas_fops) == NULL) goto err; if (debugfs_create_file("update-request", S_IRUGO | S_IWUSR, clients, NULL, &msm_bus_dbg_update_request_fops) == NULL) goto err; list_for_each_entry(cldata, &cl_list, list) { if (cldata->pdata->name == NULL) { MSM_BUS_DBG("Client name not found\n"); continue; } cldata->file = msm_bus_dbg_create(cldata-> pdata->name, S_IRUGO, clients, cldata->clid); } mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry(fablist, &fabdata_list, list) { fablist->file = debugfs_create_file(fablist->name, S_IRUGO, commit, (void *)fablist->name, &fabric_data_fops); if (fablist->file == NULL) { MSM_BUS_DBG("Cannot create files for commit data\n"); goto err; } } mutex_unlock(&msm_bus_dbg_fablist_lock); msm_bus_dbg_init_vectors(); return 0; err: debugfs_remove_recursive(dir); return -ENODEV; } late_initcall(msm_bus_debugfs_init); static void __exit msm_bus_dbg_teardown(void) { struct msm_bus_fab_list *fablist = NULL, *fablist_temp; struct msm_bus_cldata *cldata = NULL, *cldata_temp; debugfs_remove_recursive(dir); list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) { list_del(&cldata->list); kfree(cldata); } mutex_lock(&msm_bus_dbg_fablist_lock); list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) { list_del(&fablist->list); kfree(fablist); } mutex_unlock(&msm_bus_dbg_fablist_lock); } module_exit(msm_bus_dbg_teardown); MODULE_DESCRIPTION("Debugfs for msm bus scaling client"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Gagan Mac <gmac@codeaurora.org>");
gpl-2.0
Awesomez/htc_glacier_kernel
drivers/staging/wlan-ng/prism2usb.c
3378
10232
#include "hfa384x_usb.c" #include "prism2mgmt.c" #include "prism2mib.c" #include "prism2sta.c" #include "prism2fw.c" #define PRISM_USB_DEVICE(vid, pid, name) \ USB_DEVICE(vid, pid), \ .driver_info = (unsigned long) name static struct usb_device_id usb_prism_tbl[] = { {PRISM_USB_DEVICE(0x04bb, 0x0922, "IOData AirPort WN-B11/USBS")}, {PRISM_USB_DEVICE(0x07aa, 0x0012, "Corega Wireless LAN USB Stick-11")}, {PRISM_USB_DEVICE(0x09aa, 0x3642, "Prism2.x 11Mbps WLAN USB Adapter")}, {PRISM_USB_DEVICE (0x1668, 0x0408, "Actiontec Prism2.5 11Mbps WLAN USB Adapter")}, {PRISM_USB_DEVICE (0x1668, 0x0421, "Actiontec Prism2.5 11Mbps WLAN USB Adapter")}, {PRISM_USB_DEVICE (0x1915, 0x2236, "Linksys WUSB11v3.0 11Mbps WLAN USB Adapter")}, {PRISM_USB_DEVICE (0x066b, 0x2212, "Linksys WUSB11v2.5 11Mbps WLAN USB Adapter")}, {PRISM_USB_DEVICE (0x066b, 0x2213, "Linksys WUSB12v1.1 11Mbps WLAN USB Adapter")}, {PRISM_USB_DEVICE (0x067c, 0x1022, "Siemens SpeedStream 1022 11Mbps WLAN USB Adapter")}, {PRISM_USB_DEVICE (0x049f, 0x0033, "Compaq/Intel W100 PRO/Wireless 11Mbps multiport WLAN Adapter")}, {PRISM_USB_DEVICE (0x0411, 0x0016, "Melco WLI-USB-S11 11Mbps WLAN Adapter")}, {PRISM_USB_DEVICE (0x08de, 0x7a01, "PRISM25 IEEE 802.11 Mini USB Adapter")}, {PRISM_USB_DEVICE (0x8086, 0x1111, "Intel PRO/Wireless 2011B LAN USB Adapter")}, {PRISM_USB_DEVICE (0x0d8e, 0x7a01, "PRISM25 IEEE 802.11 Mini USB Adapter")}, {PRISM_USB_DEVICE (0x045e, 0x006e, "Microsoft MN510 Wireless USB Adapter")}, {PRISM_USB_DEVICE(0x0967, 0x0204, "Acer Warplink USB Adapter")}, {PRISM_USB_DEVICE (0x0cde, 0x0002, "Z-Com 725/726 Prism2.5 USB/USB Integrated")}, {PRISM_USB_DEVICE (0x0cde, 0x0005, "Z-Com Xl735 Wireless 802.11b USB Adapter")}, {PRISM_USB_DEVICE (0x413c, 0x8100, "Dell TrueMobile 1180 Wireless USB Adapter")}, {PRISM_USB_DEVICE (0x0b3b, 0x1601, "ALLNET 0193 11Mbps WLAN USB Adapter")}, {PRISM_USB_DEVICE (0x0b3b, 0x1602, "ZyXEL ZyAIR B200 Wireless USB Adapter")}, {PRISM_USB_DEVICE (0x0baf, 0x00eb, "USRobotics USR1120 
Wireless USB Adapter")}, {PRISM_USB_DEVICE (0x0411, 0x0027, "Melco WLI-USB-KS11G 11Mbps WLAN Adapter")}, {PRISM_USB_DEVICE (0x04f1, 0x3009, "JVC MP-XP7250 Builtin USB WLAN Adapter")}, {PRISM_USB_DEVICE(0x0846, 0x4110, "NetGear MA111")}, {PRISM_USB_DEVICE(0x03f3, 0x0020, "Adaptec AWN-8020 USB WLAN Adapter")}, {PRISM_USB_DEVICE(0x2821, 0x3300, "ASUS-WL140 Wireless USB Adapter")}, {PRISM_USB_DEVICE(0x2001, 0x3700, "DWL-122 Wireless USB Adapter")}, {PRISM_USB_DEVICE (0x2001, 0x3702, "DWL-120 Rev F Wireless USB Adapter")}, {PRISM_USB_DEVICE(0x50c2, 0x4013, "Averatec USB WLAN Adapter")}, {PRISM_USB_DEVICE(0x2c02, 0x14ea, "Planex GW-US11H WLAN USB Adapter")}, {PRISM_USB_DEVICE(0x124a, 0x168b, "Airvast PRISM3 WLAN USB Adapter")}, {PRISM_USB_DEVICE(0x083a, 0x3503, "T-Sinus 111 USB WLAN Adapter")}, {PRISM_USB_DEVICE(0x2821, 0x3300, "Hawking HighDB USB Adapter")}, {PRISM_USB_DEVICE (0x0411, 0x0044, "Melco WLI-USB-KB11 11Mbps WLAN Adapter")}, {PRISM_USB_DEVICE(0x1668, 0x6106, "ROPEX FreeLan 802.11b USB Adapter")}, {PRISM_USB_DEVICE (0x124a, 0x4017, "Pheenet WL-503IA 802.11b USB Adapter")}, {PRISM_USB_DEVICE(0x0bb2, 0x0302, "Ambit Microsystems Corp.")}, {PRISM_USB_DEVICE (0x9016, 0x182d, "Sitecom WL-022 802.11b USB Adapter")}, {PRISM_USB_DEVICE (0x0543, 0x0f01, "ViewSonic Airsync USB Adapter 11Mbps (Prism2.5)")}, { /* terminator */ } }; MODULE_DEVICE_TABLE(usb, usb_prism_tbl); /*---------------------------------------------------------------- * prism2sta_probe_usb * * Probe routine called by the USB subsystem. * * Arguments: * dev ptr to the usb_device struct * ifnum interface number being offered * * Returns: * NULL - we're not claiming the device+interface * non-NULL - we are claiming the device+interface and * this is a ptr to the data we want back * when disconnect is called. * * Side effects: * * Call context: * I'm not sure, assume it's interrupt. 
* ----------------------------------------------------------------*/ static int prism2sta_probe_usb(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *dev; wlandevice_t *wlandev = NULL; hfa384x_t *hw = NULL; int result = 0; dev = interface_to_usbdev(interface); wlandev = create_wlan(); if (wlandev == NULL) { printk(KERN_ERR "%s: Memory allocation failure.\n", dev_info); result = -EIO; goto failed; } hw = wlandev->priv; if (wlan_setup(wlandev, &(interface->dev)) != 0) { printk(KERN_ERR "%s: wlan_setup() failed.\n", dev_info); result = -EIO; goto failed; } /* Initialize the hw data */ hfa384x_create(hw, dev); hw->wlandev = wlandev; /* Register the wlandev, this gets us a name and registers the * linux netdevice. */ SET_NETDEV_DEV(wlandev->netdev, &(interface->dev)); /* Do a chip-level reset on the MAC */ if (prism2_doreset) { result = hfa384x_corereset(hw, prism2_reset_holdtime, prism2_reset_settletime, 0); if (result != 0) { unregister_wlandev(wlandev); hfa384x_destroy(hw); result = -EIO; printk(KERN_ERR "%s: hfa384x_corereset() failed.\n", dev_info); goto failed; } } usb_get_dev(dev); wlandev->msdstate = WLAN_MSD_HWPRESENT; /* Try and load firmware, then enable card before we register */ prism2_fwtry(dev, wlandev); prism2sta_ifstate(wlandev, P80211ENUM_ifstate_enable); if (register_wlandev(wlandev) != 0) { printk(KERN_ERR "%s: register_wlandev() failed.\n", dev_info); result = -EIO; goto failed; } goto done; failed: kfree(wlandev); kfree(hw); wlandev = NULL; done: usb_set_intfdata(interface, wlandev); return result; } /*---------------------------------------------------------------- * prism2sta_disconnect_usb * * Called when a device previously claimed by probe is removed * from the USB. * * Arguments: * dev ptr to the usb_device struct * ptr ptr returned by probe() when the device * was claimed. 
* * Returns: * Nothing * * Side effects: * * Call context: * process ----------------------------------------------------------------*/ static void prism2sta_disconnect_usb(struct usb_interface *interface) { wlandevice_t *wlandev; wlandev = (wlandevice_t *) usb_get_intfdata(interface); if (wlandev != NULL) { LIST_HEAD(cleanlist); struct list_head *entry; struct list_head *temp; unsigned long flags; hfa384x_t *hw = wlandev->priv; if (!hw) goto exit; spin_lock_irqsave(&hw->ctlxq.lock, flags); p80211netdev_hwremoved(wlandev); list_splice_init(&hw->ctlxq.reapable, &cleanlist); list_splice_init(&hw->ctlxq.completing, &cleanlist); list_splice_init(&hw->ctlxq.pending, &cleanlist); list_splice_init(&hw->ctlxq.active, &cleanlist); spin_unlock_irqrestore(&hw->ctlxq.lock, flags); /* There's no hardware to shutdown, but the driver * might have some tasks or tasklets that must be * stopped before we can tear everything down. */ prism2sta_ifstate(wlandev, P80211ENUM_ifstate_disable); del_singleshot_timer_sync(&hw->throttle); del_singleshot_timer_sync(&hw->reqtimer); del_singleshot_timer_sync(&hw->resptimer); /* Unlink all the URBs. This "removes the wheels" * from the entire CTLX handling mechanism. */ usb_kill_urb(&hw->rx_urb); usb_kill_urb(&hw->tx_urb); usb_kill_urb(&hw->ctlx_urb); tasklet_kill(&hw->completion_bh); tasklet_kill(&hw->reaper_bh); flush_scheduled_work(); /* Now we complete any outstanding commands * and tell everyone who is waiting for their * responses that we have shut down. */ list_for_each(entry, &cleanlist) { hfa384x_usbctlx_t *ctlx; ctlx = list_entry(entry, hfa384x_usbctlx_t, list); complete(&ctlx->done); } /* Give any outstanding synchronous commands * a chance to complete. All they need to do * is "wake up", so that's easy. * (I'd like a better way to do this, really.) */ msleep(100); /* Now delete the CTLXs, because no-one else can now. 
*/ list_for_each_safe(entry, temp, &cleanlist) { hfa384x_usbctlx_t *ctlx; ctlx = list_entry(entry, hfa384x_usbctlx_t, list); kfree(ctlx); } /* Unhook the wlandev */ unregister_wlandev(wlandev); wlan_unsetup(wlandev); usb_put_dev(hw->usb); hfa384x_destroy(hw); kfree(hw); kfree(wlandev); } exit: usb_set_intfdata(interface, NULL); } #ifdef CONFIG_PM static int prism2sta_suspend(struct usb_interface *interface, pm_message_t message) { hfa384x_t *hw = NULL; wlandevice_t *wlandev; wlandev = (wlandevice_t *) usb_get_intfdata(interface); if (!wlandev) return -ENODEV; hw = wlandev->priv; if (!hw) return -ENODEV; prism2sta_ifstate(wlandev, P80211ENUM_ifstate_disable); usb_kill_urb(&hw->rx_urb); usb_kill_urb(&hw->tx_urb); usb_kill_urb(&hw->ctlx_urb); return 0; } static int prism2sta_resume(struct usb_interface *interface) { int result = 0; hfa384x_t *hw = NULL; wlandevice_t *wlandev; wlandev = (wlandevice_t *) usb_get_intfdata(interface); if (!wlandev) return -ENODEV; hw = wlandev->priv; if (!hw) return -ENODEV; /* Do a chip-level reset on the MAC */ if (prism2_doreset) { result = hfa384x_corereset(hw, prism2_reset_holdtime, prism2_reset_settletime, 0); if (result != 0) { unregister_wlandev(wlandev); hfa384x_destroy(hw); printk(KERN_ERR "%s: hfa384x_corereset() failed.\n", dev_info); kfree(wlandev); kfree(hw); wlandev = NULL; return -ENODEV; } } prism2sta_ifstate(wlandev, P80211ENUM_ifstate_enable); return 0; } #else #define prism2sta_suspend NULL #define prism2sta_resume NULL #endif /* CONFIG_PM */ static struct usb_driver prism2_usb_driver = { .name = "prism2_usb", .probe = prism2sta_probe_usb, .disconnect = prism2sta_disconnect_usb, .id_table = usb_prism_tbl, .suspend = prism2sta_suspend, .resume = prism2sta_resume, .reset_resume = prism2sta_resume, /* fops, minor? */ }; static int __init prism2usb_init(void) { /* This call will result in calls to prism2sta_probe_usb. 
*/ return usb_register(&prism2_usb_driver); }; static void __exit prism2usb_cleanup(void) { usb_deregister(&prism2_usb_driver); }; module_init(prism2usb_init); module_exit(prism2usb_cleanup);
gpl-2.0
srisurya95/Bhel_Kernel_falcon
drivers/video/msm/mipi_simulator_video.c
3634
2566
/* Copyright (c) 2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "msm_fb.h" #include "mipi_dsi.h" #include "mipi_simulator.h" static struct msm_panel_info pinfo; static struct mipi_dsi_phy_ctrl dsi_video_mode_phy_db = { {0x03, 0x01, 0x01, 0x00}, {0xaa, 0x3b, 0x1b, 0x00, 0x52, 0x58, 0x20, 0x3f, 0x2e, 0x03, 0x04}, {0x7f, 0x00, 0x00, 0x00}, {0xee, 0x00, 0x86, 0x00}, {0x40, 0xc7, 0xb0, 0xda, 0x00, 0x50, 0x48, 0x63, 0x30, 0x07, 0x03, 0x05, 0x14, 0x03, 0x0, 0x0, 0x54, 0x06, 0x10, 0x04, 0x0}, }; static int __init mipi_video_simulator_init(void) { int ret; if (msm_fb_detect_client("mipi_video_simulator_vga")) return 0; pinfo.xres = 640; pinfo.yres = 480; pinfo.type = MIPI_VIDEO_PANEL; pinfo.pdest = DISPLAY_1; pinfo.wait_cycle = 0; pinfo.bpp = 24; pinfo.lcdc.h_back_porch = 6; pinfo.lcdc.h_front_porch = 6; pinfo.lcdc.h_pulse_width = 2; pinfo.lcdc.v_back_porch = 6; pinfo.lcdc.v_front_porch = 6; pinfo.lcdc.v_pulse_width = 2; pinfo.lcdc.border_clr = 0; /* blk */ pinfo.lcdc.underflow_clr = 0xff; /* blue */ pinfo.lcdc.hsync_skew = 0; pinfo.bl_max = 15; pinfo.bl_min = 1; pinfo.fb_num = 2; pinfo.mipi.mode = DSI_VIDEO_MODE; pinfo.mipi.pulse_mode_hsa_he = TRUE; pinfo.mipi.hfp_power_stop = TRUE; pinfo.mipi.hbp_power_stop = TRUE; pinfo.mipi.hsa_power_stop = TRUE; pinfo.mipi.eof_bllp_power_stop = TRUE; pinfo.mipi.bllp_power_stop = TRUE; pinfo.mipi.traffic_mode = DSI_NON_BURST_SYNCH_PULSE; pinfo.mipi.dst_format = DSI_VIDEO_DST_FORMAT_RGB888; pinfo.mipi.vc = 0; pinfo.mipi.rgb_swap = DSI_RGB_SWAP_RGB; pinfo.mipi.data_lane0 = TRUE; 
pinfo.mipi.data_lane1 = TRUE; pinfo.mipi.t_clk_post = 0x03; pinfo.mipi.t_clk_pre = 0x24; pinfo.mipi.stream = 0; /* dma_p */ pinfo.mipi.mdp_trigger = DSI_CMD_TRIGGER_SW; pinfo.mipi.dma_trigger = DSI_CMD_TRIGGER_SW; pinfo.mipi.frame_rate = 60; pinfo.mipi.dsi_phy_db = &dsi_video_mode_phy_db; ret = mipi_simulator_device_register(&pinfo, MIPI_DSI_PRIM, MIPI_DSI_PANEL_VGA); if (ret) pr_err("%s: failed to register device!\n", __func__); return ret; } module_init(mipi_video_simulator_init);
gpl-2.0
Silviumik/Silviu_Kernel_I9195_LTE_KitKat
drivers/block/aoe/aoeblk.c
3634
7731
/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ /* * aoeblk.c * block device routines */ #include <linux/kernel.h> #include <linux/hdreg.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/fs.h> #include <linux/ioctl.h> #include <linux/slab.h> #include <linux/ratelimit.h> #include <linux/genhd.h> #include <linux/netdevice.h> #include <linux/mutex.h> #include <linux/export.h> #include "aoe.h" static DEFINE_MUTEX(aoeblk_mutex); static struct kmem_cache *buf_pool_cache; static ssize_t aoedisk_show_state(struct device *dev, struct device_attribute *attr, char *page) { struct gendisk *disk = dev_to_disk(dev); struct aoedev *d = disk->private_data; return snprintf(page, PAGE_SIZE, "%s%s\n", (d->flags & DEVFL_UP) ? "up" : "down", (d->flags & DEVFL_KICKME) ? ",kickme" : (d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : ""); /* I'd rather see nopen exported so we can ditch closewait */ } static ssize_t aoedisk_show_mac(struct device *dev, struct device_attribute *attr, char *page) { struct gendisk *disk = dev_to_disk(dev); struct aoedev *d = disk->private_data; struct aoetgt *t = d->targets[0]; if (t == NULL) return snprintf(page, PAGE_SIZE, "none\n"); return snprintf(page, PAGE_SIZE, "%pm\n", t->addr); } static ssize_t aoedisk_show_netif(struct device *dev, struct device_attribute *attr, char *page) { struct gendisk *disk = dev_to_disk(dev); struct aoedev *d = disk->private_data; struct net_device *nds[8], **nd, **nnd, **ne; struct aoetgt **t, **te; struct aoeif *ifp, *e; char *p; memset(nds, 0, sizeof nds); nd = nds; ne = nd + ARRAY_SIZE(nds); t = d->targets; te = t + NTARGETS; for (; t < te && *t; t++) { ifp = (*t)->ifs; e = ifp + NAOEIFS; for (; ifp < e && ifp->nd; ifp++) { for (nnd = nds; nnd < nd; nnd++) if (*nnd == ifp->nd) break; if (nnd == nd && nd != ne) *nd++ = ifp->nd; } } ne = nd; nd = nds; if (*nd == NULL) return snprintf(page, PAGE_SIZE, "none\n"); for (p = page; nd < ne; nd++) p += snprintf(p, PAGE_SIZE - 
(p-page), "%s%s", p == page ? "" : ",", (*nd)->name); p += snprintf(p, PAGE_SIZE - (p-page), "\n"); return p-page; } /* firmware version */ static ssize_t aoedisk_show_fwver(struct device *dev, struct device_attribute *attr, char *page) { struct gendisk *disk = dev_to_disk(dev); struct aoedev *d = disk->private_data; return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver); } static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL); static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL); static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL); static struct device_attribute dev_attr_firmware_version = { .attr = { .name = "firmware-version", .mode = S_IRUGO }, .show = aoedisk_show_fwver, }; static struct attribute *aoe_attrs[] = { &dev_attr_state.attr, &dev_attr_mac.attr, &dev_attr_netif.attr, &dev_attr_firmware_version.attr, NULL, }; static const struct attribute_group attr_group = { .attrs = aoe_attrs, }; static int aoedisk_add_sysfs(struct aoedev *d) { return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group); } void aoedisk_rm_sysfs(struct aoedev *d) { sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group); } static int aoeblk_open(struct block_device *bdev, fmode_t mode) { struct aoedev *d = bdev->bd_disk->private_data; ulong flags; mutex_lock(&aoeblk_mutex); spin_lock_irqsave(&d->lock, flags); if (d->flags & DEVFL_UP) { d->nopen++; spin_unlock_irqrestore(&d->lock, flags); mutex_unlock(&aoeblk_mutex); return 0; } spin_unlock_irqrestore(&d->lock, flags); mutex_unlock(&aoeblk_mutex); return -ENODEV; } static int aoeblk_release(struct gendisk *disk, fmode_t mode) { struct aoedev *d = disk->private_data; ulong flags; spin_lock_irqsave(&d->lock, flags); if (--d->nopen == 0) { spin_unlock_irqrestore(&d->lock, flags); aoecmd_cfg(d->aoemajor, d->aoeminor); return 0; } spin_unlock_irqrestore(&d->lock, flags); return 0; } static void aoeblk_make_request(struct request_queue *q, struct bio *bio) { struct sk_buff_head queue; struct 
aoedev *d; struct buf *buf; ulong flags; blk_queue_bounce(q, &bio); if (bio == NULL) { printk(KERN_ERR "aoe: bio is NULL\n"); BUG(); return; } d = bio->bi_bdev->bd_disk->private_data; if (d == NULL) { printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n"); BUG(); bio_endio(bio, -ENXIO); return; } else if (bio->bi_io_vec == NULL) { printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); BUG(); bio_endio(bio, -ENXIO); return; } buf = mempool_alloc(d->bufpool, GFP_NOIO); if (buf == NULL) { printk(KERN_INFO "aoe: buf allocation failure\n"); bio_endio(bio, -ENOMEM); return; } memset(buf, 0, sizeof(*buf)); INIT_LIST_HEAD(&buf->bufs); buf->stime = jiffies; buf->bio = bio; buf->resid = bio->bi_size; buf->sector = bio->bi_sector; buf->bv = &bio->bi_io_vec[bio->bi_idx]; buf->bv_resid = buf->bv->bv_len; WARN_ON(buf->bv_resid == 0); buf->bv_off = buf->bv->bv_offset; spin_lock_irqsave(&d->lock, flags); if ((d->flags & DEVFL_UP) == 0) { pr_info_ratelimited("aoe: device %ld.%d is not up\n", d->aoemajor, d->aoeminor); spin_unlock_irqrestore(&d->lock, flags); mempool_free(buf, d->bufpool); bio_endio(bio, -ENXIO); return; } list_add_tail(&buf->bufs, &d->bufq); aoecmd_work(d); __skb_queue_head_init(&queue); skb_queue_splice_init(&d->sendq, &queue); spin_unlock_irqrestore(&d->lock, flags); aoenet_xmit(&queue); } static int aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct aoedev *d = bdev->bd_disk->private_data; if ((d->flags & DEVFL_UP) == 0) { printk(KERN_ERR "aoe: disk not up\n"); return -ENODEV; } geo->cylinders = d->geo.cylinders; geo->heads = d->geo.heads; geo->sectors = d->geo.sectors; return 0; } static const struct block_device_operations aoe_bdops = { .open = aoeblk_open, .release = aoeblk_release, .getgeo = aoeblk_getgeo, .owner = THIS_MODULE, }; /* alloc_disk and add_disk can sleep */ void aoeblk_gdalloc(void *vp) { struct aoedev *d = vp; struct gendisk *gd; ulong flags; gd = alloc_disk(AOE_PARTITIONS); if (gd == NULL) { printk(KERN_ERR "aoe: cannot allocate 
disk structure for %ld.%d\n", d->aoemajor, d->aoeminor); goto err; } d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache); if (d->bufpool == NULL) { printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n", d->aoemajor, d->aoeminor); goto err_disk; } d->blkq = blk_alloc_queue(GFP_KERNEL); if (!d->blkq) goto err_mempool; blk_queue_make_request(d->blkq, aoeblk_make_request); d->blkq->backing_dev_info.name = "aoe"; if (bdi_init(&d->blkq->backing_dev_info)) goto err_blkq; spin_lock_irqsave(&d->lock, flags); gd->major = AOE_MAJOR; gd->first_minor = d->sysminor * AOE_PARTITIONS; gd->fops = &aoe_bdops; gd->private_data = d; set_capacity(gd, d->ssize); snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", d->aoemajor, d->aoeminor); gd->queue = d->blkq; d->gd = gd; d->flags &= ~DEVFL_GDALLOC; d->flags |= DEVFL_UP; spin_unlock_irqrestore(&d->lock, flags); add_disk(gd); aoedisk_add_sysfs(d); return; err_blkq: blk_cleanup_queue(d->blkq); d->blkq = NULL; err_mempool: mempool_destroy(d->bufpool); err_disk: put_disk(gd); err: spin_lock_irqsave(&d->lock, flags); d->flags &= ~DEVFL_GDALLOC; spin_unlock_irqrestore(&d->lock, flags); } void aoeblk_exit(void) { kmem_cache_destroy(buf_pool_cache); } int __init aoeblk_init(void) { buf_pool_cache = kmem_cache_create("aoe_bufs", sizeof(struct buf), 0, 0, NULL); if (buf_pool_cache == NULL) return -ENOMEM; return 0; }
gpl-2.0
meyskld/hammerhead_mr1
arch/arm/mach-msm/board-halibut-panel.c
3634
1469
/* linux/arch/arm/mach-msm/board-halibut-mddi.c ** Author: Brian Swetland <swetland@google.com> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/leds.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/bootmem.h> #include <asm/io.h> #include <asm/gpio.h> #include <asm/mach-types.h> #include <mach/msm_fb.h> #include <mach/vreg.h> #include <mach/proc_comm.h> #include "devices.h" #include "board-halibut.h" static void halibut_mddi_power_client(struct msm_mddi_client_data *mddi, int on) { } static struct resource resources_msm_fb = { .start = MSM_FB_BASE, .end = MSM_FB_BASE + MSM_FB_SIZE - 1, .flags = IORESOURCE_MEM, }; static struct msm_fb_data fb_data = { .xres = 800, .yres = 480, .output_format = 0, }; static struct msm_mddi_platform_data mddi_pdata = { .clk_rate = 122880000, .power_client = halibut_mddi_power_client, .fb_resource = &resources_msm_fb, .num_clients = 1, .client_platform_data = { { .product_id = (0x4474 << 16 | 0xc065), .name = "mddi_c_dummy", .id = 0, .client_data = &fb_data, .clk_rate = 0, }, }, }; int __init halibut_init_panel(void) { int rc; if (!machine_is_halibut()) return 0; rc = platform_device_register(&msm_device_mdp); if (rc) return rc; msm_device_mddi0.dev.platform_data = &mddi_pdata; return platform_device_register(&msm_device_mddi0); } device_initcall(halibut_init_panel);
gpl-2.0
civato/KK_Xplorer-9005
drivers/hid/hidraw.c
3890
12743
/* * HID raw devices, giving access to raw HID events. * * In comparison to hiddev, this device does not process the * hid events at all (no parsing, no lookups). This lets applications * to work on raw hid events as they want to, and avoids a need to * use a transport-specific userspace libhid/libusb libraries. * * Copyright (c) 2007 Jiri Kosina */ /* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/fs.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/cdev.h> #include <linux/poll.h> #include <linux/device.h> #include <linux/major.h> #include <linux/slab.h> #include <linux/hid.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/hidraw.h> static int hidraw_major; static struct cdev hidraw_cdev; static struct class *hidraw_class; static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES]; static DEFINE_MUTEX(minors_lock); static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct hidraw_list *list = file->private_data; int ret = 0, len; DECLARE_WAITQUEUE(wait, current); mutex_lock(&list->read_mutex); while (ret == 0) { if (list->head == list->tail) { add_wait_queue(&list->hidraw->wait, &wait); set_current_state(TASK_INTERRUPTIBLE); while (list->head == list->tail) { if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } if (signal_pending(current)) { ret = -ERESTARTSYS; break; } if (!list->hidraw->exist) { ret = -EIO; break; } /* allow O_NONBLOCK to work well from other threads */ mutex_unlock(&list->read_mutex); 
schedule(); mutex_lock(&list->read_mutex); set_current_state(TASK_INTERRUPTIBLE); } set_current_state(TASK_RUNNING); remove_wait_queue(&list->hidraw->wait, &wait); } if (ret) goto out; len = list->buffer[list->tail].len > count ? count : list->buffer[list->tail].len; if (copy_to_user(buffer, list->buffer[list->tail].value, len)) { ret = -EFAULT; goto out; } ret = len; kfree(list->buffer[list->tail].value); list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1); } out: mutex_unlock(&list->read_mutex); return ret; } /* The first byte is expected to be a report number. * This function is to be called with the minors_lock mutex held */ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct hid_device *dev; __u8 *buf; int ret = 0; if (!hidraw_table[minor]) { ret = -ENODEV; goto out; } dev = hidraw_table[minor]->hid; if (!dev->hid_output_raw_report) { ret = -ENODEV; goto out; } if (count > HID_MAX_BUFFER_SIZE) { hid_warn(dev, "pid %d passed too large report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } if (count < 2) { hid_warn(dev, "pid %d passed too short report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } buf = kmalloc(count * sizeof(__u8), GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto out; } if (copy_from_user(buf, buffer, count)) { ret = -EFAULT; goto out_free; } ret = dev->hid_output_raw_report(dev, buf, count, report_type); out_free: kfree(buf); out: return ret; } /* the first byte is expected to be a report number */ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { ssize_t ret; mutex_lock(&minors_lock); ret = hidraw_send_report(file, buffer, count, HID_OUTPUT_REPORT); mutex_unlock(&minors_lock); return ret; } /* This function performs a Get_Report transfer over the control endpoint * per section 7.2.1 of the HID specification, version 1.1. 
The first byte * of buffer is the report number to request, or 0x0 if the defice does not * use numbered reports. The report_type parameter can be HID_FEATURE_REPORT * or HID_INPUT_REPORT. This function is to be called with the minors_lock * mutex held. */ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct hid_device *dev; __u8 *buf; int ret = 0, len; unsigned char report_number; dev = hidraw_table[minor]->hid; if (!dev->hid_get_raw_report) { ret = -ENODEV; goto out; } if (count > HID_MAX_BUFFER_SIZE) { printk(KERN_WARNING "hidraw: pid %d passed too large report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } if (count < 2) { printk(KERN_WARNING "hidraw: pid %d passed too short report\n", task_pid_nr(current)); ret = -EINVAL; goto out; } buf = kmalloc(count * sizeof(__u8), GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto out; } /* Read the first byte from the user. This is the report number, * which is passed to dev->hid_get_raw_report(). */ if (copy_from_user(&report_number, buffer, 1)) { ret = -EFAULT; goto out_free; } ret = dev->hid_get_raw_report(dev, report_number, buf, count, report_type); if (ret < 0) goto out_free; len = (ret < count) ? 
ret : count; if (copy_to_user(buffer, buf, len)) { ret = -EFAULT; goto out_free; } ret = len; out_free: kfree(buf); out: return ret; } static unsigned int hidraw_poll(struct file *file, poll_table *wait) { struct hidraw_list *list = file->private_data; poll_wait(file, &list->hidraw->wait, wait); if (list->head != list->tail) return POLLIN | POLLRDNORM; if (!list->hidraw->exist) return POLLERR | POLLHUP; return 0; } static int hidraw_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct hidraw *dev; struct hidraw_list *list; int err = 0; if (!(list = kzalloc(sizeof(struct hidraw_list), GFP_KERNEL))) { err = -ENOMEM; goto out; } mutex_lock(&minors_lock); if (!hidraw_table[minor]) { err = -ENODEV; goto out_unlock; } list->hidraw = hidraw_table[minor]; mutex_init(&list->read_mutex); list_add_tail(&list->node, &hidraw_table[minor]->list); file->private_data = list; dev = hidraw_table[minor]; if (!dev->open++) { err = hid_hw_power(dev->hid, PM_HINT_FULLON); if (err < 0) { dev->open--; goto out_unlock; } err = hid_hw_open(dev->hid); if (err < 0) { hid_hw_power(dev->hid, PM_HINT_NORMAL); dev->open--; } } out_unlock: mutex_unlock(&minors_lock); out: if (err < 0) kfree(list); return err; } static int hidraw_release(struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); struct hidraw *dev; struct hidraw_list *list = file->private_data; int ret; mutex_lock(&minors_lock); if (!hidraw_table[minor]) { ret = -ENODEV; goto unlock; } list_del(&list->node); dev = hidraw_table[minor]; if (!--dev->open) { if (list->hidraw->exist) { hid_hw_power(dev->hid, PM_HINT_NORMAL); hid_hw_close(dev->hid); } else { kfree(list->hidraw); } } kfree(list); ret = 0; unlock: mutex_unlock(&minors_lock); return ret; } static long hidraw_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file->f_path.dentry->d_inode; unsigned int minor = iminor(inode); long ret = 0; struct hidraw *dev; void __user *user_arg = 
(void __user*) arg; mutex_lock(&minors_lock); dev = hidraw_table[minor]; if (!dev) { ret = -ENODEV; goto out; } switch (cmd) { case HIDIOCGRDESCSIZE: if (put_user(dev->hid->rsize, (int __user *)arg)) ret = -EFAULT; break; case HIDIOCGRDESC: { __u32 len; if (get_user(len, (int __user *)arg)) ret = -EFAULT; else if (len > HID_MAX_DESCRIPTOR_SIZE - 1) ret = -EINVAL; else if (copy_to_user(user_arg + offsetof( struct hidraw_report_descriptor, value[0]), dev->hid->rdesc, min(dev->hid->rsize, len))) ret = -EFAULT; break; } case HIDIOCGRAWINFO: { struct hidraw_devinfo dinfo; dinfo.bustype = dev->hid->bus; dinfo.vendor = dev->hid->vendor; dinfo.product = dev->hid->product; if (copy_to_user(user_arg, &dinfo, sizeof(dinfo))) ret = -EFAULT; break; } default: { struct hid_device *hid = dev->hid; if (_IOC_TYPE(cmd) != 'H') { ret = -EINVAL; break; } if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSFEATURE(0))) { int len = _IOC_SIZE(cmd); ret = hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT); break; } if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGFEATURE(0))) { int len = _IOC_SIZE(cmd); ret = hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT); break; } /* Begin Read-only ioctls. */ if (_IOC_DIR(cmd) != _IOC_READ) { ret = -EINVAL; break; } if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWNAME(0))) { int len = strlen(hid->name) + 1; if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd); ret = copy_to_user(user_arg, hid->name, len) ? -EFAULT : len; break; } if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWPHYS(0))) { int len = strlen(hid->phys) + 1; if (len > _IOC_SIZE(cmd)) len = _IOC_SIZE(cmd); ret = copy_to_user(user_arg, hid->phys, len) ? 
-EFAULT : len; break; } } ret = -ENOTTY; } out: mutex_unlock(&minors_lock); return ret; } static const struct file_operations hidraw_ops = { .owner = THIS_MODULE, .read = hidraw_read, .write = hidraw_write, .poll = hidraw_poll, .open = hidraw_open, .release = hidraw_release, .unlocked_ioctl = hidraw_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = hidraw_ioctl, #endif .llseek = noop_llseek, }; void hidraw_report_event(struct hid_device *hid, u8 *data, int len) { struct hidraw *dev = hid->hidraw; struct hidraw_list *list; list_for_each_entry(list, &dev->list, node) { list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC); list->buffer[list->head].len = len; list->head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1); kill_fasync(&list->fasync, SIGIO, POLL_IN); } wake_up_interruptible(&dev->wait); } EXPORT_SYMBOL_GPL(hidraw_report_event); int hidraw_connect(struct hid_device *hid) { int minor, result; struct hidraw *dev; /* we accept any HID device, no matter the applications */ dev = kzalloc(sizeof(struct hidraw), GFP_KERNEL); if (!dev) return -ENOMEM; result = -EINVAL; mutex_lock(&minors_lock); for (minor = 0; minor < HIDRAW_MAX_DEVICES; minor++) { if (hidraw_table[minor]) continue; hidraw_table[minor] = dev; result = 0; break; } if (result) { mutex_unlock(&minors_lock); kfree(dev); goto out; } dev->dev = device_create(hidraw_class, &hid->dev, MKDEV(hidraw_major, minor), NULL, "%s%d", "hidraw", minor); if (IS_ERR(dev->dev)) { hidraw_table[minor] = NULL; mutex_unlock(&minors_lock); result = PTR_ERR(dev->dev); kfree(dev); goto out; } mutex_unlock(&minors_lock); init_waitqueue_head(&dev->wait); INIT_LIST_HEAD(&dev->list); dev->hid = hid; dev->minor = minor; dev->exist = 1; hid->hidraw = dev; out: return result; } EXPORT_SYMBOL_GPL(hidraw_connect); void hidraw_disconnect(struct hid_device *hid) { struct hidraw *hidraw = hid->hidraw; mutex_lock(&minors_lock); hidraw->exist = 0; device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); 
hidraw_table[hidraw->minor] = NULL; if (hidraw->open) { hid_hw_close(hid); wake_up_interruptible(&hidraw->wait); } else { kfree(hidraw); } mutex_unlock(&minors_lock); } EXPORT_SYMBOL_GPL(hidraw_disconnect); int __init hidraw_init(void) { int result; dev_t dev_id; result = alloc_chrdev_region(&dev_id, HIDRAW_FIRST_MINOR, HIDRAW_MAX_DEVICES, "hidraw"); hidraw_major = MAJOR(dev_id); if (result < 0) { pr_warn("can't get major number\n"); result = 0; goto out; } hidraw_class = class_create(THIS_MODULE, "hidraw"); if (IS_ERR(hidraw_class)) { result = PTR_ERR(hidraw_class); unregister_chrdev(hidraw_major, "hidraw"); goto out; } cdev_init(&hidraw_cdev, &hidraw_ops); cdev_add(&hidraw_cdev, dev_id, HIDRAW_MAX_DEVICES); out: return result; } void hidraw_exit(void) { dev_t dev_id = MKDEV(hidraw_major, 0); cdev_del(&hidraw_cdev); class_destroy(hidraw_class); unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES); }
gpl-2.0