repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
dschensen/android_kernel_samsung_smdk4412 | drivers/gpu/mali400/r3p2/mali/common/mali_l2_cache.c | 194 | 13977 | /*
* Copyright (C) 2010-2012 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained from Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_l2_cache.h"
#include "mali_hw_core.h"
#include "mali_scheduler.h"
#include "mali_pm_domain.h"
/**
* Size of the Mali L2 cache registers in bytes
*/
#define MALI400_L2_CACHE_REGISTERS_SIZE 0x30
/**
* Mali L2 cache register numbers
* Used in the register read/write routines.
* See the hardware documentation for more information about each register
*/
typedef enum mali_l2_cache_register {
MALI400_L2_CACHE_REGISTER_STATUS = 0x0008,
/*unused = 0x000C */
MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010, /**< Misc cache commands, e.g. clear */
MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0014,
MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018, /**< Limit of outstanding read requests */
MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C, /**< Enable misc cache features */
MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1 = 0x002C,
} mali_l2_cache_register;
/**
* Mali L2 cache commands
* These are the commands that can be sent to the Mali L2 cache unit
*/
typedef enum mali_l2_cache_command
{
MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
/* Read HW TRM carefully before adding/using other commands than the clear above */
} mali_l2_cache_command;
/**
 * Mali L2 cache enable flags
 * These are the flags that can be used to enable features of the Mali L2 cache unit
 */
typedef enum mali_l2_cache_enable
{
MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
} mali_l2_cache_enable;
/**
* Mali L2 cache status bits
*/
typedef enum mali_l2_cache_status
{
MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, /**< L2 cache is busy handling data requests */
} mali_l2_cache_status;
#define MALI400_L2_MAX_READS_DEFAULT 0x1C
static struct mali_l2_cache_core *mali_global_l2_cache_cores[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
static u32 mali_global_num_l2_cache_cores = 0;
int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
/* Local helper functions */
static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val);
struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
{
struct mali_l2_cache_core *cache = NULL;
_mali_osk_lock_flags_t lock_flags;
#if defined(MALI_UPPER_HALF_SCHEDULING)
lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK_IRQ | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
#else
lock_flags = _MALI_OSK_LOCKFLAG_ORDERED | _MALI_OSK_LOCKFLAG_SPINLOCK | _MALI_OSK_LOCKFLAG_NONINTERRUPTABLE;
#endif
MALI_DEBUG_PRINT(2, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));
if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES)
{
MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
return NULL;
}
cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
if (NULL != cache)
{
cache->core_id = mali_global_num_l2_cache_cores;
cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
cache->pm_domain = NULL;
if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE))
{
cache->command_lock = _mali_osk_lock_init(lock_flags, 0, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
if (NULL != cache->command_lock)
{
cache->counter_lock = _mali_osk_lock_init(lock_flags, 0, _MALI_OSK_LOCK_ORDER_L2_COUNTER);
if (NULL != cache->counter_lock)
{
mali_l2_cache_reset(cache);
cache->last_invalidated_id = 0;
mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
mali_global_num_l2_cache_cores++;
return cache;
}
else
{
MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
}
_mali_osk_lock_term(cache->command_lock);
}
else
{
MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
}
mali_hw_core_delete(&cache->hw_core);
}
_mali_osk_free(cache);
}
else
{
MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
}
return NULL;
}
void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
{
u32 i;
/* reset to defaults */
mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
_mali_osk_lock_term(cache->counter_lock);
_mali_osk_lock_term(cache->command_lock);
mali_hw_core_delete(&cache->hw_core);
for (i = 0; i < mali_global_num_l2_cache_cores; i++)
{
if (mali_global_l2_cache_cores[i] == cache)
{
mali_global_l2_cache_cores[i] = NULL;
mali_global_num_l2_cache_cores--;
if (i != mali_global_num_l2_cache_cores)
{
/* We removed an L2 cache from the middle of the array -- move the last
 * L2 cache to the current position to close the gap */
mali_global_l2_cache_cores[i] = mali_global_l2_cache_cores[mali_global_num_l2_cache_cores];
mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = NULL;
}
break;
}
}
_mali_osk_free(cache);
}
u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
{
return cache->core_id;
}
mali_bool mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter)
{
u32 value = 0; /* disabled src */
mali_bool core_is_on;
MALI_DEBUG_ASSERT_POINTER(cache);
core_is_on = mali_l2_cache_lock_power_state(cache);
_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
cache->counter_src0 = counter;
if (MALI_HW_CORE_NO_COUNTER != counter)
{
value = counter;
}
if (MALI_TRUE == core_is_on)
{
mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, value);
}
_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
mali_l2_cache_unlock_power_state(cache);
return MALI_TRUE;
}
mali_bool mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
{
u32 value = 0; /* disabled src */
mali_bool core_is_on;
MALI_DEBUG_ASSERT_POINTER(cache);
core_is_on = mali_l2_cache_lock_power_state(cache);
_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
cache->counter_src1 = counter;
if (MALI_HW_CORE_NO_COUNTER != counter)
{
value = counter;
}
if (MALI_TRUE == core_is_on)
{
mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, value);
}
_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
mali_l2_cache_unlock_power_state(cache);
return MALI_TRUE;
}
u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache)
{
return cache->counter_src0;
}
u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache)
{
return cache->counter_src1;
}
void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
{
MALI_DEBUG_ASSERT(NULL != src0);
MALI_DEBUG_ASSERT(NULL != value0);
MALI_DEBUG_ASSERT(NULL != src1);
MALI_DEBUG_ASSERT(NULL != value1);
/* Caller must hold the PM lock and know that we are powered on */
_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
*src0 = cache->counter_src0;
*src1 = cache->counter_src1;
if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
{
*value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
}
if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
{
*value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
}
_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
}
struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
{
if (mali_global_num_l2_cache_cores > index)
{
return mali_global_l2_cache_cores[index];
}
return NULL;
}
u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
{
return mali_global_num_l2_cache_cores;
}
void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
{
/* Invalidate cache (just to keep it in a known state at startup) */
mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
/* Enable cache */
mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
/* Restart any performance counters (if enabled) */
_mali_osk_lock_wait(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER)
{
mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
}
if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER)
{
mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
}
_mali_osk_lock_signal(cache->counter_lock, _MALI_OSK_LOCKMODE_RW);
}
void mali_l2_cache_reset_all(void)
{
int i;
u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
for (i = 0; i < num_cores; i++)
{
mali_l2_cache_reset(mali_l2_cache_core_get_glob_l2_core(i));
}
}
void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
{
MALI_DEBUG_ASSERT_POINTER(cache);
if (NULL != cache)
{
cache->last_invalidated_id = mali_scheduler_get_new_id();
mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
}
}
mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id)
{
MALI_DEBUG_ASSERT_POINTER(cache);
if (NULL != cache)
{
/* If the last cache invalidation was done by a job with a higher id we
* don't have to flush. Since user space will store jobs w/ their
* corresponding memory in sequence (first job #0, then job #1, ...),
* we don't have to flush for job n-1 if job n has already invalidated
* the cache since we know for sure that job n-1's memory was already
* written when job n was started. */
if (((s32)id) <= ((s32)cache->last_invalidated_id))
{
return MALI_FALSE;
}
else
{
cache->last_invalidated_id = mali_scheduler_get_new_id();
}
mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
}
return MALI_TRUE;
}
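/*
 * Worked example (illustrative, not part of the original driver): the
 * signed casts above keep the comparison correct across u32 job-id
 * wraparound. With last_invalidated_id == 0xFFFFFFFF and a fresh id of
 * 0x00000001, an unsigned compare (1 <= 0xFFFFFFFF) would wrongly skip
 * the flush, whereas the signed compare ((s32)1 <= (s32)-1) is false,
 * so the cache is invalidated as required.
 */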
void mali_l2_cache_invalidate_all(void)
{
u32 i;
for (i = 0; i < mali_global_num_l2_cache_cores; i++)
{
/*additional check*/
if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i]))
{
_mali_osk_errcode_t ret;
mali_global_l2_cache_cores[i]->last_invalidated_id = mali_scheduler_get_new_id();
ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
if (_MALI_OSK_ERR_OK != ret)
{
MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
}
}
mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
}
}
void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
{
u32 i;
for (i = 0; i < mali_global_num_l2_cache_cores; i++)
{
/*additional check*/
if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i]))
{
u32 j;
for (j = 0; j < num_pages; j++)
{
_mali_osk_errcode_t ret;
ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[j]);
if (_MALI_OSK_ERR_OK != ret)
{
MALI_PRINT_ERROR(("Failed to invalidate page cache\n"));
}
}
}
mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
}
}
mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache)
{
return mali_pm_domain_lock_state(cache->pm_domain);
}
void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache)
{
return mali_pm_domain_unlock_state(cache->pm_domain);
}
/* -------- local helper functions below -------- */
static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
{
int i = 0;
const int loop_count = 100000;
/*
* Grab lock in order to send commands to the L2 cache in a serialized fashion.
* The L2 cache will ignore commands if it is busy.
*/
_mali_osk_lock_wait(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
/* First, wait for L2 cache command handler to go idle */
for (i = 0; i < loop_count; i++)
{
if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY))
{
break;
}
}
if (i == loop_count)
{
_mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
MALI_DEBUG_PRINT(1, ( "Mali L2 cache: aborting wait for command interface to go idle\n"));
MALI_ERROR( _MALI_OSK_ERR_FAULT );
}
/* then issue the command */
mali_hw_core_register_write(&cache->hw_core, reg, val);
_mali_osk_lock_signal(cache->command_lock, _MALI_OSK_LOCKMODE_RW);
MALI_SUCCESS;
}
| gpl-2.0 |
mialwe/mnics-i9100 | drivers/media/video/samsung/mali/linux/mali_osk_low_level_mem.c | 194 | 17615 | /*
* Copyright (C) 2010 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained from Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
* @file mali_osk_low_level_mem.c
* Implementation of the OS abstraction layer for the kernel device driver
*/
/* needed to detect kernel version specific code */
#include <linux/version.h>
#include <asm/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include "mali_osk.h"
#include "mali_ukk.h" /* required to hook in _mali_ukk_mem_mmap handling */
#include "mali_kernel_common.h"
#include "mali_kernel_linux.h"
static void mali_kernel_memory_vma_open(struct vm_area_struct * vma);
static void mali_kernel_memory_vma_close(struct vm_area_struct * vma);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
#else
static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
#endif
typedef struct mali_vma_usage_tracker
{
int references;
u32 cookie;
} mali_vma_usage_tracker;
/* Linked list structure to hold details of all OS allocations in a particular
* mapping
*/
struct AllocationList
{
struct AllocationList *next;
u32 offset;
u32 physaddr;
};
typedef struct AllocationList AllocationList;
/* Private structure to store details of a mapping region returned
* from _mali_osk_mem_mapregion_init
*/
struct MappingInfo
{
struct vm_area_struct *vma;
struct AllocationList *list;
};
typedef struct MappingInfo MappingInfo;
static u32 _kernel_page_allocate(void);
static void _kernel_page_release(u32 physical_address);
static AllocationList * _allocation_list_item_get(void);
static void _allocation_list_item_release(AllocationList * item);
/* Variable declarations */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,36)
spinlock_t allocation_list_spinlock = SPIN_LOCK_UNLOCKED;
#else
DEFINE_SPINLOCK(allocation_list_spinlock);
#endif
static AllocationList * pre_allocated_memory = (AllocationList*) NULL ;
static int pre_allocated_memory_size_current = 0;
#ifdef MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB
static int pre_allocated_memory_size_max = MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 1024 * 1024;
#else
static int pre_allocated_memory_size_max = 6 * 1024 * 1024; /* 6 MiB */
#endif
static struct vm_operations_struct mali_kernel_vm_ops =
{
.open = mali_kernel_memory_vma_open,
.close = mali_kernel_memory_vma_close,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
.fault = mali_kernel_memory_cpu_page_fault_handler
#else
.nopfn = mali_kernel_memory_cpu_page_fault_handler
#endif
};
void mali_osk_low_level_mem_init(void)
{
pre_allocated_memory = (AllocationList*) NULL ;
}
void mali_osk_low_level_mem_term(void)
{
while ( NULL != pre_allocated_memory )
{
AllocationList *item;
item = pre_allocated_memory;
pre_allocated_memory = item->next;
_kernel_page_release(item->physaddr);
_mali_osk_free( item );
}
pre_allocated_memory_size_current = 0;
}
static u32 _kernel_page_allocate(void)
{
struct page *new_page;
u32 linux_phys_addr;
new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD);
if ( NULL == new_page )
{
return 0;
}
/* Ensure page is flushed from CPU caches. */
linux_phys_addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
return linux_phys_addr;
}
static void _kernel_page_release(u32 physical_address)
{
struct page *unmap_page;
#if 1
dma_unmap_page(NULL, physical_address, PAGE_SIZE, DMA_BIDIRECTIONAL);
#endif
unmap_page = pfn_to_page( physical_address >> PAGE_SHIFT );
MALI_DEBUG_ASSERT_POINTER( unmap_page );
__free_page( unmap_page );
}
static AllocationList * _allocation_list_item_get(void)
{
AllocationList *item = NULL;
unsigned long flags;
spin_lock_irqsave(&allocation_list_spinlock,flags);
if ( pre_allocated_memory )
{
item = pre_allocated_memory;
pre_allocated_memory = pre_allocated_memory->next;
pre_allocated_memory_size_current -= PAGE_SIZE;
spin_unlock_irqrestore(&allocation_list_spinlock,flags);
return item;
}
spin_unlock_irqrestore(&allocation_list_spinlock,flags);
item = _mali_osk_malloc( sizeof(AllocationList) );
if ( NULL == item)
{
return NULL;
}
item->physaddr = _kernel_page_allocate();
if ( 0 == item->physaddr )
{
/* Non-fatal error condition, out of memory. Upper levels will handle this. */
_mali_osk_free( item );
return NULL;
}
return item;
}
static void _allocation_list_item_release(AllocationList * item)
{
unsigned long flags;
spin_lock_irqsave(&allocation_list_spinlock,flags);
if ( pre_allocated_memory_size_current < pre_allocated_memory_size_max)
{
item->next = pre_allocated_memory;
pre_allocated_memory = item;
pre_allocated_memory_size_current += PAGE_SIZE;
spin_unlock_irqrestore(&allocation_list_spinlock,flags);
return;
}
spin_unlock_irqrestore(&allocation_list_spinlock,flags);
_kernel_page_release(item->physaddr);
_mali_osk_free( item );
}
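/*
 * Illustrative note (not from the original source): the two helpers
 * above implement a bounded free list. _allocation_list_item_release()
 * parks pages on pre_allocated_memory until the pool reaches
 * pre_allocated_memory_size_max; beyond that the page goes back to the
 * kernel. _allocation_list_item_get() satisfies requests from the pool
 * first and falls back to a fresh page (via _kernel_page_allocate())
 * only when the pool is empty.
 */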
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
#else
static unsigned long mali_kernel_memory_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
void __user * address;
address = vmf->virtual_address;
#endif
/*
* We always fail the call since all memory is pre-faulted when assigned to the process.
* Only the Mali cores can use page faults to extend buffers.
*/
MALI_DEBUG_PRINT(1, ("Page-fault in Mali memory region caused by the CPU.\n"));
MALI_DEBUG_PRINT(1, ("Tried to access %p (process local virtual address) which is not currently mapped to any Mali memory.\n", (void*)address));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
return VM_FAULT_SIGBUS;
#else
return NOPFN_SIGBUS;
#endif
}
static void mali_kernel_memory_vma_open(struct vm_area_struct * vma)
{
mali_vma_usage_tracker * vma_usage_tracker;
MALI_DEBUG_PRINT(4, ("Open called on vma %p\n", vma));
vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
vma_usage_tracker->references++;
return;
}
static void mali_kernel_memory_vma_close(struct vm_area_struct * vma)
{
_mali_uk_mem_munmap_s args = {0, };
mali_memory_allocation * descriptor;
mali_vma_usage_tracker * vma_usage_tracker;
MappingInfo *mappingInfo;
MALI_DEBUG_PRINT(3, ("Close called on vma %p\n", vma));
vma_usage_tracker = (mali_vma_usage_tracker*)vma->vm_private_data;
BUG_ON(!vma_usage_tracker);
BUG_ON(0 == vma_usage_tracker->references);
vma_usage_tracker->references--;
descriptor = (mali_memory_allocation *)vma_usage_tracker->cookie;
mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
mappingInfo->vma = vma;
if (0 != vma_usage_tracker->references)
{
MALI_DEBUG_PRINT(3, ("Ignoring this close, %d references still exists\n", vma_usage_tracker->references));
return;
}
/** @note args->context unused, initialized to 0.
* Instead, we use the memory_session from the cookie */
args.cookie = (u32)descriptor;
args.mapping = descriptor->mapping;
args.size = descriptor->size;
_mali_ukk_mem_munmap( &args );
/* vma_usage_tracker is free()d by _mali_osk_mem_mapregion_term().
* In the case of the memory engine, it is called as the release function that has been registered with the engine */
}
void _mali_osk_mem_barrier( void )
{
mb();
}
void _mali_osk_write_mem_barrier( void )
{
wmb();
}
mali_io_address _mali_osk_mem_mapioregion( u32 phys, u32 size, const char *description )
{
return (mali_io_address)ioremap_nocache(phys, size);
}
void _mali_osk_mem_unmapioregion( u32 phys, u32 size, mali_io_address virt )
{
iounmap((void*)virt);
}
mali_io_address _mali_osk_mem_allocioregion( u32 *phys, u32 size )
{
void * virt;
MALI_DEBUG_ASSERT_POINTER( phys );
MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
MALI_DEBUG_ASSERT( 0 != size );
/* dma_alloc_* uses a limited region of address space. On most arch/marchs
* 2 to 14 MiB is available. This should be enough for the page tables, which
* currently is the only user of this function. */
virt = dma_alloc_coherent(NULL, size, phys, GFP_KERNEL | GFP_DMA );
MALI_DEBUG_PRINT(3, ("Page table virt: 0x%x = dma_alloc_coherent(size:%d, phys:0x%x, )\n", virt, size, phys));
if ( NULL == virt )
{
MALI_DEBUG_PRINT(5, ("allocioregion: Failed to allocate Pagetable memory, size=0x%.8X\n", size ));
return 0;
}
MALI_DEBUG_ASSERT( 0 == (*phys & ~_MALI_OSK_CPU_PAGE_MASK) );
return (mali_io_address)virt;
}
void _mali_osk_mem_freeioregion( u32 phys, u32 size, mali_io_address virt )
{
MALI_DEBUG_ASSERT_POINTER( (void*)virt );
MALI_DEBUG_ASSERT( 0 != size );
MALI_DEBUG_ASSERT( 0 == (phys & ( (1 << PAGE_SHIFT) - 1 )) );
dma_free_coherent(NULL, size, virt, phys);
}
_mali_osk_errcode_t inline _mali_osk_mem_reqregion( u32 phys, u32 size, const char *description )
{
return ((NULL == request_mem_region(phys, size, description)) ? _MALI_OSK_ERR_NOMEM : _MALI_OSK_ERR_OK);
}
void inline _mali_osk_mem_unreqregion( u32 phys, u32 size )
{
release_mem_region(phys, size);
}
void inline _mali_osk_mem_iowrite32_relaxed( volatile mali_io_address addr, u32 offset, u32 val )
{
__raw_writel(cpu_to_le32(val),((u8*)addr) + offset);
}
u32 inline _mali_osk_mem_ioread32( volatile mali_io_address addr, u32 offset )
{
return ioread32(((u8*)addr) + offset);
}
void inline _mali_osk_mem_iowrite32( volatile mali_io_address addr, u32 offset, u32 val )
{
iowrite32(val, ((u8*)addr) + offset);
}
void _mali_osk_cache_flushall( void )
{
/** @note Cached memory is not currently supported in this implementation */
}
void _mali_osk_cache_ensure_uncached_range_flushed( void *uncached_mapping, u32 offset, u32 size )
{
_mali_osk_write_mem_barrier();
}
_mali_osk_errcode_t _mali_osk_mem_mapregion_init( mali_memory_allocation * descriptor )
{
struct vm_area_struct *vma;
mali_vma_usage_tracker * vma_usage_tracker;
MappingInfo *mappingInfo;
if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
vma = (struct vm_area_struct*)descriptor->process_addr_mapping_info;
if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
/* Re-write the process_addr_mapping_info */
mappingInfo = _mali_osk_calloc( 1, sizeof(MappingInfo) );
if ( NULL == mappingInfo ) return _MALI_OSK_ERR_FAULT;
vma_usage_tracker = _mali_osk_calloc( 1, sizeof(mali_vma_usage_tracker) );
if (NULL == vma_usage_tracker)
{
MALI_DEBUG_PRINT(2, ("Failed to allocate memory to track memory usage\n"));
_mali_osk_free( mappingInfo );
return _MALI_OSK_ERR_FAULT;
}
mappingInfo->vma = vma;
descriptor->process_addr_mapping_info = mappingInfo;
/* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
descriptor->mapping = (void __user*)vma->vm_start;
/* list member is already NULL */
/*
set some bits which indicate that:
The memory is IO memory, meaning that no paging is to be performed and the memory should not be included in crash dumps
The memory is reserved, meaning that it's present and can never be paged out (see also previous entry)
*/
vma->vm_flags |= VM_IO;
vma->vm_flags |= VM_RESERVED;
vma->vm_flags |= VM_DONTCOPY;
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &mali_kernel_vm_ops; /* Operations used on any memory system */
vma_usage_tracker->references = 1; /* set initial reference count to be 1 as vma_open won't be called for the first mmap call */
vma_usage_tracker->cookie = (u32)descriptor; /* cookie for munmap */
vma->vm_private_data = vma_usage_tracker;
return _MALI_OSK_ERR_OK;
}
void _mali_osk_mem_mapregion_term( mali_memory_allocation * descriptor )
{
struct vm_area_struct* vma;
mali_vma_usage_tracker * vma_usage_tracker;
MappingInfo *mappingInfo;
if (NULL == descriptor) return;
MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
MALI_DEBUG_ASSERT_POINTER( mappingInfo );
/* Linux does the right thing as part of munmap to remove the mapping
* All that remains is that we remove the vma_usage_tracker setup in init() */
vma = mappingInfo->vma;
MALI_DEBUG_ASSERT_POINTER( vma );
/* ASSERT that there are no allocations on the list. Unmap should've been
* called on all OS allocations. */
MALI_DEBUG_ASSERT( NULL == mappingInfo->list );
vma_usage_tracker = vma->vm_private_data;
/* We only get called if mem_mapregion_init succeeded */
_mali_osk_free(vma_usage_tracker);
_mali_osk_free( mappingInfo );
return;
}
_mali_osk_errcode_t _mali_osk_mem_mapregion_map( mali_memory_allocation * descriptor, u32 offset, u32 *phys_addr, u32 size )
{
struct vm_area_struct *vma;
MappingInfo *mappingInfo;
if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
MALI_DEBUG_ASSERT_POINTER( phys_addr );
MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK));
if (NULL == descriptor->mapping) return _MALI_OSK_ERR_INVALID_ARGS;
if (size > (descriptor->size - offset))
{
MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_map: virtual memory area not large enough to map physical 0x%x size %x into area 0x%x at offset 0x%xr\n",
*phys_addr, size, descriptor->mapping, offset));
return _MALI_OSK_ERR_FAULT;
}
mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
MALI_DEBUG_ASSERT_POINTER( mappingInfo );
vma = mappingInfo->vma;
if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
MALI_DEBUG_PRINT(7, ("Process map: mapping 0x%08X to process address 0x%08lX length 0x%08X\n", *phys_addr, (long unsigned int)(descriptor->mapping + offset), size));
if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == *phys_addr )
{
_mali_osk_errcode_t ret;
AllocationList *alloc_item;
u32 linux_phys_frame_num;
alloc_item = _allocation_list_item_get();
if (NULL == alloc_item)
{
MALI_DEBUG_PRINT(1, ("Failed to allocate list item\n"));
return _MALI_OSK_ERR_NOMEM;
}
linux_phys_frame_num = alloc_item->physaddr >> PAGE_SHIFT;
ret = ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, linux_phys_frame_num, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
if ( ret != _MALI_OSK_ERR_OK)
{
_allocation_list_item_release(alloc_item);
return ret;
}
/* Put our alloc_item into the list of allocations on success */
alloc_item->next = mappingInfo->list;
alloc_item->offset = offset;
/*alloc_item->physaddr = linux_phys_addr;*/
mappingInfo->list = alloc_item;
/* Write out new physical address on success */
*phys_addr = alloc_item->physaddr;
return ret;
}
/* Otherwise, Use the supplied physical address */
/* ASSERT that supplied phys_addr is page aligned */
MALI_DEBUG_ASSERT( 0 == ((*phys_addr) & ~_MALI_OSK_CPU_PAGE_MASK) );
return ( remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, *phys_addr >> PAGE_SHIFT, size, vma->vm_page_prot) ) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
}
void _mali_osk_mem_mapregion_unmap( mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t flags )
{
MappingInfo *mappingInfo;
if (NULL == descriptor) return;
MALI_DEBUG_ASSERT( 0 != (descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE) );
MALI_DEBUG_ASSERT( 0 == (size & ~_MALI_OSK_CPU_PAGE_MASK) );
MALI_DEBUG_ASSERT( 0 == (offset & ~_MALI_OSK_CPU_PAGE_MASK) );
if (NULL == descriptor->mapping) return;
if (size > (descriptor->size - offset))
{
MALI_DEBUG_PRINT(1,("_mali_osk_mem_mapregion_unmap: virtual memory area not large enough to unmap size %x from area 0x%x at offset 0x%x\n",
size, descriptor->mapping, offset));
return;
}
mappingInfo = (MappingInfo *)descriptor->process_addr_mapping_info;
MALI_DEBUG_ASSERT_POINTER( mappingInfo );
if ( 0 != (flags & _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR) )
{
/* This physical RAM was allocated in _mali_osk_mem_mapregion_map and
* so needs to be unmapped
*/
while (size)
{
/* First find the allocation in the list of allocations */
AllocationList *alloc = mappingInfo->list;
AllocationList **prev = &(mappingInfo->list);
while (NULL != alloc && alloc->offset != offset)
{
prev = &(alloc->next);
alloc = alloc->next;
}
if (alloc == NULL) {
MALI_DEBUG_PRINT(1, ("Unmapping memory that isn't mapped\n"));
size -= _MALI_OSK_CPU_PAGE_SIZE;
offset += _MALI_OSK_CPU_PAGE_SIZE;
continue;
}
_kernel_page_release(alloc->physaddr);
/* Remove the allocation from the list */
*prev = alloc->next;
_mali_osk_free( alloc );
/* Move onto the next allocation */
size -= _MALI_OSK_CPU_PAGE_SIZE;
offset += _MALI_OSK_CPU_PAGE_SIZE;
}
}
/* Linux does the right thing as part of munmap to remove the mapping */
return;
}
| gpl-2.0 |
zjgeer/linux | drivers/firmware/dmi_scan.c | 450 | 25717 | #include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/dmi.h>
#include <asm/unaligned.h>
struct kobject *dmi_kobj;
EXPORT_SYMBOL_GPL(dmi_kobj);
/*
* DMI stands for "Desktop Management Interface". It is part
* of, and an antecedent to, SMBIOS, which stands for System
* Management BIOS. See further: http://www.dmtf.org/standards
*/
static const char dmi_empty_string[] = " ";
static u32 dmi_ver __initdata;
static u32 dmi_len;
static u16 dmi_num;
static u8 smbios_entry_point[32];
static int smbios_entry_point_size;
/*
* Catch too early calls to dmi_check_system():
*/
static int dmi_initialized;
/* DMI system identification string used during boot */
static char dmi_ids_string[128] __initdata;
static struct dmi_memdev_info {
const char *device;
const char *bank;
u16 handle;
} *dmi_memdev;
static int dmi_memdev_nr;
static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
{
const u8 *bp = ((u8 *) dm) + dm->length;
if (s) {
s--;
while (s > 0 && *bp) {
bp += strlen(bp) + 1;
s--;
}
if (*bp != 0) {
size_t len = strlen(bp)+1;
size_t cmp_len = len > 8 ? 8 : len;
if (!memcmp(bp, dmi_empty_string, cmp_len))
return dmi_empty_string;
return bp;
}
}
return "";
}
static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
{
const char *bp = dmi_string_nosave(dm, s);
char *str;
size_t len;
if (bp == dmi_empty_string)
return dmi_empty_string;
len = strlen(bp) + 1;
str = dmi_alloc(len);
if (str != NULL)
strcpy(str, bp);
return str;
}
/*
* We have to be cautious here. We have seen BIOSes with DMI pointers
* pointing to completely the wrong place, for example
*/
static void dmi_decode_table(u8 *buf,
void (*decode)(const struct dmi_header *, void *),
void *private_data)
{
u8 *data = buf;
int i = 0;
/*
* Stop when we have seen all the items the table claimed to have
* (SMBIOS < 3.0 only) OR we reach an end-of-table marker (SMBIOS
* >= 3.0 only) OR we run off the end of the table (should never
* happen but sometimes does on bogus implementations.)
*/
while ((!dmi_num || i < dmi_num) &&
(data - buf + sizeof(struct dmi_header)) <= dmi_len) {
const struct dmi_header *dm = (const struct dmi_header *)data;
/*
* We want to know the total length (formatted area and
* strings) before decoding to make sure we won't run off the
* table in dmi_decode or dmi_string
*/
data += dm->length;
while ((data - buf < dmi_len - 1) && (data[0] || data[1]))
data++;
if (data - buf < dmi_len - 1)
decode(dm, private_data);
data += 2;
i++;
/*
* 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
* For tables behind a 64-bit entry point, we have no item
* count and no exact table length, so stop on end-of-table
* marker. For tables behind a 32-bit entry point, we have
* seen OEM structures behind the end-of-table marker on
* some systems, so don't trust it.
*/
if (!dmi_num && dm->type == DMI_ENTRY_END_OF_TABLE)
break;
}
/* Trim DMI table length if needed */
if (dmi_len > data - buf)
dmi_len = data - buf;
}
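/*
 * Record layout handled above (illustrative summary): each entry
 * starts with a struct dmi_header (type, length, handle) whose length
 * field covers the header plus the formatted area; an unformatted
 * string-set follows, terminated by two consecutive NUL bytes. The
 * "data += dm->length" step skips the formatted area, and the inner
 * loop then scans for the 00 00 terminator before stepping past it.
 */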
static phys_addr_t dmi_base;
static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
void *))
{
u8 *buf;
u32 orig_dmi_len = dmi_len;
buf = dmi_early_remap(dmi_base, orig_dmi_len);
if (buf == NULL)
return -1;
dmi_decode_table(buf, decode, NULL);
add_device_randomness(buf, dmi_len);
dmi_early_unmap(buf, orig_dmi_len);
return 0;
}
static int __init dmi_checksum(const u8 *buf, u8 len)
{
u8 sum = 0;
int a;
for (a = 0; a < len; a++)
sum += buf[a];
return sum == 0;
}
static const char *dmi_ident[DMI_STRING_MAX];
static LIST_HEAD(dmi_devices);
int dmi_available;
/*
* Save a DMI string
*/
static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
int string)
{
const char *d = (const char *) dm;
const char *p;
if (dmi_ident[slot])
return;
p = dmi_string(dm, d[string]);
if (p == NULL)
return;
dmi_ident[slot] = p;
}
static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
int index)
{
const u8 *d = (u8 *) dm + index;
char *s;
int is_ff = 1, is_00 = 1, i;
if (dmi_ident[slot])
return;
for (i = 0; i < 16 && (is_ff || is_00); i++) {
if (d[i] != 0x00)
is_00 = 0;
if (d[i] != 0xFF)
is_ff = 0;
}
if (is_ff || is_00)
return;
s = dmi_alloc(16*2+4+1);
if (!s)
return;
/*
* As of version 2.6 of the SMBIOS specification, the first 3 fields of
* the UUID are supposed to be little-endian encoded. The specification
* says that this is the de facto standard.
*/
if (dmi_ver >= 0x020600)
sprintf(s, "%pUL", d);
else
sprintf(s, "%pUB", d);
dmi_ident[slot] = s;
}
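/*
 * Worked example (illustrative, not from the original source): for raw
 * bytes 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff, "%pUL"
 * byte-swaps the first three fields per the little-endian encoding and
 * prints 33221100-5544-7766-8899-aabbccddeeff, while "%pUB" keeps byte
 * order and prints 00112233-4455-6677-8899-aabbccddeeff.
 */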
static void __init dmi_save_type(const struct dmi_header *dm, int slot,
int index)
{
const u8 *d = (u8 *) dm + index;
char *s;
if (dmi_ident[slot])
return;
s = dmi_alloc(4);
if (!s)
return;
sprintf(s, "%u", *d & 0x7F);
dmi_ident[slot] = s;
}
static void __init dmi_save_one_device(int type, const char *name)
{
struct dmi_device *dev;
/* No duplicate device */
if (dmi_find_device(type, name, NULL))
return;
dev = dmi_alloc(sizeof(*dev) + strlen(name) + 1);
if (!dev)
return;
dev->type = type;
strcpy((char *)(dev + 1), name);
dev->name = (char *)(dev + 1);
dev->device_data = NULL;
list_add(&dev->list, &dmi_devices);
}
static void __init dmi_save_devices(const struct dmi_header *dm)
{
int i, count = (dm->length - sizeof(struct dmi_header)) / 2;
for (i = 0; i < count; i++) {
const char *d = (char *)(dm + 1) + (i * 2);
/* Skip disabled device */
if ((*d & 0x80) == 0)
continue;
dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d + 1)));
}
}
static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
{
int i, count = *(u8 *)(dm + 1);
struct dmi_device *dev;
for (i = 1; i <= count; i++) {
const char *devname = dmi_string(dm, i);
if (devname == dmi_empty_string)
continue;
dev = dmi_alloc(sizeof(*dev));
if (!dev)
break;
dev->type = DMI_DEV_TYPE_OEM_STRING;
dev->name = devname;
dev->device_data = NULL;
list_add(&dev->list, &dmi_devices);
}
}
static void __init dmi_save_ipmi_device(const struct dmi_header *dm)
{
struct dmi_device *dev;
void *data;
data = dmi_alloc(dm->length);
if (data == NULL)
return;
memcpy(data, dm, dm->length);
dev = dmi_alloc(sizeof(*dev));
if (!dev)
return;
dev->type = DMI_DEV_TYPE_IPMI;
dev->name = "IPMI controller";
dev->device_data = data;
list_add_tail(&dev->list, &dmi_devices);
}
static void __init dmi_save_dev_onboard(int instance, int segment, int bus,
int devfn, const char *name)
{
struct dmi_dev_onboard *onboard_dev;
onboard_dev = dmi_alloc(sizeof(*onboard_dev) + strlen(name) + 1);
if (!onboard_dev)
return;
onboard_dev->instance = instance;
onboard_dev->segment = segment;
onboard_dev->bus = bus;
onboard_dev->devfn = devfn;
strcpy((char *)&onboard_dev[1], name);
onboard_dev->dev.type = DMI_DEV_TYPE_DEV_ONBOARD;
onboard_dev->dev.name = (char *)&onboard_dev[1];
onboard_dev->dev.device_data = onboard_dev;
list_add(&onboard_dev->dev.list, &dmi_devices);
}
static void __init dmi_save_extended_devices(const struct dmi_header *dm)
{
const u8 *d = (u8 *) dm + 5;
/* Skip disabled device */
if ((*d & 0x80) == 0)
return;
dmi_save_dev_onboard(*(d+1), *(u16 *)(d+2), *(d+4), *(d+5),
dmi_string_nosave(dm, *(d-1)));
dmi_save_one_device(*d & 0x7f, dmi_string_nosave(dm, *(d - 1)));
}
static void __init count_mem_devices(const struct dmi_header *dm, void *v)
{
if (dm->type != DMI_ENTRY_MEM_DEVICE)
return;
dmi_memdev_nr++;
}
static void __init save_mem_devices(const struct dmi_header *dm, void *v)
{
const char *d = (const char *)dm;
static int nr;
if (dm->type != DMI_ENTRY_MEM_DEVICE)
return;
if (nr >= dmi_memdev_nr) {
pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
return;
}
dmi_memdev[nr].handle = get_unaligned(&dm->handle);
dmi_memdev[nr].device = dmi_string(dm, d[0x10]);
dmi_memdev[nr].bank = dmi_string(dm, d[0x11]);
nr++;
}
void __init dmi_memdev_walk(void)
{
if (!dmi_available)
return;
if (dmi_walk_early(count_mem_devices) == 0 && dmi_memdev_nr) {
dmi_memdev = dmi_alloc(sizeof(*dmi_memdev) * dmi_memdev_nr);
if (dmi_memdev)
dmi_walk_early(save_mem_devices);
}
}
/*
* Process a DMI table entry. Right now all we care about are the BIOS
* and machine entries. For 2.5 we should pull the smbus controller info
* out of here.
*/
static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
{
switch (dm->type) {
case 0: /* BIOS Information */
dmi_save_ident(dm, DMI_BIOS_VENDOR, 4);
dmi_save_ident(dm, DMI_BIOS_VERSION, 5);
dmi_save_ident(dm, DMI_BIOS_DATE, 8);
break;
case 1: /* System Information */
dmi_save_ident(dm, DMI_SYS_VENDOR, 4);
dmi_save_ident(dm, DMI_PRODUCT_NAME, 5);
dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
break;
case 2: /* Base Board Information */
dmi_save_ident(dm, DMI_BOARD_VENDOR, 4);
dmi_save_ident(dm, DMI_BOARD_NAME, 5);
dmi_save_ident(dm, DMI_BOARD_VERSION, 6);
dmi_save_ident(dm, DMI_BOARD_SERIAL, 7);
dmi_save_ident(dm, DMI_BOARD_ASSET_TAG, 8);
break;
case 3: /* Chassis Information */
dmi_save_ident(dm, DMI_CHASSIS_VENDOR, 4);
dmi_save_type(dm, DMI_CHASSIS_TYPE, 5);
dmi_save_ident(dm, DMI_CHASSIS_VERSION, 6);
dmi_save_ident(dm, DMI_CHASSIS_SERIAL, 7);
dmi_save_ident(dm, DMI_CHASSIS_ASSET_TAG, 8);
break;
case 10: /* Onboard Devices Information */
dmi_save_devices(dm);
break;
case 11: /* OEM Strings */
dmi_save_oem_strings_devices(dm);
break;
case 38: /* IPMI Device Information */
dmi_save_ipmi_device(dm);
break;
case 41: /* Onboard Devices Extended Information */
dmi_save_extended_devices(dm);
}
}
static int __init print_filtered(char *buf, size_t len, const char *info)
{
int c = 0;
const char *p;
if (!info)
return c;
for (p = info; *p; p++)
if (isprint(*p))
c += scnprintf(buf + c, len - c, "%c", *p);
else
c += scnprintf(buf + c, len - c, "\\x%02x", *p & 0xff);
return c;
}
static void __init dmi_format_ids(char *buf, size_t len)
{
int c = 0;
const char *board; /* Board Name is optional */
c += print_filtered(buf + c, len - c,
dmi_get_system_info(DMI_SYS_VENDOR));
c += scnprintf(buf + c, len - c, " ");
c += print_filtered(buf + c, len - c,
dmi_get_system_info(DMI_PRODUCT_NAME));
board = dmi_get_system_info(DMI_BOARD_NAME);
if (board) {
c += scnprintf(buf + c, len - c, "/");
c += print_filtered(buf + c, len - c, board);
}
c += scnprintf(buf + c, len - c, ", BIOS ");
c += print_filtered(buf + c, len - c,
dmi_get_system_info(DMI_BIOS_VERSION));
c += scnprintf(buf + c, len - c, " ");
c += print_filtered(buf + c, len - c,
dmi_get_system_info(DMI_BIOS_DATE));
}
/*
* Check for DMI/SMBIOS headers in the system firmware image. Any
* SMBIOS header must start 16 bytes before the DMI header, so take a
* 32 byte buffer and check for DMI at offset 16 and SMBIOS at offset
* 0. If the DMI header is present, set dmi_ver accordingly (SMBIOS
* takes precedence) and return 0. Otherwise return 1.
*/
static int __init dmi_present(const u8 *buf)
{
u32 smbios_ver;
if (memcmp(buf, "_SM_", 4) == 0 &&
buf[5] < 32 && dmi_checksum(buf, buf[5])) {
smbios_ver = get_unaligned_be16(buf + 6);
smbios_entry_point_size = buf[5];
memcpy(smbios_entry_point, buf, smbios_entry_point_size);
/* Some BIOS report weird SMBIOS version, fix that up */
switch (smbios_ver) {
case 0x021F:
case 0x0221:
pr_debug("SMBIOS version fixup (2.%d->2.%d)\n",
smbios_ver & 0xFF, 3);
smbios_ver = 0x0203;
break;
case 0x0233:
pr_debug("SMBIOS version fixup (2.%d->2.%d)\n", 51, 6);
smbios_ver = 0x0206;
break;
}
} else {
smbios_ver = 0;
}
buf += 16;
if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
if (smbios_ver)
dmi_ver = smbios_ver;
else
dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F);
dmi_num = get_unaligned_le16(buf + 12);
dmi_len = get_unaligned_le16(buf + 6);
dmi_base = get_unaligned_le32(buf + 8);
if (dmi_walk_early(dmi_decode) == 0) {
if (smbios_ver) {
pr_info("SMBIOS %d.%d present.\n",
dmi_ver >> 8, dmi_ver & 0xFF);
} else {
smbios_entry_point_size = 15;
memcpy(smbios_entry_point, buf,
smbios_entry_point_size);
pr_info("Legacy DMI %d.%d present.\n",
dmi_ver >> 8, dmi_ver & 0xFF);
}
dmi_ver <<= 8;
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
printk(KERN_DEBUG "DMI: %s\n", dmi_ids_string);
return 0;
}
}
return 1;
}
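/*
 * Buffer layout assumed by dmi_present() (illustrative summary):
 *
 *   buf[0..15]  optional "_SM_" SMBIOS 2.x entry point; checksummed
 *               over buf[5] bytes, version major/minor at buf[6..7]
 *   buf[16..31] "_DMI_" anchor; table length (le16) at +6, 32-bit
 *               table address (le32) at +8, structure count (le16)
 *               at +12, BCD revision at +14
 */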
/*
* Check for the SMBIOS 3.0 64-bit entry point signature. Unlike the legacy
* 32-bit entry point, there is no embedded DMI header (_DMI_) in here.
*/
static int __init dmi_smbios3_present(const u8 *buf)
{
if (memcmp(buf, "_SM3_", 5) == 0 &&
buf[6] < 32 && dmi_checksum(buf, buf[6])) {
dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
smbios_entry_point_size = buf[6];
memcpy(smbios_entry_point, buf, smbios_entry_point_size);
if (dmi_walk_early(dmi_decode) == 0) {
pr_info("SMBIOS %d.%d.%d present.\n",
dmi_ver >> 16, (dmi_ver >> 8) & 0xFF,
dmi_ver & 0xFF);
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
pr_debug("DMI: %s\n", dmi_ids_string);
return 0;
}
}
return 1;
}
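/*
 * _SM3_ entry point layout assumed above (illustrative summary):
 * signature at buf[0..4], entry-point length at buf[6], version bytes
 * (major, minor, docrev) at buf[7..9], maximum table size (le32) at
 * offset 12 and the 64-bit table address (le64) at offset 16.
 */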
void __init dmi_scan_machine(void)
{
char __iomem *p, *q;
char buf[32];
if (efi_enabled(EFI_CONFIG_TABLES)) {
/*
* According to the DMTF SMBIOS reference spec v3.0.0, it is
* allowed to define both the 64-bit entry point (smbios3) and
* the 32-bit entry point (smbios), in which case they should
* either both point to the same SMBIOS structure table, or the
* table pointed to by the 64-bit entry point should contain a
* superset of the table contents pointed to by the 32-bit entry
* point (section 5.2)
* This implies that the 64-bit entry point should have
* precedence if it is defined and supported by the OS. If we
* have the 64-bit entry point, but fail to decode it, fall
* back to the legacy one (if available)
*/
if (efi.smbios3 != EFI_INVALID_TABLE_ADDR) {
p = dmi_early_remap(efi.smbios3, 32);
if (p == NULL)
goto error;
memcpy_fromio(buf, p, 32);
dmi_early_unmap(p, 32);
if (!dmi_smbios3_present(buf)) {
dmi_available = 1;
goto out;
}
}
if (efi.smbios == EFI_INVALID_TABLE_ADDR)
goto error;
/* This is called as a core_initcall() because it isn't
* needed during early boot. This also means we can
* iounmap the space when we're done with it.
*/
p = dmi_early_remap(efi.smbios, 32);
if (p == NULL)
goto error;
memcpy_fromio(buf, p, 32);
dmi_early_unmap(p, 32);
if (!dmi_present(buf)) {
dmi_available = 1;
goto out;
}
} else if (IS_ENABLED(CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK)) {
p = dmi_early_remap(0xF0000, 0x10000);
if (p == NULL)
goto error;
/*
* Iterate over all possible DMI header addresses q.
* Maintain the 32 bytes around q in buf. On the
* first iteration, substitute zero for the
* out-of-range bytes so there is no chance of falsely
* detecting an SMBIOS header.
*/
memset(buf, 0, 16);
for (q = p; q < p + 0x10000; q += 16) {
memcpy_fromio(buf + 16, q, 16);
if (!dmi_smbios3_present(buf) || !dmi_present(buf)) {
dmi_available = 1;
dmi_early_unmap(p, 0x10000);
goto out;
}
memcpy(buf, buf + 16, 16);
}
dmi_early_unmap(p, 0x10000);
}
error:
pr_info("DMI not present or invalid.\n");
out:
dmi_initialized = 1;
}
static ssize_t raw_table_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t pos, size_t count)
{
memcpy(buf, attr->private + pos, count);
return count;
}
static BIN_ATTR(smbios_entry_point, S_IRUSR, raw_table_read, NULL, 0);
static BIN_ATTR(DMI, S_IRUSR, raw_table_read, NULL, 0);
static int __init dmi_init(void)
{
struct kobject *tables_kobj;
u8 *dmi_table;
int ret = -ENOMEM;
if (!dmi_available) {
ret = -ENODATA;
goto err;
}
/*
* Set up dmi directory at /sys/firmware/dmi. This entry should stay
* even after further errors, as it can be used by other modules like
* dmi-sysfs.
*/
dmi_kobj = kobject_create_and_add("dmi", firmware_kobj);
if (!dmi_kobj)
goto err;
tables_kobj = kobject_create_and_add("tables", dmi_kobj);
if (!tables_kobj)
goto err;
dmi_table = dmi_remap(dmi_base, dmi_len);
if (!dmi_table)
goto err_tables;
bin_attr_smbios_entry_point.size = smbios_entry_point_size;
bin_attr_smbios_entry_point.private = smbios_entry_point;
ret = sysfs_create_bin_file(tables_kobj, &bin_attr_smbios_entry_point);
if (ret)
goto err_unmap;
bin_attr_DMI.size = dmi_len;
bin_attr_DMI.private = dmi_table;
ret = sysfs_create_bin_file(tables_kobj, &bin_attr_DMI);
if (!ret)
return 0;
sysfs_remove_bin_file(tables_kobj,
&bin_attr_smbios_entry_point);
err_unmap:
dmi_unmap(dmi_table);
err_tables:
kobject_del(tables_kobj);
kobject_put(tables_kobj);
err:
pr_err("dmi: Firmware registration failed.\n");
return ret;
}
subsys_initcall(dmi_init);
/**
* dmi_set_dump_stack_arch_desc - set arch description for dump_stack()
*
* Invoke dump_stack_set_arch_desc() with DMI system information so that
* DMI identifiers are printed out on task dumps. Arch boot code should
* call this function after dmi_scan_machine() if it wants to print out DMI
* identifiers on task dumps.
*/
void __init dmi_set_dump_stack_arch_desc(void)
{
dump_stack_set_arch_desc("%s", dmi_ids_string);
}
/**
* dmi_matches - check if dmi_system_id structure matches system DMI data
* @dmi: pointer to the dmi_system_id structure to check
*/
static bool dmi_matches(const struct dmi_system_id *dmi)
{
int i;
WARN(!dmi_initialized, KERN_ERR "dmi check: not initialized yet.\n");
for (i = 0; i < ARRAY_SIZE(dmi->matches); i++) {
int s = dmi->matches[i].slot;
if (s == DMI_NONE)
break;
if (dmi_ident[s]) {
if (!dmi->matches[i].exact_match &&
strstr(dmi_ident[s], dmi->matches[i].substr))
continue;
else if (dmi->matches[i].exact_match &&
!strcmp(dmi_ident[s], dmi->matches[i].substr))
continue;
}
/* No match */
return false;
}
return true;
}
/**
* dmi_is_end_of_table - check for end-of-table marker
* @dmi: pointer to the dmi_system_id structure to check
*/
static bool dmi_is_end_of_table(const struct dmi_system_id *dmi)
{
return dmi->matches[0].slot == DMI_NONE;
}
/**
* dmi_check_system - check system DMI data
* @list: array of dmi_system_id structures to match against
* All non-null elements of the list must match
* their slot's (field index's) data (i.e., each
* list string must be a substring of the specified
* DMI slot's string data) to be considered a
* successful match.
*
* Walk the blacklist table running matching functions until someone
* returns non zero or we hit the end. Callback function is called for
* each successful match. Returns the number of matches.
*/
int dmi_check_system(const struct dmi_system_id *list)
{
int count = 0;
const struct dmi_system_id *d;
for (d = list; !dmi_is_end_of_table(d); d++)
if (dmi_matches(d)) {
count++;
if (d->callback && d->callback(d))
break;
}
return count;
}
EXPORT_SYMBOL(dmi_check_system);
/**
* dmi_first_match - find dmi_system_id structure matching system DMI data
* @list: array of dmi_system_id structures to match against
* All non-null elements of the list must match
* their slot's (field index's) data (i.e., each
* list string must be a substring of the specified
* DMI slot's string data) to be considered a
* successful match.
*
* Walk the blacklist table until the first match is found. Return the
* pointer to the matching entry or NULL if there's no match.
*/
const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list)
{
const struct dmi_system_id *d;
for (d = list; !dmi_is_end_of_table(d); d++)
if (dmi_matches(d))
return d;
return NULL;
}
EXPORT_SYMBOL(dmi_first_match);
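/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * normally declares a table terminated by an empty entry (so that
 * matches[0].slot == DMI_NONE) and runs it through dmi_check_system().
 * The vendor/board strings and the callback below are made up purely
 * for illustration.
 */
#if 0
static int __init example_quirk(const struct dmi_system_id *d)
{
	pr_info("applying quirk for %s\n", d->ident);
	return 0; /* keep scanning so later entries can match too */
}

static const struct dmi_system_id example_table[] __initconst = {
	{
		.callback = example_quirk,
		.ident = "Example Vendor Example Board",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_BOARD_NAME, "Example Board"),
		},
	},
	{ } /* terminating entry */
};
/* ... in an init path: dmi_check_system(example_table); */
#endif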
/**
* dmi_get_system_info - return DMI data value
* @field: data index (see enum dmi_field)
*
* Returns one DMI data value, can be used to perform
* complex DMI data checks.
*/
const char *dmi_get_system_info(int field)
{
return dmi_ident[field];
}
EXPORT_SYMBOL(dmi_get_system_info);
/**
* dmi_name_in_serial - Check if string is in the DMI product serial information
* @str: string to check for
*/
int dmi_name_in_serial(const char *str)
{
int f = DMI_PRODUCT_SERIAL;
if (dmi_ident[f] && strstr(dmi_ident[f], str))
return 1;
return 0;
}
/**
* dmi_name_in_vendors - Check if string is in the DMI system or board vendor name
* @str: Case sensitive Name
*/
int dmi_name_in_vendors(const char *str)
{
static int fields[] = { DMI_SYS_VENDOR, DMI_BOARD_VENDOR, DMI_NONE };
int i;
for (i = 0; fields[i] != DMI_NONE; i++) {
int f = fields[i];
if (dmi_ident[f] && strstr(dmi_ident[f], str))
return 1;
}
return 0;
}
EXPORT_SYMBOL(dmi_name_in_vendors);
/**
* dmi_find_device - find onboard device by type/name
* @type: device type or %DMI_DEV_TYPE_ANY to match all device types
* @name: device name string or %NULL to match all
* @from: previous device found in search, or %NULL for new search.
*
* Iterates through the list of known onboard devices. If a device is
* found with a matching @type and @name, a pointer to its device
* structure is returned. Otherwise, %NULL is returned.
* A new search is initiated by passing %NULL as the @from argument.
* If @from is not %NULL, searches continue from next device.
*/
const struct dmi_device *dmi_find_device(int type, const char *name,
const struct dmi_device *from)
{
const struct list_head *head = from ? &from->list : &dmi_devices;
struct list_head *d;
for (d = head->next; d != &dmi_devices; d = d->next) {
const struct dmi_device *dev =
list_entry(d, struct dmi_device, list);
if (((type == DMI_DEV_TYPE_ANY) || (dev->type == type)) &&
((name == NULL) || (strcmp(dev->name, name) == 0)))
return dev;
}
return NULL;
}
EXPORT_SYMBOL(dmi_find_device);
/**
* dmi_get_date - parse a DMI date
* @field: data index (see enum dmi_field)
* @yearp: optional out parameter for the year
* @monthp: optional out parameter for the month
* @dayp: optional out parameter for the day
*
* The date field is assumed to be in the form resembling
* [mm[/dd]]/yy[yy] and the result is stored in the out
* parameters any or all of which can be omitted.
*
* If the field doesn't exist, all out parameters are set to zero
* and false is returned. Otherwise, true is returned with any
* invalid part of date set to zero.
*
* On return, year, month and day are guaranteed to be in the
* range of [0,9999], [0,12] and [0,31] respectively.
*/
bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
{
int year = 0, month = 0, day = 0;
bool exists;
const char *s, *y;
char *e;
s = dmi_get_system_info(field);
exists = s;
if (!exists)
goto out;
/*
* Determine year first. We assume the date string resembles
* mm/dd/yy[yy] but the original code extracted only the year
* from the end. Keep the behavior in the spirit of no
* surprises.
*/
y = strrchr(s, '/');
if (!y)
goto out;
y++;
year = simple_strtoul(y, &e, 10);
if (y != e && year < 100) { /* 2-digit year */
year += 1900;
if (year < 1996) /* no dates < spec 1.0 */
year += 100;
}
if (year > 9999) /* year should fit in %04d */
year = 0;
/* parse the mm and dd */
month = simple_strtoul(s, &e, 10);
if (s == e || *e != '/' || !month || month > 12) {
month = 0;
goto out;
}
s = e + 1;
day = simple_strtoul(s, &e, 10);
if (s == y || s == e || *e != '/' || day > 31)
day = 0;
out:
if (yearp)
*yearp = year;
if (monthp)
*monthp = month;
if (dayp)
*dayp = day;
return exists;
}
EXPORT_SYMBOL(dmi_get_date);
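/*
 * Usage sketch (hypothetical, for illustration only): given a BIOS
 * date string of "03/25/2014", the call below yields year 2014,
 * month 3 and day 25; an unparseable part comes back as 0 instead of
 * failing the whole call.
 */
#if 0
int year, month, day;

if (dmi_get_date(DMI_BIOS_DATE, &year, &month, &day))
	pr_info("BIOS date: %04d-%02d-%02d\n", year, month, day);
#endif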
/**
* dmi_walk - Walk the DMI table and get called back for every record
* @decode: Callback function
* @private_data: Private data to be passed to the callback function
*
* Returns -1 when the DMI table can't be reached, 0 on success.
*/
int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data)
{
u8 *buf;
if (!dmi_available)
return -1;
buf = dmi_remap(dmi_base, dmi_len);
if (buf == NULL)
return -1;
dmi_decode_table(buf, decode, private_data);
dmi_unmap(buf);
return 0;
}
EXPORT_SYMBOL_GPL(dmi_walk);
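/*
 * Usage sketch (hypothetical, for illustration only): dmi_walk() calls
 * the supplied decoder once per table record and passes private_data
 * through untouched, so a caller can accumulate results in it.
 */
#if 0
static void count_mem_entries(const struct dmi_header *dm, void *priv)
{
	if (dm->type == DMI_ENTRY_MEM_DEVICE)
		(*(int *)priv)++;
}

/* int count = 0; dmi_walk(count_mem_entries, &count); */
#endif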
/**
* dmi_match - compare a string to the dmi field (if exists)
* @f: DMI field identifier
* @str: string to compare the DMI field to
*
* Returns true if the requested field equals to the str (including NULL).
*/
bool dmi_match(enum dmi_field f, const char *str)
{
const char *info = dmi_get_system_info(f);
if (info == NULL || str == NULL)
return info == str;
return !strcmp(info, str);
}
EXPORT_SYMBOL_GPL(dmi_match);
void dmi_memdev_name(u16 handle, const char **bank, const char **device)
{
int n;
if (dmi_memdev == NULL)
return;
for (n = 0; n < dmi_memdev_nr; n++) {
if (handle == dmi_memdev[n].handle) {
*bank = dmi_memdev[n].bank;
*device = dmi_memdev[n].device;
break;
}
}
}
EXPORT_SYMBOL_GPL(dmi_memdev_name);
| gpl-2.0 |
adknight87/android_kernel_samsung_msm8226 | drivers/staging/prima/CORE/WDI/TRP/CTS/src/wlan_qct_wdi_cts.c | 450 | 34240 | /*
* Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/*===========================================================================
W L A N _ Q C T _ C T S . C
OVERVIEW:
This software unit holds the implementation of the WLAN Control Transport
Service Layer.
The functions externalized by this module are to be called by the DAL Core
that wishes to use a platform agnostic API to communicate with the WLAN SS.
DEPENDENCIES:
Are listed for each API below.
Copyright (c) 2010-2011 QUALCOMM Incorporated.
All Rights Reserved.
Qualcomm Confidential and Proprietary
===========================================================================*/
/*===========================================================================
EDIT HISTORY FOR FILE
This section contains comments describing changes made to the module.
Notice that changes are listed in reverse chronological order.
$Header$$DateTime$$Author$
when who what, where, why
---------- --- --------------------------------------------------------
2011-02-28 jtj Linux/Android implementation which utilizes SMD
2010-08-09 mss Created module
===========================================================================*/
/*===========================================================================
INCLUDE FILES FOR MODULE
===========================================================================*/
/*----------------------------------------------------------------------------
* Include Files
* -------------------------------------------------------------------------*/
#include "wlan_qct_wdi_cts.h"
#include "wlan_qct_pal_msg.h"
#include "wlan_qct_pal_api.h"
#include "wlan_qct_pal_trace.h"
#include "wlan_qct_os_list.h"
#include "wlan_qct_wdi.h"
#include "wlan_qct_wdi_i.h"
#ifdef CONFIG_ANDROID
#ifdef EXISTS_MSM_SMD
#include <mach/msm_smd.h>
#else
#include <soc/qcom/smd.h>
#endif
#include <linux/delay.h>
#else
#include "msm_smd.h"
#endif
/* Global context for the CTS handle; it is required to keep this
* transport open during SSR shutdown */
static WCTS_HandleType gwctsHandle;
/*----------------------------------------------------------------------------
* Preprocessor Definitions and Constants
* -------------------------------------------------------------------------*/
/* Magic value to validate a WCTS CB (value is little endian ASCII: "WCTS") */
#define WCTS_CB_MAGIC 0x53544357
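/* Byte by byte: 0x53544357 stored little endian is 0x57 0x43 0x54 0x53,
* i.e. the characters 'W' 'C' 'T' 'S' in memory order */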
/* time to wait for SMD channel to open (in msecs) */
#define WCTS_SMD_OPEN_TIMEOUT 5000
/* time to wait for SMD channel to close (in msecs) */
#define WCTS_SMD_CLOSE_TIMEOUT 5000
/*----------------------------------------------------------------------------
* Type Declarations
* -------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------
WCTS_StateType
---------------------------------------------------------------------------*/
typedef enum
{
WCTS_STATE_CLOSED, /* Closed */
WCTS_STATE_OPEN_PENDING, /* Waiting for the OPEN event from SMD */
WCTS_STATE_OPEN, /* Open event received from SMD */
WCTS_STATE_DEFERRED, /* Write pending, SMD channel is full */
WCTS_STATE_REM_CLOSED, /* Remote end closed the SMD channel */
WCTS_STATE_MAX
} WCTS_StateType;
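/*
* Summary of the state transitions implemented in this file:
*
* CLOSED ------- WCTS_OpenTransport() ------> OPEN_PENDING
* OPEN_PENDING - SMD_EVENT_OPEN ------------> OPEN
* OPEN --------- write exceeds FIFO space --> DEFERRED
* DEFERRED ----- pending queue drained -----> OPEN
* OPEN/DEFERRED  SMD_EVENT_CLOSE -----------> REM_CLOSED (Riva crashed)
* REM_CLOSED --- SMD_EVENT_OPEN ------------> subsystem restart re-init
*/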
/*---------------------------------------------------------------------------
Control Transport Control Block Type
---------------------------------------------------------------------------*/
typedef struct
{
WCTS_NotifyCBType wctsNotifyCB;
void* wctsNotifyCBData;
WCTS_RxMsgCBType wctsRxMsgCB;
void* wctsRxMsgCBData;
WCTS_StateType wctsState;
smd_channel_t* wctsChannel;
wpt_list wctsPendingQueue;
wpt_uint32 wctsMagic;
wpt_msg wctsOpenMsg;
wpt_msg wctsDataMsg;
wpt_event wctsEvent;
} WCTS_ControlBlockType;
/*---------------------------------------------------------------------------
WDI CTS Buffer Type
---------------------------------------------------------------------------*/
typedef struct
{
/*Node for linking pending buffers into a linked list */
wpt_list_node node;
/*Buffer associated with the request */
void* pBuffer;
/*Buffer Size*/
int bufferSize;
} WCTS_BufferType;
/*----------------------------------------------------------------------------
* Global Data Definitions
* -------------------------------------------------------------------------*/
/*----------------------------------------------------------------------------
* Static Variable Definitions
* -------------------------------------------------------------------------*/
#ifdef FEATURE_R33D
/* R33D will not close the SMD port.
* If a close request is received from WDI, just pretend the port is
* closed: store the control block in static memory and reuse it on
* the next open */
static WCTS_ControlBlockType *ctsCB;
/* If the port was opened once, do not try to actually open it again */
static int port_open;
#endif /* FEATURE_R33D */
/*----------------------------------------------------------------------------
* Static Function Declarations and Definitions
* -------------------------------------------------------------------------*/
/**
@brief Callback function for serializing WCTS Open
processing in the control context
@param
pMsg - pointer to the message
@see
@return void
*/
static void
WCTS_PALOpenCallback
(
wpt_msg *pMsg
)
{
WCTS_ControlBlockType* pWCTSCb;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* extract our context from the message */
pWCTSCb = pMsg->pContext;
/*--------------------------------------------------------------------
Sanity check
--------------------------------------------------------------------*/
if ((NULL == pWCTSCb) || (WCTS_CB_MAGIC != pWCTSCb->wctsMagic)) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_PALOpenCallback: Invalid parameters received.");
return;
}
if (WCTS_STATE_OPEN_PENDING != pWCTSCb->wctsState) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_PALOpenCallback: Invoke from invalid state %d.",
pWCTSCb->wctsState);
return;
}
/* notify the registered client that the channel is open */
pWCTSCb->wctsState = WCTS_STATE_OPEN;
pWCTSCb->wctsNotifyCB((WCTS_HandleType)pWCTSCb,
WCTS_EVENT_OPEN,
pWCTSCb->wctsNotifyCBData);
/* signal event for WCTS_OpenTransport to proceed */
wpalEventSet(&pWCTSCb->wctsEvent);
}/*WCTS_PALOpenCallback*/
/**
@brief Callback function for serializing WCTS Read
processing in the control context
@param pWCTSCb WCTS Control Block
@see
@return void
*/
static void
WCTS_PALReadCallback
(
WCTS_ControlBlockType* pWCTSCb
)
{
void* buffer;
int packet_size;
int available;
int bytes_read;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/*--------------------------------------------------------------------
Sanity check
--------------------------------------------------------------------*/
if ((NULL == pWCTSCb) || (WCTS_CB_MAGIC != pWCTSCb->wctsMagic)) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_PALReadCallback: Invalid parameter received.");
return;
}
/* iterate until no more packets are available */
while (1) {
/* check the length of the next packet */
packet_size = smd_cur_packet_size(pWCTSCb->wctsChannel);
if (0 == packet_size) {
/* No more data to be read */
return;
}
/* Check how much of the data is available */
available = smd_read_avail(pWCTSCb->wctsChannel);
if (available < packet_size) {
/* Entire packet not yet ready to be read --
There will be another notification when it is ready */
return;
}
buffer = wpalMemoryAllocate(packet_size);
if (NULL == buffer) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_PALReadCallback: Memory allocation failure");
WPAL_ASSERT(0);
return;
}
bytes_read = smd_read(pWCTSCb->wctsChannel,
buffer,
packet_size);
if (bytes_read != packet_size) {
/*Some problem, do not forward it to WDI.*/
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_PALReadCallback: Failed to read data from SMD");
wpalMemoryFree(buffer);
WPAL_ASSERT(0);
return;
}
/* forward the message to the registered handler */
pWCTSCb->wctsRxMsgCB((WCTS_HandleType)pWCTSCb,
buffer,
packet_size,
pWCTSCb->wctsRxMsgCBData);
/* Free the allocated buffer*/
wpalMemoryZero(buffer, bytes_read);
wpalMemoryFree(buffer);
}
} /*WCTS_PALReadCallback*/
/**
@brief Callback function for serializing WCTS Write
processing in the control context
@param pWCTSCb WCTS Control Block
@see
@return void
*/
static void
WCTS_PALWriteCallback
(
WCTS_ControlBlockType* pWCTSCb
)
{
wpt_list_node* pNode;
WCTS_BufferType* pBufferQueue;
void* pBuffer;
int len;
int available;
int written;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/*--------------------------------------------------------------------
Sanity check
--------------------------------------------------------------------*/
if ((NULL == pWCTSCb) || (WCTS_CB_MAGIC != pWCTSCb->wctsMagic)) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_PALWriteCallback: Invalid parameter received.");
return;
}
/* if we are not deferred, then there are no pending packets */
if (WCTS_STATE_DEFERRED != pWCTSCb->wctsState) {
return;
}
/* Keep sending deferred messages as long as there is room in
the channel. Note that we initially peek at the head of the
list to access the parameters for the next message; we don't
actually remove the next message from the deferred list until
we know the channel can handle it */
while (eWLAN_PAL_STATUS_SUCCESS ==
wpal_list_peek_front(&pWCTSCb->wctsPendingQueue, &pNode)) {
pBufferQueue = container_of(pNode, WCTS_BufferType, node);
pBuffer = pBufferQueue->pBuffer;
len = pBufferQueue->bufferSize;
available = smd_write_avail(pWCTSCb->wctsChannel);
if (available < len) {
/* channel has no room for the next packet so we are done */
return;
}
/* there is room for the next message, so we can now remove
it from the deferred message queue and send it */
wpal_list_remove_front(&pWCTSCb->wctsPendingQueue, &pNode);
/* note that pNode will be the same as when we peeked, so
there is no need to update pBuffer or len */
written = smd_write(pWCTSCb->wctsChannel, pBuffer, len);
if (written != len) {
/* Something went wrong */
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_PALWriteCallback: channel write failure");
/* we were unable to send the message that was at the head
of the deferred list. there is nothing else we can do
other than drop it, so we will just fall through to the
"success" processing.
hopefully the client can recover from this since there is
nothing else we can do here */
}
/* whether we had success or failure, reclaim all memory */
wpalMemoryZero(pBuffer, len);
wpalMemoryFree(pBuffer);
wpalMemoryFree(pBufferQueue);
/* we'll continue to iterate until the channel is full or all
of the deferred messages have been sent */
}
/* if we've exited the loop, then we have drained the deferred queue.
set the state to indicate we are no longer deferred, and turn off
the remote read interrupt */
pWCTSCb->wctsState = WCTS_STATE_OPEN;
smd_disable_read_intr(pWCTSCb->wctsChannel);
} /*WCTS_PALWriteCallback*/
/**
@brief Callback function for serializing SMD DATA Event
processing in the control context
@param
pMsg - pointer to the message
@see
@return void
*/
static void
WCTS_PALDataCallback
(
wpt_msg *pMsg
)
{
WCTS_ControlBlockType* pWCTSCb;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* extract our context from the message */
pWCTSCb = pMsg->pContext;
/* process any incoming messages */
WCTS_PALReadCallback(pWCTSCb);
/* send any deferred messages */
WCTS_PALWriteCallback(pWCTSCb);
} /*WCTS_PALDataCallback*/
/**
@brief This helper function is used to clean up the pending
messages in the transport queue
@param wctsHandle: transport handle
@see
@return 0 for success
*/
wpt_uint32
WCTS_ClearPendingQueue
(
WCTS_HandleType wctsHandle
)
{
WCTS_ControlBlockType* pWCTSCb = (WCTS_ControlBlockType*) wctsHandle;
wpt_list_node* pNode = NULL;
WCTS_BufferType* pBufferQueue = NULL;
void* pBuffer = NULL;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
if ((NULL == pWCTSCb) || (WCTS_CB_MAGIC != pWCTSCb->wctsMagic)) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_ClearPendingQueue: Invalid parameters received.");
return eWLAN_PAL_STATUS_E_INVAL;
}
/*Free the buffers in the pending queue.*/
while (eWLAN_PAL_STATUS_SUCCESS ==
wpal_list_remove_front(&pWCTSCb->wctsPendingQueue, &pNode)) {
pBufferQueue = container_of(pNode, WCTS_BufferType, node);
pBuffer = pBufferQueue->pBuffer;
wpalMemoryFree(pBuffer);
wpalMemoryFree(pBufferQueue);
}
return eWLAN_PAL_STATUS_SUCCESS;
}/*WCTS_ClearPendingQueue*/
/**
* Notification callback when SMD needs to communicate asynchronously with
* the client.
*
* This callback function may be called from interrupt context; clients must
* not block or call any functions that block.
*
* @param[in] data The user-supplied data provided to smd_named_open_on_edge()
* @param[in] event The event that occurred
*
* @return void
*/
static void
WCTS_NotifyCallback
(
void *data,
unsigned event
)
{
wpt_msg *palMsg;
WCTS_ControlBlockType* pWCTSCb = (WCTS_ControlBlockType*) data;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/*--------------------------------------------------------------------
Sanity check
--------------------------------------------------------------------*/
if (WCTS_CB_MAGIC != pWCTSCb->wctsMagic) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"%s: Received unexpected SMD event %u",
__func__, event);
/* TODO_PRIMA what error recovery options do we have? */
return;
}
/* Serialize processing in the control thread */
switch (event) {
case SMD_EVENT_OPEN:
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_INFO,
"%s: received SMD_EVENT_OPEN from SMD", __func__);
/* If the previous state was 'remote closed' then this is a Riva
* 'restart', i.e. subsystem restart re-init
*/
if (WCTS_STATE_REM_CLOSED == pWCTSCb->wctsState)
{
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_INFO,
"%s: received SMD_EVENT_OPEN in WCTS_STATE_REM_CLOSED state",
__func__);
/* call subsystem restart re-init function */
wpalDriverReInit();
return;
}
palMsg = &pWCTSCb->wctsOpenMsg;
break;
case SMD_EVENT_DATA:
if (WCTS_STATE_REM_CLOSED == pWCTSCb->wctsState)
{
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"%s: received SMD data when the state is remote closed ",
__func__);
/* we should not be getting any data now */
return;
}
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_INFO,
"%s: received SMD_EVENT_DATA from SMD", __func__);
palMsg = &pWCTSCb->wctsDataMsg;
break;
case SMD_EVENT_CLOSE:
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_INFO,
"%s: received SMD_EVENT_CLOSE from SMD", __func__);
/* SMD channel was closed from the remote side,
* this would happen only when Riva crashed and SMD is
* closing the channel on behalf of Riva */
pWCTSCb->wctsState = WCTS_STATE_REM_CLOSED;
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_INFO,
"%s: received SMD_EVENT_CLOSE WLAN driver going down now",
__func__);
/* subsystem restart: shutdown */
wpalDriverShutdown();
return;
case SMD_EVENT_STATUS:
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_INFO,
"%s: received SMD_EVENT_STATUS from SMD", __func__);
return;
case SMD_EVENT_REOPEN_READY:
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"%s: received SMD_EVENT_REOPEN_READY from SMD", __func__);
/* unlike other events which occur when our kernel threads are
running, this one is received when the threads are closed and
the rmmod thread is waiting. so just unblock that thread */
wpalEventSet(&pWCTSCb->wctsEvent);
return;
default:
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"%s: Unexpected event %u received from SMD",
__func__, event);
return;
}
/* serialize this event */
wpalPostCtrlMsg(WDI_GET_PAL_CTX(), palMsg);
} /*WCTS_NotifyCallback*/
/*----------------------------------------------------------------------------
* Externalized Function Definitions
* -------------------------------------------------------------------------*/
/**
@brief This function is used by the DAL Core to initialize the Control
Transport for processing. It must be called prior to calling any
other APIs of the Control Transport.
@param szName: unique name for the channel that is to be opened
uSize: size of the channel that must be opened (should fit the
largest size of packet that the Dal Core wishes to send)
wctsCBs: a list of callbacks that the CT needs to use to send
notification and messages back to DAL
@see
@return A handle that must be used for further communication with the CTS.
This is an opaque structure for the caller and it will be used in
all communications to and from the CTS.
*/
WCTS_HandleType
WCTS_OpenTransport
(
const wpt_uint8* szName,
wpt_uint32 uSize,
WCTS_TransportCBsType* wctsCBs
)
{
WCTS_ControlBlockType* pWCTSCb;
wpt_status status;
int smdstatus;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/
/*---------------------------------------------------------------------
Sanity check
---------------------------------------------------------------------*/
if ((NULL == wctsCBs) || (NULL == szName) ||
(NULL == wctsCBs->wctsNotifyCB) || (NULL == wctsCBs->wctsRxMsgCB)) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_OpenTransport: Invalid parameters received.");
return NULL;
}
/* This open is coming after an SSR; we don't need to reopen SMD
* since the SMD port was never closed during SSR */
if (gwctsHandle) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_INFO,
"WCTS_OpenTransport port is already open");
pWCTSCb = gwctsHandle;
if (WCTS_CB_MAGIC != pWCTSCb->wctsMagic) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_FATAL,
"WCTS_OpenTransport: Invalid magic.");
return NULL;
}
pWCTSCb->wctsState = WCTS_STATE_OPEN;
pWCTSCb->wctsNotifyCB((WCTS_HandleType)pWCTSCb,
WCTS_EVENT_OPEN,
pWCTSCb->wctsNotifyCBData);
/* we initially don't want read interrupts
(we only want them if we get into deferred write mode) */
smd_disable_read_intr(pWCTSCb->wctsChannel);
return (WCTS_HandleType)pWCTSCb;
}
#ifdef FEATURE_R33D
if(port_open)
{
/* Port was opened before; no need to open it again */
/* notify the registered client that the channel is open */
ctsCB->wctsState = WCTS_STATE_OPEN;
ctsCB->wctsNotifyCB((WCTS_HandleType)ctsCB,
WCTS_EVENT_OPEN,
ctsCB->wctsNotifyCBData);
return (WCTS_HandleType)ctsCB;
}
#endif /* FEATURE_R33D */
/* allocate a ControlBlock to hold all context */
pWCTSCb = wpalMemoryAllocate(sizeof(*pWCTSCb));
if (NULL == pWCTSCb) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_OpenTransport: Memory allocation failure.");
return NULL;
}
/* make sure the control block is initialized. in particular we need
to make sure the embedded event and list structures are initialized
to prevent "magic number" tests from being run against uninitialized
values */
wpalMemoryZero(pWCTSCb, sizeof(*pWCTSCb));
#ifdef FEATURE_R33D
smd_init(0);
port_open = 1;
ctsCB = pWCTSCb;
#endif /* FEATURE_R33D */
/*Initialise the event*/
wpalEventInit(&pWCTSCb->wctsEvent);
/* save the user-supplied information */
pWCTSCb->wctsNotifyCB = wctsCBs->wctsNotifyCB;
pWCTSCb->wctsNotifyCBData = wctsCBs->wctsNotifyCBData;
pWCTSCb->wctsRxMsgCB = wctsCBs->wctsRxMsgCB;
pWCTSCb->wctsRxMsgCBData = wctsCBs->wctsRxMsgCBData;
/* initialize the remaining fields */
wpal_list_init(&pWCTSCb->wctsPendingQueue);
pWCTSCb->wctsMagic = WCTS_CB_MAGIC;
pWCTSCb->wctsState = WCTS_STATE_OPEN_PENDING;
pWCTSCb->wctsChannel = NULL;
/* since SMD will call back in interrupt context, we use canned
* messages to serialize the SMD events into a thread
* context
*/
pWCTSCb->wctsOpenMsg.callback = WCTS_PALOpenCallback;
pWCTSCb->wctsOpenMsg.pContext = pWCTSCb;
pWCTSCb->wctsOpenMsg.type = WPAL_MC_MSG_SMD_NOTIF_OPEN_SIG;
pWCTSCb->wctsDataMsg.callback = WCTS_PALDataCallback;
pWCTSCb->wctsDataMsg.pContext = pWCTSCb;
pWCTSCb->wctsDataMsg.type = WPAL_MC_MSG_SMD_NOTIF_DATA_SIG;
/*---------------------------------------------------------------------
Open the SMD channel
---------------------------------------------------------------------*/
wpalEventReset(&pWCTSCb->wctsEvent);
smdstatus = smd_named_open_on_edge(szName,
SMD_APPS_WCNSS,
&pWCTSCb->wctsChannel,
pWCTSCb,
WCTS_NotifyCallback);
if (0 != smdstatus) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"%s: smd_named_open_on_edge failed with status %d",
__func__, smdstatus);
goto fail;
}
/* wait for the channel to be fully opened before we proceed */
status = wpalEventWait(&pWCTSCb->wctsEvent, WCTS_SMD_OPEN_TIMEOUT);
if (eWLAN_PAL_STATUS_SUCCESS != status) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"%s: failed to receive SMD_EVENT_OPEN",
__func__);
/* since we opened one end of the channel, close it */
smdstatus = smd_close(pWCTSCb->wctsChannel);
if (0 != smdstatus) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"%s: smd_close failed with status %d",
__func__, smdstatus);
}
goto fail;
}
/* we initially don't want read interrupts
(we only want them if we get into deferred write mode) */
smd_disable_read_intr(pWCTSCb->wctsChannel);
/* we have successfully opened the SMD channel */
gwctsHandle = pWCTSCb;
return (WCTS_HandleType)pWCTSCb;
fail:
/* we were unable to open the SMD channel */
pWCTSCb->wctsMagic = 0;
wpalMemoryFree(pWCTSCb);
return NULL;
}/*WCTS_OpenTransport*/
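/*
* A minimal open sketch (illustrative only). The callback signatures are
* inferred from the call sites in this file; the authoritative typedefs
* live in wlan_qct_wdi_cts.h. The channel name "WLAN_CTRL" and the size
* 4096 are assumptions made for the example.
*/
#if 0
static void my_notify(WCTS_HandleType handle, WCTS_NotifyEventType event,
void *userData)
{
/* react to WCTS_EVENT_OPEN / WCTS_EVENT_CLOSE here */
}
static void my_rx(WCTS_HandleType handle, void *pMsg, wpt_uint32 len,
void *userData)
{
/* consume the flat message buffer; the CT frees it after we return */
}
static WCTS_HandleType example_open(void)
{
WCTS_TransportCBsType cbs;
cbs.wctsNotifyCB = my_notify;
cbs.wctsNotifyCBData = NULL;
cbs.wctsRxMsgCB = my_rx;
cbs.wctsRxMsgCBData = NULL;
return WCTS_OpenTransport((const wpt_uint8 *)"WLAN_CTRL", 4096, &cbs);
}
#endif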
/**
@brief This function is used by the DAL Core to close the
Control Transport when its services are no longer
needed. Full close notification will be received
asynchronously on the notification callback
registered on Open
@param wctsHandle: handle received upon open
@see
@return 0 for success
*/
wpt_uint32
WCTS_CloseTransport
(
WCTS_HandleType wctsHandle
)
{
WCTS_ControlBlockType* pWCTSCb = (WCTS_ControlBlockType*) wctsHandle;
wpt_list_node* pNode = NULL;
WCTS_BufferType* pBufferQueue = NULL;
void* pBuffer = NULL;
wpt_status status;
int smdstatus;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
if ((NULL == pWCTSCb) || (WCTS_CB_MAGIC != pWCTSCb->wctsMagic)) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_CloseTransport: Invalid parameters received.");
return eWLAN_PAL_STATUS_E_INVAL;
}
#ifdef FEATURE_R33D
/* Do not actually close the port, just pretend */
/* notified registered client that the channel is closed */
pWCTSCb->wctsState = WCTS_STATE_CLOSED;
pWCTSCb->wctsNotifyCB((WCTS_HandleType)pWCTSCb,
WCTS_EVENT_CLOSE,
pWCTSCb->wctsNotifyCBData);
printk(KERN_ERR "R33D: no need to close");
return eWLAN_PAL_STATUS_SUCCESS;
#endif /* FEATURE_R33D */
/*Free the buffers in the pending queue.*/
while (eWLAN_PAL_STATUS_SUCCESS ==
wpal_list_remove_front(&pWCTSCb->wctsPendingQueue, &pNode)) {
pBufferQueue = container_of(pNode, WCTS_BufferType, node);
pBuffer = pBufferQueue->pBuffer;
wpalMemoryFree(pBuffer);
wpalMemoryFree(pBufferQueue);
}
/* Reset the state */
pWCTSCb->wctsState = WCTS_STATE_CLOSED;
wpalEventReset(&pWCTSCb->wctsEvent);
smdstatus = smd_close(pWCTSCb->wctsChannel);
if (0 != smdstatus) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"%s: smd_close failed with status %d",
__func__, smdstatus);
/* SMD did not successfully close the channel, therefore we
won't receive an asynchronous close notification so don't
bother to wait for an event that won't come */
} else {
/* close command was sent -- wait for the callback to complete */
status = wpalEventWait(&pWCTSCb->wctsEvent, WCTS_SMD_CLOSE_TIMEOUT);
if (eWLAN_PAL_STATUS_SUCCESS != status) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"%s: failed to receive SMD_EVENT_REOPEN_READY",
__func__);
}
/* During the close sequence we deregistered from SMD. As part
of deregistration SMD will call back into our driver with an
event to let us know the channel is closed. We need to
insert a brief delay to allow that thread of execution to
exit our module. Otherwise our module may be unloaded while
there is still code running within the address space, and
that code will crash when the memory is unmapped */
msleep(50);
}
/* channel has (hopefully) been closed */
pWCTSCb->wctsNotifyCB((WCTS_HandleType)pWCTSCb,
WCTS_EVENT_CLOSE,
pWCTSCb->wctsNotifyCBData);
/* release the resource */
pWCTSCb->wctsMagic = 0;
wpalMemoryFree(pWCTSCb);
gwctsHandle = NULL;
return eWLAN_PAL_STATUS_SUCCESS;
}/*WCTS_CloseTransport*/
/**
@brief This function is used by the DAL Core to send a
message over to the WLAN sub-system.
Once a buffer has been passed into the Send Message
API, CT takes full ownership of it and it is responsible for
freeing the associated resources. (This prevents a memcpy in
case of a deferred write)
The messages transported through the CT on both RX and TX are
flat memory buffers that can be accessed and manipulated
through standard memory functions.
@param wctsHandle: handle received upon open
pMsg: the message to be sent
uLen: the length of the message
@see
@return 0 for success
*/
wpt_uint32
WCTS_SendMessage
(
WCTS_HandleType wctsHandle,
void* pMsg,
wpt_uint32 uLen
)
{
WCTS_ControlBlockType* pWCTSCb = (WCTS_ControlBlockType*) wctsHandle;
WCTS_BufferType* pBufferQueue;
int len;
int written = 0;
int available;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/*--------------------------------------------------------------------
Sanity check
--------------------------------------------------------------------*/
if ((NULL == pWCTSCb) || (WCTS_CB_MAGIC != pWCTSCb->wctsMagic) ||
(NULL == pMsg) || (0 == uLen) || (0x7fffffff < uLen)) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_SendMessage: Invalid parameters received.");
WPAL_ASSERT(0);
if (NULL != pMsg) {
wpalMemoryFree(pMsg);
}
return eWLAN_PAL_STATUS_E_INVAL;
}
/* the SMD API uses int instead of uint, so change types here */
len = (int)uLen;
if (WCTS_STATE_OPEN == pWCTSCb->wctsState) {
available = smd_write_avail(pWCTSCb->wctsChannel);
if (available >= len) {
written = smd_write(pWCTSCb->wctsChannel, pMsg, len);
}
} else if (WCTS_STATE_DEFERRED == pWCTSCb->wctsState) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_INFO,
"WCTS_SendMessage: FIFO space not available, the packets will be queued");
} else {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_SendMessage: Channel in illegal state [%d].",
pWCTSCb->wctsState);
/* force following logic to reclaim the buffer */
written = -1;
}
if (-1 == written) {
/*Something wrong*/
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_SendMessage: Failed to send message over the bus.");
wpalMemoryFree(pMsg);
return eWLAN_PAL_STATUS_E_FAILURE;
} else if (written == len) {
/* Message sent! No deferred state, free the buffer*/
wpalMemoryZero(pMsg, len);
wpalMemoryFree(pMsg);
} else {
/* This much data cannot be written at this time,
queue the rest of the data for later*/
pBufferQueue = wpalMemoryAllocate(sizeof(WCTS_BufferType));
if (NULL == pBufferQueue) {
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"WCTS_SendMessage: Cannot allocate memory for queuing the buffer");
wpalMemoryFree(pMsg);
WPAL_ASSERT(0);
return eWLAN_PAL_STATUS_E_NOMEM;
}
pBufferQueue->bufferSize = len;
pBufferQueue->pBuffer = pMsg;
if (eWLAN_PAL_STATUS_E_FAILURE ==
wpal_list_insert_back(&pWCTSCb->wctsPendingQueue,
&pBufferQueue->node))
{
WPAL_TRACE(eWLAN_MODULE_DAL_CTRL, eWLAN_PAL_TRACE_LEVEL_ERROR,
"pBufferQueue wpal_list_insert_back failed");
wpalMemoryFree(pMsg);
wpalMemoryFree(pBufferQueue);
WPAL_ASSERT(0);
return eWLAN_PAL_STATUS_E_NOMEM;
}
/* if we are not already in the deferred state, then transition
to that state. when we do so, we enable the remote read
interrupt so that we'll be notified when messages are read
from the remote end */
if (WCTS_STATE_DEFERRED != pWCTSCb->wctsState) {
/* Mark the state as deferred.
Later: We may need to protect wctsState by locks*/
pWCTSCb->wctsState = WCTS_STATE_DEFERRED;
smd_enable_read_intr(pWCTSCb->wctsChannel);
}
/*indicate to client that message was placed in deferred queue*/
return eWLAN_PAL_STATUS_E_RESOURCES;
}
return eWLAN_PAL_STATUS_SUCCESS;
}/*WCTS_SendMessage*/
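/*
* A minimal send sketch (illustrative only), highlighting the ownership
* rule documented above: once WCTS_SendMessage() is called, the CT owns
* pMsg and the caller must never free it. MSG_LEN is a made-up size.
*/
#if 0
static wpt_uint32 example_send(WCTS_HandleType handle)
{
enum { MSG_LEN = 16 };
void *pMsg = wpalMemoryAllocate(MSG_LEN);
if (NULL == pMsg)
return eWLAN_PAL_STATUS_E_NOMEM;
wpalMemoryZero(pMsg, MSG_LEN);
/* ... fill in the message payload ... */
/* may return eWLAN_PAL_STATUS_E_RESOURCES if the write was deferred */
return WCTS_SendMessage(handle, pMsg, MSG_LEN);
}
#endif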
| gpl-2.0 |
pathletboy/rt-thread | bsp/imx6sx/iMX6_Platform_SDK/board/mx6sdl/sabre_ai/rev_a_iomux/dcic_iomux_config.c | 450 | 1677 | /*
* Copyright (c) 2012, Freescale Semiconductor, Inc.
* All rights reserved.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// File: dcic_iomux_config.c
/* ------------------------------------------------------------------------------
* <auto-generated>
* This code was generated by a tool.
* Runtime Version:3.4.0.0
*
* Changes to this file may cause incorrect behavior and will be lost if
* the code is regenerated.
* </auto-generated>
* ------------------------------------------------------------------------------
*/
#include "iomux_config.h"
#include "registers/regsiomuxc.h"
#include "registers/regsdcic.h"
#include "io.h"
#include <assert.h>
void dcic_iomux_config(int instance)
{
switch (instance)
{
case HW_DCIC1:
return dcic1_iomux_config();
case HW_DCIC2:
return dcic2_iomux_config();
default:
assert(false);
}
}
| gpl-2.0 |
cmenard/OverStock_I897 | drivers/serial/ucc_uart.c | 450 | 41715 | /*
* Freescale QUICC Engine UART device driver
*
* Author: Timur Tabi <timur@freescale.com>
*
* Copyright 2007 Freescale Semiconductor, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*
* This driver adds support for UART devices via Freescale's QUICC Engine
* found on some Freescale SOCs.
*
* If Soft-UART support is needed but not already present, then this driver
* will request and upload the "Soft-UART" microcode upon probe. The
* filename of the microcode should be fsl_qe_ucode_uart_X_YZ.bin, where "X"
* is the name of the SOC (e.g. 8323), and YZ is the revision of the SOC,
* (e.g. "11" for 1.1).
*/
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/fs_uart_pd.h>
#include <asm/ucc_slow.h>
#include <linux/firmware.h>
#include <asm/reg.h>
/*
* The GUMR flag for Soft UART. This would normally be defined in qe.h,
* but Soft-UART is a hack and we want to keep everything related to it in
* this file.
*/
#define UCC_SLOW_GUMR_H_SUART 0x00004000 /* Soft-UART */
/*
* soft_uart is 1 if we need to use Soft-UART mode
*/
static int soft_uart;
/*
* firmware_loaded is 1 if the firmware has been loaded, 0 otherwise.
*/
static int firmware_loaded;
/* Enable this macro to configure all serial ports in internal loopback
mode */
/* #define LOOPBACK */
/* The major and minor device numbers are defined in
* http://www.lanana.org/docs/device-list/devices-2.6+.txt. For the QE
* UART, we have major number 204 and minor numbers 46 - 49, which are the
* same as for the CPM2. This decision was made because no Freescale part
* has both a CPM and a QE.
*/
#define SERIAL_QE_MAJOR 204
#define SERIAL_QE_MINOR 46
/* Since we only have minor numbers 46 - 49, there is a hard limit of 4 ports */
#define UCC_MAX_UART 4
/* The number of buffer descriptors for receiving characters. */
#define RX_NUM_FIFO 4
/* The number of buffer descriptors for transmitting characters. */
#define TX_NUM_FIFO 4
/* The maximum size of the character buffer for a single RX BD. */
#define RX_BUF_SIZE 32
/* The maximum size of the character buffer for a single TX BD. */
#define TX_BUF_SIZE 32
/*
* The number of jiffies to wait after receiving a close command before the
* device is actually closed. This allows the last few characters to be
* sent over the wire.
*/
#define UCC_WAIT_CLOSING 100
struct ucc_uart_pram {
struct ucc_slow_pram common;
u8 res1[8]; /* reserved */
__be16 maxidl; /* Maximum idle chars */
__be16 idlc; /* temp idle counter */
__be16 brkcr; /* Break count register */
__be16 parec; /* receive parity error counter */
__be16 frmec; /* receive framing error counter */
__be16 nosec; /* receive noise counter */
__be16 brkec; /* receive break condition counter */
__be16 brkln; /* last received break length */
__be16 uaddr[2]; /* UART address character 1 & 2 */
__be16 rtemp; /* Temp storage */
__be16 toseq; /* Transmit out of sequence char */
__be16 cchars[8]; /* control characters 1-8 */
__be16 rccm; /* receive control character mask */
__be16 rccr; /* receive control character register */
__be16 rlbc; /* receive last break character */
__be16 res2; /* reserved */
__be32 res3; /* reserved, should be cleared */
u8 res4; /* reserved, should be cleared */
u8 res5[3]; /* reserved, should be cleared */
__be32 res6; /* reserved, should be cleared */
__be32 res7; /* reserved, should be cleared */
__be32 res8; /* reserved, should be cleared */
__be32 res9; /* reserved, should be cleared */
__be32 res10; /* reserved, should be cleared */
__be32 res11; /* reserved, should be cleared */
__be32 res12; /* reserved, should be cleared */
__be32 res13; /* reserved, should be cleared */
/* The rest is for Soft-UART only */
__be16 supsmr; /* 0x90, Shadow UPSMR */
__be16 res92; /* 0x92, reserved, initialize to 0 */
__be32 rx_state; /* 0x94, RX state, initialize to 0 */
__be32 rx_cnt; /* 0x98, RX count, initialize to 0 */
u8 rx_length; /* 0x9C, Char length, set to 1+CL+PEN+1+SL */
u8 rx_bitmark; /* 0x9D, reserved, initialize to 0 */
u8 rx_temp_dlst_qe; /* 0x9E, reserved, initialize to 0 */
u8 res14[0xBC - 0x9F]; /* reserved */
__be32 dump_ptr; /* 0xBC, Dump pointer */
__be32 rx_frame_rem; /* 0xC0, reserved, initialize to 0 */
u8 rx_frame_rem_size; /* 0xC4, reserved, initialize to 0 */
u8 tx_mode; /* 0xC5, mode, 0=AHDLC, 1=UART */
__be16 tx_state; /* 0xC6, TX state */
u8 res15[0xD0 - 0xC8]; /* reserved */
__be32 resD0; /* 0xD0, reserved, initialize to 0 */
u8 resD4; /* 0xD4, reserved, initialize to 0 */
__be16 resD5; /* 0xD5, reserved, initialize to 0 */
} __attribute__ ((packed));
/* SUPSMR definitions, for Soft-UART only */
#define UCC_UART_SUPSMR_SL 0x8000
#define UCC_UART_SUPSMR_RPM_MASK 0x6000
#define UCC_UART_SUPSMR_RPM_ODD 0x0000
#define UCC_UART_SUPSMR_RPM_LOW 0x2000
#define UCC_UART_SUPSMR_RPM_EVEN 0x4000
#define UCC_UART_SUPSMR_RPM_HIGH 0x6000
#define UCC_UART_SUPSMR_PEN 0x1000
#define UCC_UART_SUPSMR_TPM_MASK 0x0C00
#define UCC_UART_SUPSMR_TPM_ODD 0x0000
#define UCC_UART_SUPSMR_TPM_LOW 0x0400
#define UCC_UART_SUPSMR_TPM_EVEN 0x0800
#define UCC_UART_SUPSMR_TPM_HIGH 0x0C00
#define UCC_UART_SUPSMR_FRZ 0x0100
#define UCC_UART_SUPSMR_UM_MASK 0x00c0
#define UCC_UART_SUPSMR_UM_NORMAL 0x0000
#define UCC_UART_SUPSMR_UM_MAN_MULTI 0x0040
#define UCC_UART_SUPSMR_UM_AUTO_MULTI 0x00c0
#define UCC_UART_SUPSMR_CL_MASK 0x0030
#define UCC_UART_SUPSMR_CL_8 0x0030
#define UCC_UART_SUPSMR_CL_7 0x0020
#define UCC_UART_SUPSMR_CL_6 0x0010
#define UCC_UART_SUPSMR_CL_5 0x0000
#define UCC_UART_TX_STATE_AHDLC 0x00
#define UCC_UART_TX_STATE_UART 0x01
#define UCC_UART_TX_STATE_X1 0x00
#define UCC_UART_TX_STATE_X16 0x80
#define UCC_UART_PRAM_ALIGNMENT 0x100
#define UCC_UART_SIZE_OF_BD UCC_SLOW_SIZE_OF_BD
#define NUM_CONTROL_CHARS 8
/* Private per-port data structure */
struct uart_qe_port {
struct uart_port port;
struct ucc_slow __iomem *uccp;
struct ucc_uart_pram __iomem *uccup;
struct ucc_slow_info us_info;
struct ucc_slow_private *us_private;
struct device_node *np;
unsigned int ucc_num; /* First ucc is 0, not 1 */
u16 rx_nrfifos;
u16 rx_fifosize;
u16 tx_nrfifos;
u16 tx_fifosize;
int wait_closing;
u32 flags;
struct qe_bd *rx_bd_base;
struct qe_bd *rx_cur;
struct qe_bd *tx_bd_base;
struct qe_bd *tx_cur;
unsigned char *tx_buf;
unsigned char *rx_buf;
void *bd_virt; /* virtual address of the BD buffers */
dma_addr_t bd_dma_addr; /* bus address of the BD buffers */
unsigned int bd_size; /* size of BD buffer space */
};
static struct uart_driver ucc_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "ucc_uart",
.dev_name = "ttyQE",
.major = SERIAL_QE_MAJOR,
.minor = SERIAL_QE_MINOR,
.nr = UCC_MAX_UART,
};
/*
* Virtual to physical address translation.
*
* Given the virtual address for a character buffer, this function returns
* the physical (DMA) equivalent.
*/
static inline dma_addr_t cpu2qe_addr(void *addr, struct uart_qe_port *qe_port)
{
if (likely((addr >= qe_port->bd_virt)) &&
(addr < (qe_port->bd_virt + qe_port->bd_size)))
return qe_port->bd_dma_addr + (addr - qe_port->bd_virt);
/* something nasty happened */
printk(KERN_ERR "%s: addr=%p\n", __func__, addr);
BUG();
return 0;
}
/*
* Physical to virtual address translation.
*
* Given the physical (DMA) address for a character buffer, this function
* returns the virtual equivalent.
*/
static inline void *qe2cpu_addr(dma_addr_t addr, struct uart_qe_port *qe_port)
{
/* sanity check */
if (likely((addr >= qe_port->bd_dma_addr) &&
(addr < (qe_port->bd_dma_addr + qe_port->bd_size))))
return qe_port->bd_virt + (addr - qe_port->bd_dma_addr);
/* something nasty happened */
printk(KERN_ERR "%s: addr=%x\n", __func__, addr);
BUG();
return NULL;
}
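/*
* Worked example with made-up numbers: if bd_virt == 0xc0000000,
* bd_dma_addr == 0x0f000000 and bd_size == 0x1000, then a character
* buffer at virtual 0xc0000010 translates to DMA address 0x0f000010 via
* cpu2qe_addr(), and qe2cpu_addr() performs the inverse mapping.
*/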
/*
* Return 1 if the QE is done transmitting all buffers for this port
*
* This function scans each BD in sequence. If we find a BD that is not
* ready (READY=1), then we return 0 indicating that the QE is still sending
* data. If we reach the last BD (WRAP=1), then we know we've scanned
* the entire list, and all BDs are done.
*/
static unsigned int qe_uart_tx_empty(struct uart_port *port)
{
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
struct qe_bd *bdp = qe_port->tx_bd_base;
while (1) {
if (in_be16(&bdp->status) & BD_SC_READY)
/* This BD is not done, so return "not done" */
return 0;
if (in_be16(&bdp->status) & BD_SC_WRAP)
/*
* This BD is done and it's the last one, so return
* "done"
*/
return 1;
bdp++;
}
}
/*
* Set the modem control lines
*
* Although the QE can control the modem control lines (e.g. CTS), we
* don't need that support. This function must exist, however, otherwise
* the kernel will panic.
*/
void qe_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
/*
* Get the current modem control line status
*
* Although the QE can control the modem control lines (e.g. CTS), this
* driver currently doesn't support that, so we always return Carrier
* Detect, Data Set Ready, and Clear To Send.
*/
static unsigned int qe_uart_get_mctrl(struct uart_port *port)
{
return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}
/*
* Disable the transmit interrupt.
*
* Although this function is called "stop_tx", it does not actually stop
* transmission of data. Instead, it tells the QE to not generate an
* interrupt when the UCC is finished sending characters.
*/
static void qe_uart_stop_tx(struct uart_port *port)
{
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
clrbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
}
/*
* Transmit as many characters to the HW as possible.
*
* This function will attempt to stuff of all the characters from the
* kernel's transmit buffer into TX BDs.
*
* A return value of non-zero indicates that it successfully stuffed all
* characters from the kernel buffer.
*
* A return value of zero indicates that there are still characters in the
* kernel's buffer that have not been transmitted, but there are no more BDs
* available. This function should be called again after a BD has been made
* available.
*/
static int qe_uart_tx_pump(struct uart_qe_port *qe_port)
{
struct qe_bd *bdp;
unsigned char *p;
unsigned int count;
struct uart_port *port = &qe_port->port;
struct circ_buf *xmit = &port->state->xmit;
/* Handle xon/xoff */
if (port->x_char) {
/* Pick next descriptor and fill from buffer */
bdp = qe_port->tx_cur;
p = qe2cpu_addr(bdp->buf, qe_port);
*p++ = port->x_char;
out_be16(&bdp->length, 1);
setbits16(&bdp->status, BD_SC_READY);
/* Get next BD. */
if (in_be16(&bdp->status) & BD_SC_WRAP)
bdp = qe_port->tx_bd_base;
else
bdp++;
qe_port->tx_cur = bdp;
port->icount.tx++;
port->x_char = 0;
return 1;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
qe_uart_stop_tx(port);
return 0;
}
/* Pick next descriptor and fill from buffer */
bdp = qe_port->tx_cur;
while (!(in_be16(&bdp->status) & BD_SC_READY) &&
(xmit->tail != xmit->head)) {
count = 0;
p = qe2cpu_addr(bdp->buf, qe_port);
while (count < qe_port->tx_fifosize) {
*p++ = xmit->buf[xmit->tail];
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
count++;
if (xmit->head == xmit->tail)
break;
}
out_be16(&bdp->length, count);
setbits16(&bdp->status, BD_SC_READY);
/* Get next BD. */
if (in_be16(&bdp->status) & BD_SC_WRAP)
bdp = qe_port->tx_bd_base;
else
bdp++;
}
qe_port->tx_cur = bdp;
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
if (uart_circ_empty(xmit)) {
/* The kernel buffer is empty, so turn off TX interrupts. We
don't need to be told when the QE is finished transmitting
the data. */
qe_uart_stop_tx(port);
return 0;
}
return 1;
}
/*
* Start transmitting data
*
* This function will start transmitting any available data, if the port
* isn't already transmitting data.
*/
static void qe_uart_start_tx(struct uart_port *port)
{
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
/* If we currently are transmitting, then just return */
if (in_be16(&qe_port->uccp->uccm) & UCC_UART_UCCE_TX)
return;
/* Otherwise, pump the port and start transmission */
if (qe_uart_tx_pump(qe_port))
setbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_TX);
}
/*
* Stop receiving data
*/
static void qe_uart_stop_rx(struct uart_port *port)
{
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
clrbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
}
/*
* Enable status change interrupts
*
* We don't support status change interrupts, but we need to define this
* function otherwise the kernel will panic.
*/
static void qe_uart_enable_ms(struct uart_port *port)
{
}
/* Start or stop sending break signal
*
* This function controls the sending of a break signal. If break_state=1,
* then we start sending a break signal. If break_state=0, then we stop
* sending the break signal.
*/
static void qe_uart_break_ctl(struct uart_port *port, int break_state)
{
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
if (break_state)
ucc_slow_stop_tx(qe_port->us_private);
else
ucc_slow_restart_tx(qe_port->us_private);
}
/* ISR helper function for receiving character.
*
* This function is called by the ISR to handle received characters
*/
static void qe_uart_int_rx(struct uart_qe_port *qe_port)
{
int i;
unsigned char ch, *cp;
struct uart_port *port = &qe_port->port;
struct tty_struct *tty = port->state->port.tty;
struct qe_bd *bdp;
u16 status;
unsigned int flg;
/* Just loop through the closed BDs and copy the characters into
* the buffer.
*/
bdp = qe_port->rx_cur;
while (1) {
status = in_be16(&bdp->status);
/* If this one is empty, then we assume we've read them all */
if (status & BD_SC_EMPTY)
break;
/* get number of characters, and check space in RX buffer */
i = in_be16(&bdp->length);
/* If we don't have enough room in RX buffer for the entire BD,
* then we try later, which will be the next RX interrupt.
*/
if (tty_buffer_request_room(tty, i) < i) {
dev_dbg(port->dev, "ucc-uart: no room in RX buffer\n");
return;
}
/* get pointer */
cp = qe2cpu_addr(bdp->buf, qe_port);
/* loop through the buffer */
while (i-- > 0) {
ch = *cp++;
port->icount.rx++;
flg = TTY_NORMAL;
if (!i && status &
(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV))
goto handle_error;
if (uart_handle_sysrq_char(port, ch))
continue;
error_return:
tty_insert_flip_char(tty, ch, flg);
}
/* This BD is ready to be used again. Clear status. get next */
clrsetbits_be16(&bdp->status, BD_SC_BR | BD_SC_FR | BD_SC_PR |
BD_SC_OV | BD_SC_ID, BD_SC_EMPTY);
if (in_be16(&bdp->status) & BD_SC_WRAP)
bdp = qe_port->rx_bd_base;
else
bdp++;
}
/* Write back buffer pointer */
qe_port->rx_cur = bdp;
/* Activate BH processing */
tty_flip_buffer_push(tty);
return;
/* Error processing */
handle_error:
/* Statistics */
if (status & BD_SC_BR)
port->icount.brk++;
if (status & BD_SC_PR)
port->icount.parity++;
if (status & BD_SC_FR)
port->icount.frame++;
if (status & BD_SC_OV)
port->icount.overrun++;
/* Mask out ignored conditions */
status &= port->read_status_mask;
/* Handle the remaining ones */
if (status & BD_SC_BR)
flg = TTY_BREAK;
else if (status & BD_SC_PR)
flg = TTY_PARITY;
else if (status & BD_SC_FR)
flg = TTY_FRAME;
/* Overrun does not affect the current character ! */
if (status & BD_SC_OV)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
#ifdef SUPPORT_SYSRQ
port->sysrq = 0;
#endif
goto error_return;
}
/* Interrupt handler
*
* This interrupt handler is called after a BD is processed.
*/
static irqreturn_t qe_uart_int(int irq, void *data)
{
struct uart_qe_port *qe_port = (struct uart_qe_port *) data;
struct ucc_slow __iomem *uccp = qe_port->uccp;
u16 events;
/* Clear the interrupts */
events = in_be16(&uccp->ucce);
out_be16(&uccp->ucce, events);
if (events & UCC_UART_UCCE_BRKE)
uart_handle_break(&qe_port->port);
if (events & UCC_UART_UCCE_RX)
qe_uart_int_rx(qe_port);
if (events & UCC_UART_UCCE_TX)
qe_uart_tx_pump(qe_port);
return events ? IRQ_HANDLED : IRQ_NONE;
}
/* Initialize buffer descriptors
*
* This function initializes all of the RX and TX buffer descriptors.
*/
static void qe_uart_initbd(struct uart_qe_port *qe_port)
{
int i;
void *bd_virt;
struct qe_bd *bdp;
/* Set the physical address of the host memory buffers in the buffer
* descriptors, and the virtual address for us to work with.
*/
bd_virt = qe_port->bd_virt;
bdp = qe_port->rx_bd_base;
qe_port->rx_cur = qe_port->rx_bd_base;
for (i = 0; i < (qe_port->rx_nrfifos - 1); i++) {
out_be16(&bdp->status, BD_SC_EMPTY | BD_SC_INTRPT);
out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
out_be16(&bdp->length, 0);
bd_virt += qe_port->rx_fifosize;
bdp++;
}
/* The final RX BD wraps back to the first one */
out_be16(&bdp->status, BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT);
out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
out_be16(&bdp->length, 0);
/* Set the physical address of the host memory
* buffers in the buffer descriptors, and the
* virtual address for us to work with.
*/
bd_virt = qe_port->bd_virt +
L1_CACHE_ALIGN(qe_port->rx_nrfifos * qe_port->rx_fifosize);
qe_port->tx_cur = qe_port->tx_bd_base;
bdp = qe_port->tx_bd_base;
for (i = 0; i < (qe_port->tx_nrfifos - 1); i++) {
out_be16(&bdp->status, BD_SC_INTRPT);
out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
out_be16(&bdp->length, 0);
bd_virt += qe_port->tx_fifosize;
bdp++;
}
/* Loopback requires the preamble bit to be set on the first TX BD */
#ifdef LOOPBACK
setbits16(&qe_port->tx_cur->status, BD_SC_P);
#endif
out_be16(&bdp->status, BD_SC_WRAP | BD_SC_INTRPT);
out_be32(&bdp->buf, cpu2qe_addr(bd_virt, qe_port));
out_be16(&bdp->length, 0);
}
/*
* Initialize a UCC for UART.
*
* This function configures a given UCC to be used as a UART device. Basic
* UCC initialization is handled in qe_uart_request_port(). This function
* does all the UART-specific stuff.
*/
static void qe_uart_init_ucc(struct uart_qe_port *qe_port)
{
u32 cecr_subblock;
struct ucc_slow __iomem *uccp = qe_port->uccp;
struct ucc_uart_pram *uccup = qe_port->uccup;
unsigned int i;
/* First, disable TX and RX in the UCC */
ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX);
/* Program the UCC UART parameter RAM */
out_8(&uccup->common.rbmr, UCC_BMR_GBL | UCC_BMR_BO_BE);
out_8(&uccup->common.tbmr, UCC_BMR_GBL | UCC_BMR_BO_BE);
out_be16(&uccup->common.mrblr, qe_port->rx_fifosize);
out_be16(&uccup->maxidl, 0x10);
out_be16(&uccup->brkcr, 1);
out_be16(&uccup->parec, 0);
out_be16(&uccup->frmec, 0);
out_be16(&uccup->nosec, 0);
out_be16(&uccup->brkec, 0);
out_be16(&uccup->uaddr[0], 0);
out_be16(&uccup->uaddr[1], 0);
out_be16(&uccup->toseq, 0);
for (i = 0; i < 8; i++)
out_be16(&uccup->cchars[i], 0xC000);
out_be16(&uccup->rccm, 0xc0ff);
/* Configure the GUMR registers for UART */
if (soft_uart) {
/* Soft-UART requires a 1X multiplier for TX */
clrsetbits_be32(&uccp->gumr_l,
UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
UCC_SLOW_GUMR_L_RDCR_MASK,
UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_1 |
UCC_SLOW_GUMR_L_RDCR_16);
clrsetbits_be32(&uccp->gumr_h, UCC_SLOW_GUMR_H_RFW,
UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX);
} else {
clrsetbits_be32(&uccp->gumr_l,
UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
UCC_SLOW_GUMR_L_RDCR_MASK,
UCC_SLOW_GUMR_L_MODE_UART | UCC_SLOW_GUMR_L_TDCR_16 |
UCC_SLOW_GUMR_L_RDCR_16);
clrsetbits_be32(&uccp->gumr_h,
UCC_SLOW_GUMR_H_TRX | UCC_SLOW_GUMR_H_TTX,
UCC_SLOW_GUMR_H_RFW);
}
#ifdef LOOPBACK
clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
UCC_SLOW_GUMR_L_DIAG_LOOP);
clrsetbits_be32(&uccp->gumr_h,
UCC_SLOW_GUMR_H_CTSP | UCC_SLOW_GUMR_H_RSYN,
UCC_SLOW_GUMR_H_CDS);
#endif
/* Disable rx interrupts and clear all pending events. */
out_be16(&uccp->uccm, 0);
out_be16(&uccp->ucce, 0xffff);
out_be16(&uccp->udsr, 0x7e7e);
/* Initialize UPSMR */
out_be16(&uccp->upsmr, 0);
if (soft_uart) {
out_be16(&uccup->supsmr, 0x30);
out_be16(&uccup->res92, 0);
out_be32(&uccup->rx_state, 0);
out_be32(&uccup->rx_cnt, 0);
out_8(&uccup->rx_bitmark, 0);
out_8(&uccup->rx_length, 10);
out_be32(&uccup->dump_ptr, 0x4000);
out_8(&uccup->rx_temp_dlst_qe, 0);
out_be32(&uccup->rx_frame_rem, 0);
out_8(&uccup->rx_frame_rem_size, 0);
/* Soft-UART requires TX to be 1X */
out_8(&uccup->tx_mode,
UCC_UART_TX_STATE_UART | UCC_UART_TX_STATE_X1);
out_be16(&uccup->tx_state, 0);
out_8(&uccup->resD4, 0);
out_be16(&uccup->resD5, 0);
/* Set UART mode.
* Enable receive and transmit.
*/
/* From the microcode errata:
* 1.GUMR_L register, set mode=0010 (QMC).
* 2.Set GUMR_H[17] bit. (UART/AHDLC mode).
* 3.Set GUMR_H[19:20] (Transparent mode)
* 4.Clear GUMR_H[26] (RFW)
* ...
* 6.Receiver must use 16x over sampling
*/
clrsetbits_be32(&uccp->gumr_l,
UCC_SLOW_GUMR_L_MODE_MASK | UCC_SLOW_GUMR_L_TDCR_MASK |
UCC_SLOW_GUMR_L_RDCR_MASK,
UCC_SLOW_GUMR_L_MODE_QMC | UCC_SLOW_GUMR_L_TDCR_16 |
UCC_SLOW_GUMR_L_RDCR_16);
clrsetbits_be32(&uccp->gumr_h,
UCC_SLOW_GUMR_H_RFW | UCC_SLOW_GUMR_H_RSYN,
UCC_SLOW_GUMR_H_SUART | UCC_SLOW_GUMR_H_TRX |
UCC_SLOW_GUMR_H_TTX | UCC_SLOW_GUMR_H_TFL);
#ifdef LOOPBACK
clrsetbits_be32(&uccp->gumr_l, UCC_SLOW_GUMR_L_DIAG_MASK,
UCC_SLOW_GUMR_L_DIAG_LOOP);
clrbits32(&uccp->gumr_h, UCC_SLOW_GUMR_H_CTSP |
UCC_SLOW_GUMR_H_CDS);
#endif
cecr_subblock = ucc_slow_get_qe_cr_subblock(qe_port->ucc_num);
qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
QE_CR_PROTOCOL_UNSPECIFIED, 0);
} else {
cecr_subblock = ucc_slow_get_qe_cr_subblock(qe_port->ucc_num);
qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
QE_CR_PROTOCOL_UART, 0);
}
}
/*
* Initialize the port.
*/
static int qe_uart_startup(struct uart_port *port)
{
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
int ret;
/*
* If we're using Soft-UART mode, then we need to make sure the
* firmware has been uploaded first.
*/
if (soft_uart && !firmware_loaded) {
dev_err(port->dev, "Soft-UART firmware not uploaded\n");
return -ENODEV;
}
qe_uart_initbd(qe_port);
qe_uart_init_ucc(qe_port);
/* Install interrupt handler. */
ret = request_irq(port->irq, qe_uart_int, IRQF_SHARED, "ucc-uart",
qe_port);
if (ret) {
dev_err(port->dev, "could not claim IRQ %u\n", port->irq);
return ret;
}
/* Startup rx-int */
setbits16(&qe_port->uccp->uccm, UCC_UART_UCCE_RX);
ucc_slow_enable(qe_port->us_private, COMM_DIR_RX_AND_TX);
return 0;
}
/*
* Shutdown the port.
*/
static void qe_uart_shutdown(struct uart_port *port)
{
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
struct ucc_slow __iomem *uccp = qe_port->uccp;
unsigned int timeout = 20;
/* Disable RX and TX */
/* Wait for all the BDs marked sent */
while (!qe_uart_tx_empty(port)) {
if (!--timeout) {
dev_warn(port->dev, "shutdown timeout\n");
break;
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(2);
}
if (qe_port->wait_closing) {
/* Wait a bit longer */
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(qe_port->wait_closing);
}
/* Stop uarts */
ucc_slow_disable(qe_port->us_private, COMM_DIR_RX_AND_TX);
clrbits16(&uccp->uccm, UCC_UART_UCCE_TX | UCC_UART_UCCE_RX);
/* Shut them really down and reinit buffer descriptors */
ucc_slow_graceful_stop_tx(qe_port->us_private);
qe_uart_initbd(qe_port);
free_irq(port->irq, qe_port);
}
/*
* Set the serial port parameters.
*/
static void qe_uart_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old)
{
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
struct ucc_slow __iomem *uccp = qe_port->uccp;
unsigned int baud;
unsigned long flags;
u16 upsmr = in_be16(&uccp->upsmr);
struct ucc_uart_pram __iomem *uccup = qe_port->uccup;
u16 supsmr = in_be16(&uccup->supsmr);
u8 char_length = 2; /* 1 + CL + PEN + 1 + SL */
/* Character length programmed into the mode register is the
* sum of: 1 start bit, number of data bits, 0 or 1 parity bit,
* 1 or 2 stop bits, minus 1.
* The variable 'char_length' counts this for us.
*/
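/* Worked example: 8N1 gives 1 (start) + 8 (CS8) + 0 (no parity) +
* 1 + 0 (one stop bit) = 10, matching the rx_length default of 10
* programmed in qe_uart_init_ucc(). */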
/* byte size */
/* clear the fields that are reprogrammed from termios below, so that
* stale character-length/parity/stop settings cannot linger */
upsmr &= ~(UCC_UART_UPSMR_CL_MASK | UCC_UART_UPSMR_SL |
UCC_UART_UPSMR_PEN | UCC_UART_UPSMR_RPM_MASK | UCC_UART_UPSMR_TPM_MASK);
supsmr &= ~(UCC_UART_SUPSMR_CL_MASK | UCC_UART_SUPSMR_SL |
UCC_UART_SUPSMR_PEN | UCC_UART_SUPSMR_RPM_MASK | UCC_UART_SUPSMR_TPM_MASK);
switch (termios->c_cflag & CSIZE) {
case CS5:
upsmr |= UCC_UART_UPSMR_CL_5;
supsmr |= UCC_UART_SUPSMR_CL_5;
char_length += 5;
break;
case CS6:
upsmr |= UCC_UART_UPSMR_CL_6;
supsmr |= UCC_UART_SUPSMR_CL_6;
char_length += 6;
break;
case CS7:
upsmr |= UCC_UART_UPSMR_CL_7;
supsmr |= UCC_UART_SUPSMR_CL_7;
char_length += 7;
break;
default: /* case CS8 */
upsmr |= UCC_UART_UPSMR_CL_8;
supsmr |= UCC_UART_SUPSMR_CL_8;
char_length += 8;
break;
}
/* If CSTOPB is set, we want two stop bits */
if (termios->c_cflag & CSTOPB) {
upsmr |= UCC_UART_UPSMR_SL;
supsmr |= UCC_UART_SUPSMR_SL;
char_length++; /* + SL */
}
if (termios->c_cflag & PARENB) {
upsmr |= UCC_UART_UPSMR_PEN;
supsmr |= UCC_UART_SUPSMR_PEN;
char_length++; /* + PEN */
if (!(termios->c_cflag & PARODD)) {
upsmr &= ~(UCC_UART_UPSMR_RPM_MASK |
UCC_UART_UPSMR_TPM_MASK);
upsmr |= UCC_UART_UPSMR_RPM_EVEN |
UCC_UART_UPSMR_TPM_EVEN;
supsmr &= ~(UCC_UART_SUPSMR_RPM_MASK |
UCC_UART_SUPSMR_TPM_MASK);
supsmr |= UCC_UART_SUPSMR_RPM_EVEN |
UCC_UART_SUPSMR_TPM_EVEN;
}
}
/*
* Set up parity check flag
*/
port->read_status_mask = BD_SC_EMPTY | BD_SC_OV;
if (termios->c_iflag & INPCK)
port->read_status_mask |= BD_SC_FR | BD_SC_PR;
if (termios->c_iflag & (BRKINT | PARMRK))
port->read_status_mask |= BD_SC_BR;
/*
* Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= BD_SC_PR | BD_SC_FR;
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= BD_SC_BR;
/*
* If we're ignore parity and break indicators, ignore
* overruns too. (For real raw support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= BD_SC_OV;
}
/*
* !!! ignore all characters if CREAD is not set
*/
if ((termios->c_cflag & CREAD) == 0)
port->read_status_mask &= ~BD_SC_EMPTY;
baud = uart_get_baud_rate(port, termios, old, 0, 115200);
/* Do we really need a spinlock here? */
spin_lock_irqsave(&port->lock, flags);
out_be16(&uccp->upsmr, upsmr);
if (soft_uart) {
out_be16(&uccup->supsmr, supsmr);
out_8(&uccup->rx_length, char_length);
/* Soft-UART requires a 1X multiplier for TX */
qe_setbrg(qe_port->us_info.rx_clock, baud, 16);
qe_setbrg(qe_port->us_info.tx_clock, baud, 1);
} else {
qe_setbrg(qe_port->us_info.rx_clock, baud, 16);
qe_setbrg(qe_port->us_info.tx_clock, baud, 16);
}
spin_unlock_irqrestore(&port->lock, flags);
}
/*
* Return a pointer to a string that describes what kind of port this is.
*/
static const char *qe_uart_type(struct uart_port *port)
{
return "QE";
}
/*
* Allocate any memory and I/O resources required by the port.
*/
static int qe_uart_request_port(struct uart_port *port)
{
int ret;
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
struct ucc_slow_info *us_info = &qe_port->us_info;
struct ucc_slow_private *uccs;
unsigned int rx_size, tx_size;
void *bd_virt;
dma_addr_t bd_dma_addr = 0;
ret = ucc_slow_init(us_info, &uccs);
if (ret) {
dev_err(port->dev, "could not initialize UCC%u\n",
qe_port->ucc_num);
return ret;
}
qe_port->us_private = uccs;
qe_port->uccp = uccs->us_regs;
qe_port->uccup = (struct ucc_uart_pram *) uccs->us_pram;
qe_port->rx_bd_base = uccs->rx_bd;
qe_port->tx_bd_base = uccs->tx_bd;
/*
* Allocate the transmit and receive data buffers.
*/
rx_size = L1_CACHE_ALIGN(qe_port->rx_nrfifos * qe_port->rx_fifosize);
tx_size = L1_CACHE_ALIGN(qe_port->tx_nrfifos * qe_port->tx_fifosize);
bd_virt = dma_alloc_coherent(port->dev, rx_size + tx_size, &bd_dma_addr,
GFP_KERNEL);
if (!bd_virt) {
dev_err(port->dev, "could not allocate buffer descriptors\n");
return -ENOMEM;
}
qe_port->bd_virt = bd_virt;
qe_port->bd_dma_addr = bd_dma_addr;
qe_port->bd_size = rx_size + tx_size;
qe_port->rx_buf = bd_virt;
qe_port->tx_buf = qe_port->rx_buf + rx_size;
return 0;
}
/*
* Configure the port.
*
* We say we're a CPM-type port because that's mostly true. Once the device
* is configured, this driver operates almost identically to the CPM serial
* driver.
*/
static void qe_uart_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_CPM;
qe_uart_request_port(port);
}
}
/*
* Release any memory and I/O resources that were allocated in
* qe_uart_request_port().
*/
static void qe_uart_release_port(struct uart_port *port)
{
struct uart_qe_port *qe_port =
container_of(port, struct uart_qe_port, port);
struct ucc_slow_private *uccs = qe_port->us_private;
dma_free_coherent(port->dev, qe_port->bd_size, qe_port->bd_virt,
qe_port->bd_dma_addr);
ucc_slow_free(uccs);
}
/*
* Verify that the data in serial_struct is suitable for this device.
*/
static int qe_uart_verify_port(struct uart_port *port,
struct serial_struct *ser)
{
if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM)
return -EINVAL;
if (ser->irq < 0 || ser->irq >= nr_irqs)
return -EINVAL;
if (ser->baud_base < 9600)
return -EINVAL;
return 0;
}
/* UART operations
*
* Details on these functions can be found in Documentation/serial/driver
*/
static struct uart_ops qe_uart_pops = {
.tx_empty = qe_uart_tx_empty,
.set_mctrl = qe_uart_set_mctrl,
.get_mctrl = qe_uart_get_mctrl,
.stop_tx = qe_uart_stop_tx,
.start_tx = qe_uart_start_tx,
.stop_rx = qe_uart_stop_rx,
.enable_ms = qe_uart_enable_ms,
.break_ctl = qe_uart_break_ctl,
.startup = qe_uart_startup,
.shutdown = qe_uart_shutdown,
.set_termios = qe_uart_set_termios,
.type = qe_uart_type,
.release_port = qe_uart_release_port,
.request_port = qe_uart_request_port,
.config_port = qe_uart_config_port,
.verify_port = qe_uart_verify_port,
};
/*
* Obtain the SOC model number and revision level
*
* This function parses the device tree to obtain the SOC model. It then
* reads the SVR register to obtain the revision level.
*
* The device tree stores the SOC model in two different ways.
*
* The new way is:
*
* cpu@0 {
* compatible = "PowerPC,8323";
* device_type = "cpu";
* ...
*
*
* The old way is:
* PowerPC,8323@0 {
* device_type = "cpu";
* ...
*
* This code first checks the new way, and then the old way.
*/
static unsigned int soc_info(unsigned int *rev_h, unsigned int *rev_l)
{
struct device_node *np;
const char *soc_string;
unsigned int svr;
unsigned int soc;
/* Find the CPU node */
np = of_find_node_by_type(NULL, "cpu");
if (!np)
return 0;
/* Find the compatible property */
soc_string = of_get_property(np, "compatible", NULL);
if (!soc_string)
/* No compatible property, so try the name. */
soc_string = np->name;
/* Extract the SOC number from the "PowerPC," string */
if ((sscanf(soc_string, "PowerPC,%u", &soc) != 1) || !soc)
return 0;
/* Get the revision from the SVR */
svr = mfspr(SPRN_SVR);
*rev_h = (svr >> 4) & 0xf;
*rev_l = svr & 0xf;
return soc;
}
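/*
 * Example with hypothetical values: on an MPC8323 rev 1.0, soc_info()
 * returns 8323 with *rev_h == 1 and *rev_l == 0, which the probe
 * routine below turns into the firmware file name
 * "fsl_qe_ucode_uart_8323_10.bin".
 */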
/*
* request_firmware_nowait() callback function
*
* This function is called by the kernel when the firmware is made available,
* or if it times out waiting for the firmware.
*/
static void uart_firmware_cont(const struct firmware *fw, void *context)
{
struct qe_firmware *firmware;
struct device *dev = context;
int ret;
if (!fw) {
dev_err(dev, "firmware not found\n");
return;
}
firmware = (struct qe_firmware *) fw->data;
if (firmware->header.length != fw->size) {
dev_err(dev, "invalid firmware\n");
return;
}
ret = qe_upload_firmware(firmware);
if (ret) {
dev_err(dev, "could not load firmware\n");
return;
}
firmware_loaded = 1;
}
static int ucc_uart_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
struct device_node *np = ofdev->node;
const unsigned int *iprop; /* Integer OF properties */
const char *sprop; /* String OF properties */
struct uart_qe_port *qe_port = NULL;
struct resource res;
int ret;
/*
* Determine if we need Soft-UART mode
*/
if (of_find_property(np, "soft-uart", NULL)) {
dev_dbg(&ofdev->dev, "using Soft-UART mode\n");
soft_uart = 1;
}
/*
* If we are using Soft-UART, determine if we need to upload the
* firmware, too.
*/
if (soft_uart) {
struct qe_firmware_info *qe_fw_info;
qe_fw_info = qe_get_firmware_info();
/* Check if the firmware has been uploaded. */
if (qe_fw_info && strstr(qe_fw_info->id, "Soft-UART")) {
firmware_loaded = 1;
} else {
char filename[32];
unsigned int soc;
unsigned int rev_h;
unsigned int rev_l;
soc = soc_info(&rev_h, &rev_l);
if (!soc) {
dev_err(&ofdev->dev, "unknown CPU model\n");
return -ENXIO;
}
sprintf(filename, "fsl_qe_ucode_uart_%u_%u%u.bin",
soc, rev_h, rev_l);
dev_info(&ofdev->dev, "waiting for firmware %s\n",
filename);
/*
* We call request_firmware_nowait instead of
* request_firmware so that the driver can load and
* initialize the ports without holding up the rest of
* the kernel. If hotplug support is enabled in the
* kernel, then we use it.
*/
ret = request_firmware_nowait(THIS_MODULE,
FW_ACTION_HOTPLUG, filename, &ofdev->dev,
&ofdev->dev, uart_firmware_cont);
if (ret) {
dev_err(&ofdev->dev,
"could not load firmware %s\n",
filename);
return ret;
}
}
}
qe_port = kzalloc(sizeof(struct uart_qe_port), GFP_KERNEL);
if (!qe_port) {
dev_err(&ofdev->dev, "can't allocate QE port structure\n");
return -ENOMEM;
}
/* Search for IRQ and mapbase */
ret = of_address_to_resource(np, 0, &res);
if (ret) {
dev_err(&ofdev->dev, "missing 'reg' property in device tree\n");
kfree(qe_port);
return ret;
}
if (!res.start) {
dev_err(&ofdev->dev, "invalid 'reg' property in device tree\n");
kfree(qe_port);
return -EINVAL;
}
qe_port->port.mapbase = res.start;
/* Get the UCC number (device ID) */
/* UCCs are numbered 1-7 */
iprop = of_get_property(np, "cell-index", NULL);
if (!iprop) {
iprop = of_get_property(np, "device-id", NULL);
if (!iprop) {
kfree(qe_port);
dev_err(&ofdev->dev, "UCC is unspecified in "
"device tree\n");
return -EINVAL;
}
}
if ((*iprop < 1) || (*iprop > UCC_MAX_NUM)) {
dev_err(&ofdev->dev, "no support for UCC%u\n", *iprop);
kfree(qe_port);
return -ENODEV;
}
qe_port->ucc_num = *iprop - 1;
/*
* In the future, we should not require the BRG to be specified in the
* device tree. If no clock-source is specified, then just pick a BRG
* to use. This requires a new QE library function that manages BRG
* assignments.
*/
sprop = of_get_property(np, "rx-clock-name", NULL);
if (!sprop) {
dev_err(&ofdev->dev, "missing rx-clock-name in device tree\n");
kfree(qe_port);
return -ENODEV;
}
qe_port->us_info.rx_clock = qe_clock_source(sprop);
if ((qe_port->us_info.rx_clock < QE_BRG1) ||
(qe_port->us_info.rx_clock > QE_BRG16)) {
dev_err(&ofdev->dev, "rx-clock-name must be a BRG for UART\n");
kfree(qe_port);
return -ENODEV;
}
#ifdef LOOPBACK
/* In internal loopback mode, TX and RX must use the same clock */
qe_port->us_info.tx_clock = qe_port->us_info.rx_clock;
#else
sprop = of_get_property(np, "tx-clock-name", NULL);
if (!sprop) {
dev_err(&ofdev->dev, "missing tx-clock-name in device tree\n");
kfree(qe_port);
return -ENODEV;
}
qe_port->us_info.tx_clock = qe_clock_source(sprop);
#endif
if ((qe_port->us_info.tx_clock < QE_BRG1) ||
(qe_port->us_info.tx_clock > QE_BRG16)) {
dev_err(&ofdev->dev, "tx-clock-name must be a BRG for UART\n");
kfree(qe_port);
return -ENODEV;
}
/* Get the port number, numbered 0-3 */
iprop = of_get_property(np, "port-number", NULL);
if (!iprop) {
dev_err(&ofdev->dev, "missing port-number in device tree\n");
kfree(qe_port);
return -EINVAL;
}
qe_port->port.line = *iprop;
if (qe_port->port.line >= UCC_MAX_UART) {
dev_err(&ofdev->dev, "port-number must be 0-%u\n",
UCC_MAX_UART - 1);
kfree(qe_port);
return -EINVAL;
}
qe_port->port.irq = irq_of_parse_and_map(np, 0);
if (qe_port->port.irq == NO_IRQ) {
dev_err(&ofdev->dev, "could not map IRQ for UCC%u\n",
qe_port->ucc_num + 1);
kfree(qe_port);
return -EINVAL;
}
/*
* Newer device trees have an "fsl,qe" compatible property for the QE
* node, but we still need to support older device trees.
*/
np = of_find_compatible_node(NULL, NULL, "fsl,qe");
if (!np) {
np = of_find_node_by_type(NULL, "qe");
if (!np) {
dev_err(&ofdev->dev, "could not find 'qe' node\n");
kfree(qe_port);
return -EINVAL;
}
}
iprop = of_get_property(np, "brg-frequency", NULL);
if (!iprop) {
dev_err(&ofdev->dev,
"missing brg-frequency in device tree\n");
kfree(qe_port);
return -EINVAL;
}
if (*iprop)
qe_port->port.uartclk = *iprop;
else {
/*
* Older versions of U-Boot do not initialize the brg-frequency
* property, so in this case we assume the BRG frequency is
* half the QE bus frequency.
*/
iprop = of_get_property(np, "bus-frequency", NULL);
if (!iprop) {
dev_err(&ofdev->dev,
"missing QE bus-frequency in device tree\n");
kfree(qe_port);
return -EINVAL;
}
if (*iprop)
qe_port->port.uartclk = *iprop / 2;
else {
dev_err(&ofdev->dev,
"invalid QE bus-frequency in device tree\n");
kfree(qe_port);
return -EINVAL;
}
}
spin_lock_init(&qe_port->port.lock);
qe_port->np = np;
qe_port->port.dev = &ofdev->dev;
qe_port->port.ops = &qe_uart_pops;
qe_port->port.iotype = UPIO_MEM;
qe_port->tx_nrfifos = TX_NUM_FIFO;
qe_port->tx_fifosize = TX_BUF_SIZE;
qe_port->rx_nrfifos = RX_NUM_FIFO;
qe_port->rx_fifosize = RX_BUF_SIZE;
qe_port->wait_closing = UCC_WAIT_CLOSING;
qe_port->port.fifosize = 512;
qe_port->port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
qe_port->us_info.ucc_num = qe_port->ucc_num;
qe_port->us_info.regs = (phys_addr_t) res.start;
qe_port->us_info.irq = qe_port->port.irq;
qe_port->us_info.rx_bd_ring_len = qe_port->rx_nrfifos;
qe_port->us_info.tx_bd_ring_len = qe_port->tx_nrfifos;
/* Make sure ucc_slow_init() initializes both TX and RX */
qe_port->us_info.init_tx = 1;
qe_port->us_info.init_rx = 1;
/* Add the port to the uart sub-system. This will cause
* qe_uart_config_port() to be called, so the us_info structure must
* be initialized.
*/
ret = uart_add_one_port(&ucc_uart_driver, &qe_port->port);
if (ret) {
dev_err(&ofdev->dev, "could not add /dev/ttyQE%u\n",
qe_port->port.line);
kfree(qe_port);
return ret;
}
dev_set_drvdata(&ofdev->dev, qe_port);
dev_info(&ofdev->dev, "UCC%u assigned to /dev/ttyQE%u\n",
qe_port->ucc_num + 1, qe_port->port.line);
/* Display the mknod command for this device */
dev_dbg(&ofdev->dev, "mknod command is 'mknod /dev/ttyQE%u c %u %u'\n",
qe_port->port.line, SERIAL_QE_MAJOR,
SERIAL_QE_MINOR + qe_port->port.line);
return 0;
}
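/*
 * Illustrative device tree node consumed by ucc_uart_probe() above;
 * every property value here is a hypothetical example:
 *
 *	serial@2200 {
 *		device_type = "serial";
 *		compatible = "ucc_uart";
 *		reg = <0x2200 0x200>;
 *		cell-index = <3>;
 *		port-number = <0>;
 *		rx-clock-name = "brg7";
 *		tx-clock-name = "brg8";
 *		interrupts = <34 8>;
 *	};
 */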
static int ucc_uart_remove(struct of_device *ofdev)
{
struct uart_qe_port *qe_port = dev_get_drvdata(&ofdev->dev);
dev_info(&ofdev->dev, "removing /dev/ttyQE%u\n", qe_port->port.line);
uart_remove_one_port(&ucc_uart_driver, &qe_port->port);
dev_set_drvdata(&ofdev->dev, NULL);
kfree(qe_port);
return 0;
}
static struct of_device_id ucc_uart_match[] = {
{
.type = "serial",
.compatible = "ucc_uart",
},
{},
};
MODULE_DEVICE_TABLE(of, ucc_uart_match);
static struct of_platform_driver ucc_uart_of_driver = {
.owner = THIS_MODULE,
.name = "ucc_uart",
.match_table = ucc_uart_match,
.probe = ucc_uart_probe,
.remove = ucc_uart_remove,
};
static int __init ucc_uart_init(void)
{
int ret;
printk(KERN_INFO "Freescale QUICC Engine UART device driver\n");
#ifdef LOOPBACK
printk(KERN_INFO "ucc-uart: Using loopback mode\n");
#endif
ret = uart_register_driver(&ucc_uart_driver);
if (ret) {
printk(KERN_ERR "ucc-uart: could not register UART driver\n");
return ret;
}
ret = of_register_platform_driver(&ucc_uart_of_driver);
if (ret)
printk(KERN_ERR
"ucc-uart: could not register platform driver\n");
return ret;
}
static void __exit ucc_uart_exit(void)
{
printk(KERN_INFO
"Freescale QUICC Engine UART device driver unloading\n");
of_unregister_platform_driver(&ucc_uart_of_driver);
uart_unregister_driver(&ucc_uart_driver);
}
module_init(ucc_uart_init);
module_exit(ucc_uart_exit);
MODULE_DESCRIPTION("Freescale QUICC Engine (QE) UART");
MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_QE_MAJOR);
| gpl-2.0 |
ehigh2014/linux | drivers/media/i2c/ir-kbd-i2c.c | 706 | 12666 | /*
*
* keyboard input driver for i2c IR remote controls
*
* Copyright (c) 2000-2003 Gerd Knorr <kraxel@bytesex.org>
* modified for PixelView (BT878P+W/FM) by
* Michal Kochanowicz <mkochano@pld.org.pl>
* Christoph Bartelmus <lirc@bartelmus.de>
* modified for KNC ONE TV Station/Anubis Typhoon TView Tuner by
* Ulrich Mueller <ulrich.mueller42@web.de>
* modified for em2820 based USB TV tuners by
* Markus Rechberger <mrechberger@gmail.com>
* modified for DViCO Fusion HDTV 5 RT GOLD by
* Chaogui Zhang <czhang1974@gmail.com>
* modified for MSI TV@nywhere Plus by
* Henry Wong <henry@stuffedcow.net>
* Mark Schultz <n9xmj@yahoo.com>
* Brian Rogers <brian_rogers@comcast.net>
* modified for AVerMedia Cardbus by
* Oldrich Jedlicka <oldium.pro@seznam.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/workqueue.h>
#include <media/rc-core.h>
#include <media/ir-kbd-i2c.h>
/* ----------------------------------------------------------------------- */
/* insmod parameters */
static int debug;
module_param(debug, int, 0644); /* debug level (0,1,2) */
#define MODULE_NAME "ir-kbd-i2c"
#define dprintk(level, fmt, arg...) do { \
if (debug >= level) \
printk(KERN_DEBUG MODULE_NAME ": " fmt, ## arg); \
} while (0)
/* ----------------------------------------------------------------------- */
static int get_key_haup_common(struct IR_i2c *ir, enum rc_type *protocol,
u32 *scancode, u8 *ptoggle, int size, int offset)
{
unsigned char buf[6];
int start, range, toggle, dev, code, ircode;
/* poll IR chip */
if (size != i2c_master_recv(ir->c, buf, size))
return -EIO;
/* split rc5 data block ... */
start = (buf[offset] >> 7) & 1;
range = (buf[offset] >> 6) & 1;
toggle = (buf[offset] >> 5) & 1;
dev = buf[offset] & 0x1f;
code = (buf[offset+1] >> 2) & 0x3f;
/* RC5 has two start bits:
* the first bit must be one,
* the second bit defines the command range (1 = 0-63, 0 = 64-127)
*/
if (!start)
/* no key pressed */
return 0;
/* filter out invalid key presses */
ircode = (start << 12) | (toggle << 11) | (dev << 6) | code;
if ((ircode & 0x1fff) == 0x1fff)
return 0;
if (!range)
code += 64;
dprintk(1,"ir hauppauge (rc5): s%d r%d t%d dev=%d code=%d\n",
start, range, toggle, dev, code);
*protocol = RC_TYPE_RC5;
*scancode = RC_SCANCODE_RC5(dev, code);
*ptoggle = toggle;
return 1;
}
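/*
 * A self-contained sketch (illustrative only, not part of this driver)
 * of the RC5 bit unpacking performed by get_key_haup_common() above;
 * the struct and helper names are hypothetical.
 */
struct rc5_example_fields {
	int start, range, toggle, dev, code;
};

static inline struct rc5_example_fields
rc5_example_unpack(const unsigned char *buf, int offset)
{
	struct rc5_example_fields f;

	f.start  = (buf[offset] >> 7) & 1;   /* first start bit, must be 1 */
	f.range  = (buf[offset] >> 6) & 1;   /* 1 = codes 0-63, 0 = 64-127 */
	f.toggle = (buf[offset] >> 5) & 1;   /* flips on each new key press */
	f.dev    = buf[offset] & 0x1f;       /* 5-bit device address */
	f.code   = (buf[offset + 1] >> 2) & 0x3f;
	if (!f.range)
		f.code += 64;
	return f;
}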
static int get_key_haup(struct IR_i2c *ir, enum rc_type *protocol,
u32 *scancode, u8 *toggle)
{
return get_key_haup_common(ir, protocol, scancode, toggle, 3, 0);
}
static int get_key_haup_xvr(struct IR_i2c *ir, enum rc_type *protocol,
u32 *scancode, u8 *toggle)
{
int ret;
unsigned char buf[1] = { 0 };
/*
* This is the same apparent "are you ready?" poll command observed
* watching Windows driver traffic and implemented in lirc_zilog. With
* this added, we get far saner remote behavior with z8 chips on usb
* connected devices, even with the default polling interval of 100ms.
*/
ret = i2c_master_send(ir->c, buf, 1);
if (ret != 1)
return (ret < 0) ? ret : -EINVAL;
return get_key_haup_common(ir, protocol, scancode, toggle, 6, 3);
}
static int get_key_pixelview(struct IR_i2c *ir, enum rc_type *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char b;
/* poll IR chip */
if (1 != i2c_master_recv(ir->c, &b, 1)) {
dprintk(1,"read error\n");
return -EIO;
}
*protocol = RC_TYPE_OTHER;
*scancode = b;
*toggle = 0;
return 1;
}
static int get_key_fusionhdtv(struct IR_i2c *ir, enum rc_type *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char buf[4];
/* poll IR chip */
if (4 != i2c_master_recv(ir->c, buf, 4)) {
dprintk(1,"read error\n");
return -EIO;
}
if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0 || buf[3] != 0)
dprintk(2, "%s: 0x%2x 0x%2x 0x%2x 0x%2x\n", __func__,
buf[0], buf[1], buf[2], buf[3]);
/* no key pressed or signal from another IR remote */
if (buf[0] != 0x1 || buf[1] != 0xfe)
return 0;
*protocol = RC_TYPE_UNKNOWN;
*scancode = buf[2];
*toggle = 0;
return 1;
}
static int get_key_knc1(struct IR_i2c *ir, enum rc_type *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char b;
/* poll IR chip */
if (1 != i2c_master_recv(ir->c, &b, 1)) {
dprintk(1,"read error\n");
return -EIO;
}
/*
 * It seems that 0xfe indicates that a button is still held
 * down, while 0xff indicates that no button is held down.
 * 0xfe sequences are sometimes interrupted by 0xff.
 */
dprintk(2,"key %02x\n", b);
if (b == 0xff)
return 0;
if (b == 0xfe)
/* keep old data */
return 1;
*protocol = RC_TYPE_UNKNOWN;
*scancode = b;
*toggle = 0;
return 1;
}
static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_type *protocol,
u32 *scancode, u8 *toggle)
{
unsigned char subaddr, key, keygroup;
struct i2c_msg msg[] = { { .addr = ir->c->addr, .flags = 0,
.buf = &subaddr, .len = 1},
{ .addr = ir->c->addr, .flags = I2C_M_RD,
.buf = &key, .len = 1} };
subaddr = 0x0d;
if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
dprintk(1, "read error\n");
return -EIO;
}
if (key == 0xff)
return 0;
subaddr = 0x0b;
msg[1].buf = &keygroup;
if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
dprintk(1, "read error\n");
return -EIO;
}
if (keygroup == 0xff)
return 0;
dprintk(1, "read key 0x%02x/0x%02x\n", key, keygroup);
if (keygroup < 2 || keygroup > 4) {
/* Only a warning */
dprintk(1, "warning: invalid key group 0x%02x for key 0x%02x\n",
keygroup, key);
}
key |= (keygroup & 1) << 6;
*protocol = RC_TYPE_UNKNOWN;
*scancode = key;
if (ir->c->addr == 0x41) /* AVerMedia EM78P153 */
*scancode |= keygroup << 8;
*toggle = 0;
return 1;
}
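/*
 * A minimal, self-contained sketch (illustrative only) of the
 * write-then-read i2c pattern used above: the first message writes one
 * register sub-address byte (0x0d for the key, 0x0b for the key group)
 * and the second message reads that register back. The helper name is
 * hypothetical and not part of this driver.
 */
static inline int example_i2c_read_reg(struct i2c_client *c, u8 reg, u8 *val)
{
	struct i2c_msg msg[] = {
		{ .addr = c->addr, .flags = 0, .buf = &reg, .len = 1 },
		{ .addr = c->addr, .flags = I2C_M_RD, .buf = val, .len = 1 },
	};

	/* i2c_transfer() returns the number of messages transferred */
	return (i2c_transfer(c->adapter, msg, 2) == 2) ? 0 : -EIO;
}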
/* ----------------------------------------------------------------------- */
static int ir_key_poll(struct IR_i2c *ir)
{
enum rc_type protocol;
u32 scancode;
u8 toggle;
int rc;
dprintk(3, "%s\n", __func__);
rc = ir->get_key(ir, &protocol, &scancode, &toggle);
if (rc < 0) {
dprintk(2,"error\n");
return rc;
}
if (rc) {
dprintk(1, "%s: proto = 0x%04x, scancode = 0x%08x\n",
__func__, protocol, scancode);
rc_keydown(ir->rc, protocol, scancode, toggle);
}
return 0;
}
static void ir_work(struct work_struct *work)
{
int rc;
struct IR_i2c *ir = container_of(work, struct IR_i2c, work.work);
rc = ir_key_poll(ir);
if (rc == -ENODEV) {
rc_unregister_device(ir->rc);
ir->rc = NULL;
return;
}
schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling_interval));
}
/* ----------------------------------------------------------------------- */
static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
char *ir_codes = NULL;
const char *name = NULL;
u64 rc_type = RC_BIT_UNKNOWN;
struct IR_i2c *ir;
struct rc_dev *rc = NULL;
struct i2c_adapter *adap = client->adapter;
unsigned short addr = client->addr;
int err;
ir = devm_kzalloc(&client->dev, sizeof(*ir), GFP_KERNEL);
if (!ir)
return -ENOMEM;
ir->c = client;
ir->polling_interval = DEFAULT_POLLING_INTERVAL;
i2c_set_clientdata(client, ir);
switch(addr) {
case 0x64:
name = "Pixelview";
ir->get_key = get_key_pixelview;
rc_type = RC_BIT_OTHER;
ir_codes = RC_MAP_EMPTY;
break;
case 0x18:
case 0x1f:
case 0x1a:
name = "Hauppauge";
ir->get_key = get_key_haup;
rc_type = RC_BIT_RC5;
ir_codes = RC_MAP_HAUPPAUGE;
break;
case 0x30:
name = "KNC One";
ir->get_key = get_key_knc1;
rc_type = RC_BIT_OTHER;
ir_codes = RC_MAP_EMPTY;
break;
case 0x6b:
name = "FusionHDTV";
ir->get_key = get_key_fusionhdtv;
rc_type = RC_BIT_UNKNOWN;
ir_codes = RC_MAP_FUSIONHDTV_MCE;
break;
case 0x40:
name = "AVerMedia Cardbus remote";
ir->get_key = get_key_avermedia_cardbus;
rc_type = RC_BIT_OTHER;
ir_codes = RC_MAP_AVERMEDIA_CARDBUS;
break;
case 0x41:
name = "AVerMedia EM78P153";
ir->get_key = get_key_avermedia_cardbus;
rc_type = RC_BIT_OTHER;
/* RM-KV remote, seems to be same as RM-K6 */
ir_codes = RC_MAP_AVERMEDIA_M733A_RM_K6;
break;
case 0x71:
name = "Hauppauge/Zilog Z8";
ir->get_key = get_key_haup_xvr;
rc_type = RC_BIT_RC5;
ir_codes = RC_MAP_HAUPPAUGE;
break;
}
/* Let the caller override settings */
if (client->dev.platform_data) {
const struct IR_i2c_init_data *init_data =
client->dev.platform_data;
ir_codes = init_data->ir_codes;
rc = init_data->rc_dev;
name = init_data->name;
if (init_data->type)
rc_type = init_data->type;
if (init_data->polling_interval)
ir->polling_interval = init_data->polling_interval;
switch (init_data->internal_get_key_func) {
case IR_KBD_GET_KEY_CUSTOM:
/* The bridge driver provided us its own function */
ir->get_key = init_data->get_key;
break;
case IR_KBD_GET_KEY_PIXELVIEW:
ir->get_key = get_key_pixelview;
break;
case IR_KBD_GET_KEY_HAUP:
ir->get_key = get_key_haup;
break;
case IR_KBD_GET_KEY_KNC1:
ir->get_key = get_key_knc1;
break;
case IR_KBD_GET_KEY_FUSIONHDTV:
ir->get_key = get_key_fusionhdtv;
break;
case IR_KBD_GET_KEY_HAUP_XVR:
ir->get_key = get_key_haup_xvr;
break;
case IR_KBD_GET_KEY_AVERMEDIA_CARDBUS:
ir->get_key = get_key_avermedia_cardbus;
break;
}
}
if (!rc) {
/*
* If platform_data doesn't specify rc_dev, initialize it
* internally
*/
rc = rc_allocate_device();
if (!rc)
return -ENOMEM;
}
ir->rc = rc;
/* Make sure we are all setup before going on */
if (!name || !ir->get_key || !rc_type || !ir_codes) {
dprintk(1, ": Unsupported device at address 0x%02x\n",
addr);
err = -ENODEV;
goto err_out_free;
}
/* Sets name */
snprintf(ir->name, sizeof(ir->name), "i2c IR (%s)", name);
ir->ir_codes = ir_codes;
snprintf(ir->phys, sizeof(ir->phys), "%s/%s/ir0",
dev_name(&adap->dev),
dev_name(&client->dev));
/*
* Initialize input_dev fields
* It doesn't make sense to allow overriding them via platform_data
*/
rc->input_id.bustype = BUS_I2C;
rc->input_phys = ir->phys;
rc->input_name = ir->name;
/*
* Initialize the other fields of rc_dev
*/
rc->map_name = ir->ir_codes;
rc->allowed_protocols = rc_type;
rc->enabled_protocols = rc_type;
if (!rc->driver_name)
rc->driver_name = MODULE_NAME;
err = rc_register_device(rc);
if (err)
goto err_out_free;
printk(KERN_INFO MODULE_NAME ": %s detected at %s [%s]\n",
ir->name, ir->phys, adap->name);
/* start polling via eventd */
INIT_DELAYED_WORK(&ir->work, ir_work);
schedule_delayed_work(&ir->work, 0);
return 0;
err_out_free:
/* Only frees rc if it was allocated internally */
rc_free_device(rc);
return err;
}
static int ir_remove(struct i2c_client *client)
{
struct IR_i2c *ir = i2c_get_clientdata(client);
/* kill outstanding polls */
cancel_delayed_work_sync(&ir->work);
/* unregister device */
rc_unregister_device(ir->rc);
/* free memory */
return 0;
}
static const struct i2c_device_id ir_kbd_id[] = {
/* Generic entry for any IR receiver */
{ "ir_video", 0 },
/* IR device specific entries should be added here */
{ "ir_rx_z8f0811_haup", 0 },
{ "ir_rx_z8f0811_hdpvr", 0 },
{ }
};
static struct i2c_driver ir_kbd_driver = {
.driver = {
.name = "ir-kbd-i2c",
},
.probe = ir_probe,
.remove = ir_remove,
.id_table = ir_kbd_id,
};
module_i2c_driver(ir_kbd_driver);
/* ----------------------------------------------------------------------- */
MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, Ulrich Mueller");
MODULE_DESCRIPTION("input driver for i2c IR remote controls");
MODULE_LICENSE("GPL");
| gpl-2.0 |
kbc-developers/android_kernel_samsung_tuna | fs/eventpoll.c | 706 | 51371 | /*
* fs/eventpoll.c (Efficient event retrieval implementation)
* Copyright (C) 2001,...,2009 Davide Libenzi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Davide Libenzi <davidel@xmailserver.org>
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <asm/atomic.h>
/*
* LOCKING:
* There are three levels of locking required by epoll:
*
* 1) epmutex (mutex)
* 2) ep->mtx (mutex)
* 3) ep->lock (spinlock)
*
* The acquire order is the one listed above, from 1 to 3.
* We need a spinlock (ep->lock) because we manipulate objects
* from inside the poll callback, that might be triggered from
* a wake_up() that in turn might be called from IRQ context.
* So we can't sleep inside the poll callback and hence we need
* a spinlock. During the event transfer loop (from kernel to
* user space) we could end up sleeping due to a copy_to_user(), so
* we need a lock that will allow us to sleep. This lock is a
* mutex (ep->mtx). It is acquired during the event transfer loop,
* during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
* Then we also need a global mutex to serialize eventpoll_release_file()
* and ep_free().
* This mutex is acquired by ep_free() during the epoll file
* cleanup path and it is also acquired by eventpoll_release_file()
* if a file has been pushed inside an epoll set and it is then
* close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
* It is also acquired when inserting an epoll fd onto another epoll
* fd. We do this so that we walk the epoll tree and ensure that this
* insertion does not create a cycle of epoll file descriptors, which
* could lead to deadlock. We need a global mutex to prevent two
* simultaneous inserts (A into B and B into A) from racing and
* constructing a cycle without either insert observing that it is
* going to.
* It is necessary to acquire multiple "ep->mtx"es at once in the
* case when one epoll fd is added to another. In this case, we
* always acquire the locks in the order of nesting (i.e. after
* epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
* before e2->mtx). Since we disallow cycles of epoll file
* descriptors, this ensures that the mutexes are well-ordered. In
* order to communicate this nesting to lockdep, when walking a tree
* of epoll file descriptors, we use the current recursion depth as
* the lockdep subkey.
* It is possible to drop the "ep->mtx" and to use the global
* mutex "epmutex" (together with "ep->lock") to have it working,
* but having "ep->mtx" will make the interface more scalable.
* Events that require holding "epmutex" are very rare, while for
* normal operations the epoll private "ep->mtx" will guarantee
* better scalability.
*/
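/*
 * A condensed sketch of the acquire order described above (illustrative
 * only; the real users are the functions later in this file):
 *
 *	mutex_lock(&epmutex);                  (1) global mutex
 *	mutex_lock(&ep->mtx);                  (2) per-eventpoll mutex
 *	spin_lock_irqsave(&ep->lock, flags);   (3) innermost spinlock
 *	...
 *	spin_unlock_irqrestore(&ep->lock, flags);
 *	mutex_unlock(&ep->mtx);
 *	mutex_unlock(&epmutex);
 */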
/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)
/* Maximum number of nesting allowed inside epoll sets */
#define EP_MAX_NESTS 4
#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
#define EP_UNACTIVE_PTR ((void *) -1L)
#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
struct epoll_filefd {
struct file *file;
int fd;
};
/*
* Structure used to track possible nested calls, to detect overly deep
* recursion and loop cycles.
*/
struct nested_call_node {
struct list_head llink;
void *cookie;
void *ctx;
};
/*
* This structure is used as a collector for nested calls, to check for
* maximum recursion depth and loop cycles.
*/
struct nested_calls {
struct list_head tasks_call_list;
spinlock_t lock;
};
/*
* Each file descriptor added to the eventpoll interface will
* have an entry of this type linked to the "rbr" RB tree.
*/
struct epitem {
/* RB tree node used to link this structure to the eventpoll RB tree */
struct rb_node rbn;
/* List header used to link this structure to the eventpoll ready list */
struct list_head rdllink;
/*
* Works together with "struct eventpoll"->ovflist to keep the
* singly linked chain of items.
*/
struct epitem *next;
/* The file descriptor information this item refers to */
struct epoll_filefd ffd;
/* Number of active wait queue attached to poll operations */
int nwait;
/* List containing poll wait queues */
struct list_head pwqlist;
/* The "container" of this item */
struct eventpoll *ep;
/* List header used to link this item to the "struct file" items list */
struct list_head fllink;
/* The structure that describes the events of interest and the source fd */
struct epoll_event event;
};
/*
* This structure is stored inside the "private_data" member of the file
* structure and represents the main data structure for the eventpoll
* interface.
*/
struct eventpoll {
/* Protect the access to this structure */
spinlock_t lock;
/*
* This mutex is used to ensure that files are not removed
* while epoll is using them. This is held during the event
* collection loop, the file cleanup path, the epoll file exit
* code and the ctl operations.
*/
struct mutex mtx;
/* Wait queue used by sys_epoll_wait() */
wait_queue_head_t wq;
/* Wait queue used by file->poll() */
wait_queue_head_t poll_wait;
/* List of ready file descriptors */
struct list_head rdllist;
/* RB tree root used to store monitored fd structs */
struct rb_root rbr;
/*
* This is a singly linked list that chains all the "struct epitem"
* entries whose events arrived while ready events were being
* transferred to userspace without holding ->lock.
*/
struct epitem *ovflist;
/* The user that created the eventpoll descriptor */
struct user_struct *user;
struct file *file;
/* used to optimize loop detection check */
int visited;
struct list_head visited_list_link;
};
/* Wait structure used by the poll hooks */
struct eppoll_entry {
/* List header used to link this structure to the "struct epitem" */
struct list_head llink;
/* The "base" pointer is set to the container "struct epitem" */
struct epitem *base;
/*
* Wait queue item that will be linked to the target file wait
* queue head.
*/
wait_queue_t wait;
/* The wait queue head that linked the "wait" wait queue item */
wait_queue_head_t *whead;
};
/* Wrapper struct used by poll queueing */
struct ep_pqueue {
poll_table pt;
struct epitem *epi;
};
/* Used by the ep_send_events() function as callback private data */
struct ep_send_events_data {
int maxevents;
struct epoll_event __user *events;
};
/*
* Configuration options available inside /proc/sys/fs/epoll/
*/
/* Maximum number of epoll watched descriptors, per user */
static long max_user_watches __read_mostly;
/*
* This mutex is used to serialize ep_free() and eventpoll_release_file().
*/
static DEFINE_MUTEX(epmutex);
/* Used to check for epoll file descriptor inclusion loops */
static struct nested_calls poll_loop_ncalls;
/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;
/* Used to call file's f_op->poll() under the nested calls boundaries */
static struct nested_calls poll_readywalk_ncalls;
/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;
/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;
/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
static LIST_HEAD(visited_list);
/*
* List of files with newly added links, where we may need to limit the number
* of emanating paths. Protected by the epmutex.
*/
static LIST_HEAD(tfile_check_list);
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
static long zero;
static long long_max = LONG_MAX;
ctl_table epoll_table[] = {
{
.procname = "max_user_watches",
.data = &max_user_watches,
.maxlen = sizeof(max_user_watches),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = &zero,
.extra2 = &long_max,
},
{ }
};
#endif /* CONFIG_SYSCTL */
static const struct file_operations eventpoll_fops;
static inline int is_file_epoll(struct file *f)
{
return f->f_op == &eventpoll_fops;
}
/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
struct file *file, int fd)
{
ffd->file = file;
ffd->fd = fd;
}
/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
struct epoll_filefd *p2)
{
return (p1->file > p2->file ? +1:
(p1->file < p2->file ? -1 : p1->fd - p2->fd));
}
/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct list_head *p)
{
return !list_empty(p);
}
static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
{
return container_of(p, struct eppoll_entry, wait);
}
/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
return container_of(p, struct eppoll_entry, wait)->base;
}
/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
return container_of(p, struct ep_pqueue, pt)->epi;
}
/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
return op != EPOLL_CTL_DEL;
}
/* Initialize the poll safe wake up structure */
static void ep_nested_calls_init(struct nested_calls *ncalls)
{
INIT_LIST_HEAD(&ncalls->tasks_call_list);
spin_lock_init(&ncalls->lock);
}
/**
* ep_events_available - Checks if ready events might be available.
*
* @ep: Pointer to the eventpoll context.
*
* Returns: Returns a value different than zero if ready events are available,
* or zero otherwise.
*/
static inline int ep_events_available(struct eventpoll *ep)
{
return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
}
/**
* ep_call_nested - Perform a bounded (possibly) nested call, by checking
* that the recursion limit is not exceeded, and that
* the same nested call (identified by the same cookie) is
* not re-entered.
*
* @ncalls: Pointer to the nested_calls structure to be used for this call.
* @max_nests: Maximum number of allowed nesting calls.
* @nproc: Nested call core function pointer.
* @priv: Opaque data to be passed to the @nproc callback.
* @cookie: Cookie to be used to identify this nested call.
* @ctx: This instance context.
*
* Returns: Returns the code returned by the @nproc callback, or -1 if
* the maximum recursion limit has been exceeded.
*/
static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
int (*nproc)(void *, void *, int), void *priv,
void *cookie, void *ctx)
{
int error, call_nests = 0;
unsigned long flags;
struct list_head *lsthead = &ncalls->tasks_call_list;
struct nested_call_node *tncur;
struct nested_call_node tnode;
spin_lock_irqsave(&ncalls->lock, flags);
/*
* Try to see if the current task is already inside this wakeup call.
* We use a list here, since the population inside this set is always
* very much limited.
*/
list_for_each_entry(tncur, lsthead, llink) {
if (tncur->ctx == ctx &&
(tncur->cookie == cookie || ++call_nests > max_nests)) {
/*
* Oops ... loop detected or maximum nest level reached.
* We abort this wake by breaking the cycle itself.
*/
error = -1;
goto out_unlock;
}
}
/* Add the current task and cookie to the list */
tnode.ctx = ctx;
tnode.cookie = cookie;
list_add(&tnode.llink, lsthead);
spin_unlock_irqrestore(&ncalls->lock, flags);
/* Call the nested function */
error = (*nproc)(priv, cookie, call_nests);
/* Remove the current task from the list */
spin_lock_irqsave(&ncalls->lock, flags);
list_del(&tnode.llink);
out_unlock:
spin_unlock_irqrestore(&ncalls->lock, flags);
return error;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
unsigned long events, int subclass)
{
unsigned long flags;
spin_lock_irqsave_nested(&wqueue->lock, flags, subclass);
wake_up_locked_poll(wqueue, events);
spin_unlock_irqrestore(&wqueue->lock, flags);
}
#else
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
unsigned long events, int subclass)
{
wake_up_poll(wqueue, events);
}
#endif
static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
{
ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN,
1 + call_nests);
return 0;
}
/*
* Perform a safe wake up of the poll wait list. The problem is that
* with the new callback'd wake up system, it is possible that the
* poll callback is reentered from inside the call to wake_up() done
* on the poll wait queue head. The rule is that we cannot reenter the
* wake up code from the same task more than EP_MAX_NESTS times,
* and we cannot reenter the same wait queue head at all. This allows
* a hierarchy of epoll file descriptors that is no more than
* EP_MAX_NESTS deep.
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
int this_cpu = get_cpu();
ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
put_cpu();
}
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
wait_queue_head_t *whead;
rcu_read_lock();
/* If it is cleared by POLLFREE, it should be rcu-safe */
whead = rcu_dereference(pwq->whead);
if (whead)
remove_wait_queue(whead, &pwq->wait);
rcu_read_unlock();
}
/*
* This function unregisters poll callbacks from the associated file
* descriptor. Must be called with "mtx" held (or "epmutex" if called from
* ep_free).
*/
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
struct list_head *lsthead = &epi->pwqlist;
struct eppoll_entry *pwq;
while (!list_empty(lsthead)) {
pwq = list_first_entry(lsthead, struct eppoll_entry, llink);
list_del(&pwq->llink);
ep_remove_wait_queue(pwq);
kmem_cache_free(pwq_cache, pwq);
}
}
/**
* ep_scan_ready_list - Scans the ready list in a way that makes it possible
* for the scan code to call f_op->poll(). Also allows for
* O(NumReady) performance.
*
* @ep: Pointer to the epoll private data structure.
* @sproc: Pointer to the scan callback.
* @priv: Private opaque data passed to the @sproc callback.
* @depth: The current depth of recursive f_op->poll calls.
*
* Returns: The same integer error code returned by the @sproc callback.
*/
static int ep_scan_ready_list(struct eventpoll *ep,
int (*sproc)(struct eventpoll *,
struct list_head *, void *),
void *priv,
int depth)
{
int error, pwake = 0;
unsigned long flags;
struct epitem *epi, *nepi;
LIST_HEAD(txlist);
/*
* We need to lock this because we could be hit by
* eventpoll_release_file() and epoll_ctl().
*/
mutex_lock_nested(&ep->mtx, depth);
/*
* Steal the ready list, and re-init the original one to the
* empty list. Also, set ep->ovflist to NULL so that events
* happening while looping w/out locks, are not lost. We cannot
* have the poll callback to queue directly on ep->rdllist,
* because we want the "sproc" callback to be able to do it
* in a lockless way.
*/
spin_lock_irqsave(&ep->lock, flags);
list_splice_init(&ep->rdllist, &txlist);
ep->ovflist = NULL;
spin_unlock_irqrestore(&ep->lock, flags);
/*
* Now call the callback function.
*/
error = (*sproc)(ep, &txlist, priv);
spin_lock_irqsave(&ep->lock, flags);
/*
* During the time we spent inside the "sproc" callback, some
* other events might have been queued by the poll callback.
* We re-insert them inside the main ready-list here.
*/
for (nepi = ep->ovflist; (epi = nepi) != NULL;
nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
/*
* We need to check if the item is already in the list.
* During the "sproc" callback execution time, items are
* queued into ->ovflist but the "txlist" might already
* contain them, and the list_splice() below takes care of them.
*/
if (!ep_is_linked(&epi->rdllink))
list_add_tail(&epi->rdllink, &ep->rdllist);
}
/*
* We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
* releasing the lock, events will be queued in the normal way inside
* ep->rdllist.
*/
ep->ovflist = EP_UNACTIVE_PTR;
/*
* Quickly re-inject items left on "txlist".
*/
list_splice(&txlist, &ep->rdllist);
if (!list_empty(&ep->rdllist)) {
/*
* Wake up (if active) both the eventpoll wait list and
* the ->poll() wait list (delayed after we release the lock).
*/
if (waitqueue_active(&ep->wq))
wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
spin_unlock_irqrestore(&ep->lock, flags);
mutex_unlock(&ep->mtx);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&ep->poll_wait);
return error;
}
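/*
 * Condensed timeline of ep_scan_ready_list() above (illustrative only):
 *
 *	1) steal:    rdllist is spliced onto txlist, ovflist = NULL
 *	2) scan:     sproc() walks txlist locklessly; concurrent
 *	             ep_poll_callback() invocations queue new events on
 *	             ovflist instead of rdllist
 *	3) reinject: ovflist is drained back into rdllist, ovflist is
 *	             reset to EP_UNACTIVE_PTR, leftover txlist entries are
 *	             re-spliced into rdllist, and waiters are woken if the
 *	             list ends up non-empty
 */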
/*
* Removes a "struct epitem" from the eventpoll RB tree and deallocates
* all the associated resources. Must be called with "mtx" held.
*/
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
unsigned long flags;
struct file *file = epi->ffd.file;
/*
* Removes poll wait queue hooks. We _have_ to do this without holding
* the "ep->lock" otherwise a deadlock might occur. This because of the
* sequence of the lock acquisition. Here we do "ep->lock" then the wait
* queue head lock when unregistering the wait queue. The wakeup callback
* will run by holding the wait queue head lock and will call our callback
* that will try to get "ep->lock".
*/
ep_unregister_pollwait(ep, epi);
/* Remove the current item from the list of epoll hooks */
spin_lock(&file->f_lock);
if (ep_is_linked(&epi->fllink))
list_del_init(&epi->fllink);
spin_unlock(&file->f_lock);
rb_erase(&epi->rbn, &ep->rbr);
spin_lock_irqsave(&ep->lock, flags);
if (ep_is_linked(&epi->rdllink))
list_del_init(&epi->rdllink);
spin_unlock_irqrestore(&ep->lock, flags);
/* At this point it is safe to free the eventpoll item */
kmem_cache_free(epi_cache, epi);
atomic_long_dec(&ep->user->epoll_watches);
return 0;
}
static void ep_free(struct eventpoll *ep)
{
struct rb_node *rbp;
struct epitem *epi;
/* We need to release all tasks waiting on this file */
if (waitqueue_active(&ep->poll_wait))
ep_poll_safewake(&ep->poll_wait);
/*
* We need to lock this because we could be hit by
* eventpoll_release_file() while we're freeing the "struct eventpoll".
* We do not need to hold "ep->mtx" here because the epoll file
* is on the way to be removed and no one has references to it
* anymore. The only hit might come from eventpoll_release_file() but
* holding "epmutex" is sufficient here.
*/
mutex_lock(&epmutex);
/*
* Walks through the whole tree by unregistering poll callbacks.
*/
for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn);
ep_unregister_pollwait(ep, epi);
}
/*
* Walks through the whole tree by freeing each "struct epitem". At this
* point we are sure no poll callbacks will be lingering around, and also by
* holding "epmutex" we can be sure that no file cleanup code will hit
* us during this operation. So we can avoid the lock on "ep->lock".
*/
while ((rbp = rb_first(&ep->rbr)) != NULL) {
epi = rb_entry(rbp, struct epitem, rbn);
ep_remove(ep, epi);
}
mutex_unlock(&epmutex);
mutex_destroy(&ep->mtx);
free_uid(ep->user);
kfree(ep);
}
static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
struct eventpoll *ep = file->private_data;
if (ep)
ep_free(ep);
return 0;
}
static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
void *priv)
{
struct epitem *epi, *tmp;
list_for_each_entry_safe(epi, tmp, head, rdllink) {
if (epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
epi->event.events)
return POLLIN | POLLRDNORM;
else {
/*
* Item has been dropped into the ready list by the poll
* callback, but it's not actually ready, as far as the
* caller-requested events go. We can remove it here.
*/
list_del_init(&epi->rdllink);
}
}
return 0;
}
static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
{
return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
}
static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
int pollflags;
struct eventpoll *ep = file->private_data;
/* Insert inside our poll wait queue */
poll_wait(file, &ep->poll_wait, wait);
/*
* Proceed to find out if wanted events are really available inside
* the ready list. This needs to be done under ep_call_nested()
* supervision, since the call to f_op->poll() done on listed files
* could re-enter here.
*/
pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
ep_poll_readyevents_proc, ep, ep, current);
return pollflags != -1 ? pollflags : 0;
}
/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
.release = ep_eventpoll_release,
.poll = ep_eventpoll_poll,
.llseek = noop_llseek,
};
/*
* This is called from eventpoll_release() to unlink files from the eventpoll
* interface. We need to have this facility to cleanup correctly files that are
* closed without being removed from the eventpoll interface.
*/
void eventpoll_release_file(struct file *file)
{
struct list_head *lsthead = &file->f_ep_links;
struct eventpoll *ep;
struct epitem *epi;
/*
* We don't want to get "file->f_lock" because it is not
* necessary. It is not necessary because we're in the "struct file"
* cleanup path, and this means that no one is using this file anymore.
* So, for example, epoll_ctl() cannot hit here since if we reach this
* point, the file counter already went to zero and fget() would fail.
* The only hit might come from ep_free(), but holding the mutex
* will correctly serialize the operation. We do need to acquire
* "ep->mtx" after "epmutex" because ep_remove() requires it when called
* from anywhere but ep_free().
*
* Besides, ep_remove() acquires the lock, so we can't hold it here.
*/
mutex_lock(&epmutex);
while (!list_empty(lsthead)) {
epi = list_first_entry(lsthead, struct epitem, fllink);
ep = epi->ep;
list_del_init(&epi->fllink);
mutex_lock_nested(&ep->mtx, 0);
ep_remove(ep, epi);
mutex_unlock(&ep->mtx);
}
mutex_unlock(&epmutex);
}
static int ep_alloc(struct eventpoll **pep)
{
int error;
struct user_struct *user;
struct eventpoll *ep;
user = get_current_user();
error = -ENOMEM;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (unlikely(!ep))
goto free_uid;
spin_lock_init(&ep->lock);
mutex_init(&ep->mtx);
init_waitqueue_head(&ep->wq);
init_waitqueue_head(&ep->poll_wait);
INIT_LIST_HEAD(&ep->rdllist);
ep->rbr = RB_ROOT;
ep->ovflist = EP_UNACTIVE_PTR;
ep->user = user;
*pep = ep;
return 0;
free_uid:
free_uid(user);
return error;
}
/*
* Search the file inside the eventpoll tree. The RB tree operations
* are protected by the "mtx" mutex, and ep_find() must be called with
* "mtx" held.
*/
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
int kcmp;
struct rb_node *rbp;
struct epitem *epi, *epir = NULL;
struct epoll_filefd ffd;
ep_set_ffd(&ffd, file, fd);
for (rbp = ep->rbr.rb_node; rbp; ) {
epi = rb_entry(rbp, struct epitem, rbn);
kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
if (kcmp > 0)
rbp = rbp->rb_right;
else if (kcmp < 0)
rbp = rbp->rb_left;
else {
epir = epi;
break;
}
}
return epir;
}
/*
* This is the callback that is passed to the wait queue wakeup
* mechanism. It is called by the stored file descriptors when they
* have events to report.
*/
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
int pwake = 0;
unsigned long flags;
struct epitem *epi = ep_item_from_wait(wait);
struct eventpoll *ep = epi->ep;
if ((unsigned long)key & POLLFREE) {
ep_pwq_from_wait(wait)->whead = NULL;
/*
* whead = NULL above can race with ep_remove_wait_queue()
* which can do another remove_wait_queue() after us, so we
* can't use __remove_wait_queue(). whead->lock is held by
* the caller.
*/
list_del_init(&wait->task_list);
}
spin_lock_irqsave(&ep->lock, flags);
/*
* If the event mask does not contain any poll(2) event, we consider the
* descriptor to be disabled. This condition is likely the effect of the
* EPOLLONESHOT bit that disables the descriptor when an event is received,
* until the next EPOLL_CTL_MOD is issued.
*/
if (!(epi->event.events & ~EP_PRIVATE_BITS))
goto out_unlock;
/*
* Check the events coming with the callback. At this stage, not
* every device reports the events in the "key" parameter of the
* callback. We need to be able to handle both cases here, hence the
* test for "key" != NULL before the event match test.
*/
if (key && !((unsigned long) key & epi->event.events))
goto out_unlock;
/*
* If we are transferring events to userspace, we can hold no locks
* (because we're accessing user memory, and because of linux f_op->poll()
* semantics). All the events that happen during that period of time are
* chained in ep->ovflist and requeued later on.
*/
if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
if (epi->next == EP_UNACTIVE_PTR) {
epi->next = ep->ovflist;
ep->ovflist = epi;
}
goto out_unlock;
}
/* If this file is already in the ready list we exit soon */
if (!ep_is_linked(&epi->rdllink))
list_add_tail(&epi->rdllink, &ep->rdllist);
/*
* Wake up ( if active ) both the eventpoll wait list and the ->poll()
* wait list.
*/
if (waitqueue_active(&ep->wq))
wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
out_unlock:
spin_unlock_irqrestore(&ep->lock, flags);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&ep->poll_wait);
return 1;
}
/*
* This is the callback that is used to add our wait queue to the
* target file wakeup lists.
*/
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
poll_table *pt)
{
struct epitem *epi = ep_item_from_epqueue(pt);
struct eppoll_entry *pwq;
if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
pwq->whead = whead;
pwq->base = epi;
add_wait_queue(whead, &pwq->wait);
list_add_tail(&pwq->llink, &epi->pwqlist);
epi->nwait++;
} else {
/* We have to signal that an error occurred */
epi->nwait = -1;
}
}
static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
int kcmp;
struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
struct epitem *epic;
while (*p) {
parent = *p;
epic = rb_entry(parent, struct epitem, rbn);
kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
if (kcmp > 0)
p = &parent->rb_right;
else
p = &parent->rb_left;
}
rb_link_node(&epi->rbn, parent, p);
rb_insert_color(&epi->rbn, &ep->rbr);
}
#define PATH_ARR_SIZE 5
/*
* These are the numbers of paths of length 1 to 5 that we allow to emanate
* from a single file of interest. For example, we allow 1000 paths of length
* 1 to emanate from each file of interest. This essentially represents the
* potential wakeup paths, which need to be limited in order to avoid massive
* uncontrolled wakeup storms. The common use case should be a single ep which
* is connected to n file sources. In this case each file source has 1 path
* of length 1. Thus, the numbers below should be more than sufficient. These
* path limits are enforced during an EPOLL_CTL_ADD operation, since a modify
* and delete can't add additional paths. Protected by the epmutex.
*/
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];
static int path_count_inc(int nests)
{
/* Allow an arbitrary number of depth 1 paths */
if (nests == 0)
return 0;
if (++path_count[nests] > path_limits[nests])
return -1;
return 0;
}
static void path_count_init(void)
{
int i;
for (i = 0; i < PATH_ARR_SIZE; i++)
path_count[i] = 0;
}
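/*
 * Example (illustrative): a target file reachable through three nested
 * epoll sets contributes a path of length 4, so reverse_path_check_proc()
 * below calls path_count_inc(3), and the EPOLL_CTL_ADD is rejected once
 * more than path_limits[3] == 50 such paths exist. Depth-1 paths
 * (nests == 0) are never limited.
 */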
static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
{
int error = 0;
struct file *file = priv;
struct file *child_file;
struct epitem *epi;
list_for_each_entry(epi, &file->f_ep_links, fllink) {
child_file = epi->ep->file;
if (is_file_epoll(child_file)) {
if (list_empty(&child_file->f_ep_links)) {
if (path_count_inc(call_nests)) {
error = -1;
break;
}
} else {
error = ep_call_nested(&poll_loop_ncalls,
EP_MAX_NESTS,
reverse_path_check_proc,
child_file, child_file,
current);
}
if (error != 0)
break;
} else {
printk(KERN_ERR "reverse_path_check_proc: "
"file is not an ep!\n");
}
}
return error;
}
/**
* reverse_path_check - The tfile_check_list is list of file *, which have
* links that are proposed to be newly added. We need to
* make sure that those added links don't add too many
* paths such that we will spend all our time waking up
* eventpoll objects.
*
* Returns: Returns zero if the proposed links don't create too many paths,
* -1 otherwise.
*/
static int reverse_path_check(void)
{
int length = 0;
int error = 0;
struct file *current_file;
/* let's call this for all tfiles */
list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
length++;
path_count_init();
error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
reverse_path_check_proc, current_file,
current_file, current);
if (error)
break;
}
return error;
}
/*
* Must be called with "mtx" held.
*/
static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
struct file *tfile, int fd)
{
int error, revents, pwake = 0;
unsigned long flags;
long user_watches;
struct epitem *epi;
struct ep_pqueue epq;
user_watches = atomic_long_read(&ep->user->epoll_watches);
if (unlikely(user_watches >= max_user_watches))
return -ENOSPC;
if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
return -ENOMEM;
/* Item initialization follow here ... */
INIT_LIST_HEAD(&epi->rdllink);
INIT_LIST_HEAD(&epi->fllink);
INIT_LIST_HEAD(&epi->pwqlist);
epi->ep = ep;
ep_set_ffd(&epi->ffd, tfile, fd);
epi->event = *event;
epi->nwait = 0;
epi->next = EP_UNACTIVE_PTR;
/* Initialize the poll table using the queue callback */
epq.epi = epi;
init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
/*
* Attach the item to the poll hooks and get current event bits.
* We can safely use the file* here because its usage count has
* been increased by the caller of this function. Note that after
* this operation completes, the poll callback can start hitting
* the new item.
*/
revents = tfile->f_op->poll(tfile, &epq.pt);
/*
* We have to check if something went wrong during the poll wait queue
* install process. Namely, an allocation for a wait queue failed due
* to high memory pressure.
*/
error = -ENOMEM;
if (epi->nwait < 0)
goto error_unregister;
/* Add the current item to the list of active epoll hook for this file */
spin_lock(&tfile->f_lock);
list_add_tail(&epi->fllink, &tfile->f_ep_links);
spin_unlock(&tfile->f_lock);
/*
* Add the current item to the RB tree. All RB tree operations are
* protected by "mtx", and ep_insert() is called with "mtx" held.
*/
ep_rbtree_insert(ep, epi);
/* now check if we've created too many backpaths */
error = -EINVAL;
if (reverse_path_check())
goto error_remove_epi;
/* We have to drop the new item inside our item list to keep track of it */
spin_lock_irqsave(&ep->lock, flags);
/* If the file is already "ready" we drop it inside the ready list */
if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
/* Notify waiting tasks that events are available */
if (waitqueue_active(&ep->wq))
wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
spin_unlock_irqrestore(&ep->lock, flags);
atomic_long_inc(&ep->user->epoll_watches);
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&ep->poll_wait);
return 0;
error_remove_epi:
spin_lock(&tfile->f_lock);
if (ep_is_linked(&epi->fllink))
list_del_init(&epi->fllink);
spin_unlock(&tfile->f_lock);
rb_erase(&epi->rbn, &ep->rbr);
error_unregister:
ep_unregister_pollwait(ep, epi);
/*
* We need to do this because an event could have arrived on some
* allocated wait queue. Note that we don't care about the ep->ovflist
* list, since that is used/cleaned only inside a section bound by "mtx".
* And ep_insert() is called with "mtx" held.
*/
spin_lock_irqsave(&ep->lock, flags);
if (ep_is_linked(&epi->rdllink))
list_del_init(&epi->rdllink);
spin_unlock_irqrestore(&ep->lock, flags);
kmem_cache_free(epi_cache, epi);
return error;
}
/*
* Modify the interest event mask by dropping an event if the new mask
* has a match in the current file status. Must be called with "mtx" held.
*/
static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
{
int pwake = 0;
unsigned int revents;
/*
* Set the new event interest mask before calling f_op->poll();
* otherwise we might miss an event that happens between the
* f_op->poll() call and the new event set registering.
*/
epi->event.events = event->events;
epi->event.data = event->data; /* protected by mtx */
/*
* Get current event bits. We can safely use the file* here because
* its usage count has been increased by the caller of this function.
*/
revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL);
/*
* If the item is "hot" and it is not registered inside the ready
* list, push it inside.
*/
if (revents & event->events) {
spin_lock_irq(&ep->lock);
if (!ep_is_linked(&epi->rdllink)) {
list_add_tail(&epi->rdllink, &ep->rdllist);
/* Notify waiting tasks that events are available */
if (waitqueue_active(&ep->wq))
wake_up_locked(&ep->wq);
if (waitqueue_active(&ep->poll_wait))
pwake++;
}
spin_unlock_irq(&ep->lock);
}
/* We have to call this outside the lock */
if (pwake)
ep_poll_safewake(&ep->poll_wait);
return 0;
}
static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
void *priv)
{
struct ep_send_events_data *esed = priv;
int eventcnt;
unsigned int revents;
struct epitem *epi;
struct epoll_event __user *uevent;
/*
* We can loop without lock because we are passed a task private list.
* Items cannot vanish during the loop because ep_scan_ready_list() is
* holding "mtx" during this call.
*/
for (eventcnt = 0, uevent = esed->events;
!list_empty(head) && eventcnt < esed->maxevents;) {
epi = list_first_entry(head, struct epitem, rdllink);
list_del_init(&epi->rdllink);
revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
epi->event.events;
/*
* If the event mask intersects the caller-requested one,
* deliver the event to userspace. Again, ep_scan_ready_list()
* is holding "mtx", so no operations coming from userspace
* can change the item.
*/
if (revents) {
if (__put_user(revents, &uevent->events) ||
__put_user(epi->event.data, &uevent->data)) {
list_add(&epi->rdllink, head);
return eventcnt ? eventcnt : -EFAULT;
}
eventcnt++;
uevent++;
if (epi->event.events & EPOLLONESHOT)
epi->event.events &= EP_PRIVATE_BITS;
else if (!(epi->event.events & EPOLLET)) {
/*
* If this file has been added with Level
* Trigger mode, we need to insert back inside
* the ready list, so that the next call to
* epoll_wait() will check again the events
* availability. At this point, no one can insert
* into ep->rdllist besides us. The epoll_ctl()
* callers are locked out by
* ep_scan_ready_list() holding "mtx" and the
* poll callback will queue them in ep->ovflist.
*/
list_add_tail(&epi->rdllink, &ep->rdllist);
}
}
}
return eventcnt;
}
static int ep_send_events(struct eventpoll *ep,
struct epoll_event __user *events, int maxevents)
{
struct ep_send_events_data esed;
esed.maxevents = maxevents;
esed.events = events;
return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
}
static inline struct timespec ep_set_mstimeout(long ms)
{
struct timespec now, ts = {
.tv_sec = ms / MSEC_PER_SEC,
.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
};
ktime_get_ts(&now);
return timespec_add_safe(now, ts);
}
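/*
* Worked example (illustrative, not part of the original source): for
* ms = 2500, ts becomes { .tv_sec = 2, .tv_nsec = 500000000 }, and the
* function returns "now + ts", an absolute expiry time suited to the
* schedule_hrtimeout_range(..., HRTIMER_MODE_ABS) call in ep_poll().
*/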
/**
* ep_poll - Retrieves ready events, and delivers them to the caller supplied
* event buffer.
*
* @ep: Pointer to the eventpoll context.
* @events: Pointer to the userspace buffer where the ready events should be
* stored.
* @maxevents: Size (in terms of number of events) of the caller event buffer.
* @timeout: Maximum timeout for the ready events fetch operation, in
* milliseconds. If the @timeout is zero, the function will not block,
* while if the @timeout is less than zero, the function will block
* until at least one event has been retrieved (or an error
* occurred).
*
* Returns: Returns the number of ready events which have been fetched, or an
* error code, in case of error.
*/
static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
int maxevents, long timeout)
{
int res = 0, eavail, timed_out = 0;
unsigned long flags;
long slack = 0;
wait_queue_t wait;
ktime_t expires, *to = NULL;
if (timeout > 0) {
struct timespec end_time = ep_set_mstimeout(timeout);
slack = select_estimate_accuracy(&end_time);
to = &expires;
*to = timespec_to_ktime(end_time);
} else if (timeout == 0) {
/*
* Avoid the unnecessary trip to the wait queue loop, if the
* caller specified a non-blocking operation.
*/
timed_out = 1;
spin_lock_irqsave(&ep->lock, flags);
goto check_events;
}
fetch_events:
spin_lock_irqsave(&ep->lock, flags);
if (!ep_events_available(ep)) {
/*
* We don't have any available event to return to the caller.
* We need to sleep here, and we will be woken up by
* ep_poll_callback() when events become available.
*/
init_waitqueue_entry(&wait, current);
__add_wait_queue_exclusive(&ep->wq, &wait);
for (;;) {
/*
* We don't want to sleep if the ep_poll_callback() sends us
* a wakeup in between. That's why we set the task state
* to TASK_INTERRUPTIBLE before doing the checks.
*/
set_current_state(TASK_INTERRUPTIBLE);
if (ep_events_available(ep) || timed_out)
break;
if (signal_pending(current)) {
res = -EINTR;
break;
}
spin_unlock_irqrestore(&ep->lock, flags);
if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
timed_out = 1;
spin_lock_irqsave(&ep->lock, flags);
}
__remove_wait_queue(&ep->wq, &wait);
set_current_state(TASK_RUNNING);
}
check_events:
/* Is it worth trying to dig for events? */
eavail = ep_events_available(ep);
spin_unlock_irqrestore(&ep->lock, flags);
/*
* Try to transfer events to user space. In case we get 0 events and
* there's still timeout left over, we try again in search of
* more luck.
*/
if (!res && eavail &&
!(res = ep_send_events(ep, events, maxevents)) && !timed_out)
goto fetch_events;
return res;
}
/**
* ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
* API, to verify that adding an epoll file inside another
* epoll structure, does not violate the constraints, in
* terms of closed loops, or too deep chains (which can
* result in excessive stack usage).
*
* @priv: Pointer to the epoll file to be currently checked.
* @cookie: Original cookie for this call. This is the top-of-the-chain epoll
* data structure pointer.
* @call_nests: Current depth of the @ep_call_nested() call stack.
*
* Returns: Returns zero if adding the epoll @file inside current epoll
* structure @ep does not violate the constraints, or -1 otherwise.
*/
static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
{
int error = 0;
struct file *file = priv;
struct eventpoll *ep = file->private_data;
struct eventpoll *ep_tovisit;
struct rb_node *rbp;
struct epitem *epi;
mutex_lock_nested(&ep->mtx, call_nests + 1);
ep->visited = 1;
list_add(&ep->visited_list_link, &visited_list);
for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn);
if (unlikely(is_file_epoll(epi->ffd.file))) {
ep_tovisit = epi->ffd.file->private_data;
if (ep_tovisit->visited)
continue;
error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
ep_loop_check_proc, epi->ffd.file,
ep_tovisit, current);
if (error != 0)
break;
} else {
/*
* If we've reached a file that is not associated with
* an ep, then we need to check if the newly added
* links are going to add too many wakeup paths. We do
* this by adding it to the tfile_check_list, if it's
* not already there, and calling reverse_path_check()
* during ep_insert().
*/
if (list_empty(&epi->ffd.file->f_tfile_llink))
list_add(&epi->ffd.file->f_tfile_llink,
&tfile_check_list);
}
}
mutex_unlock(&ep->mtx);
return error;
}
/**
* ep_loop_check - Performs a check to verify that adding an epoll file (@file)
* inside another epoll file (represented by @ep) does not create
* closed loops or too deep chains.
*
* @ep: Pointer to the epoll private data structure.
* @file: Pointer to the epoll file to be checked.
*
* Returns: Returns zero if adding the epoll @file inside current epoll
* structure @ep does not violate the constraints, or -1 otherwise.
*/
static int ep_loop_check(struct eventpoll *ep, struct file *file)
{
int ret;
struct eventpoll *ep_cur, *ep_next;
ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
ep_loop_check_proc, file, ep, current);
/* clear visited list */
list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
visited_list_link) {
ep_cur->visited = 0;
list_del(&ep_cur->visited_list_link);
}
return ret;
}
static void clear_tfile_check_list(void)
{
struct file *file;
/* first clear the tfile_check_list */
while (!list_empty(&tfile_check_list)) {
file = list_first_entry(&tfile_check_list, struct file,
f_tfile_llink);
list_del_init(&file->f_tfile_llink);
}
INIT_LIST_HEAD(&tfile_check_list);
}
/*
* Open an eventpoll file descriptor.
*/
SYSCALL_DEFINE1(epoll_create1, int, flags)
{
int error, fd;
struct eventpoll *ep = NULL;
struct file *file;
/* Check the EPOLL_* constant for consistency. */
BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
if (flags & ~EPOLL_CLOEXEC)
return -EINVAL;
/*
* Create the internal data structure ("struct eventpoll").
*/
error = ep_alloc(&ep);
if (error < 0)
return error;
/*
* Creates all the items needed to set up an eventpoll file. That is,
* a file structure and a free file descriptor.
*/
fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
if (fd < 0) {
error = fd;
goto out_free_ep;
}
file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
O_RDWR | (flags & O_CLOEXEC));
if (IS_ERR(file)) {
error = PTR_ERR(file);
goto out_free_fd;
}
fd_install(fd, file);
ep->file = file;
return fd;
out_free_fd:
put_unused_fd(fd);
out_free_ep:
ep_free(ep);
return error;
}
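/*
* Userspace usage sketch (illustrative only, not part of this file):
*
*	int epfd = epoll_create1(EPOLL_CLOEXEC);
*	if (epfd < 0)
*		perror("epoll_create1");
*
* The returned fd is backed by the anonymous inode file created above
* and must eventually be released with close().
*/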
SYSCALL_DEFINE1(epoll_create, int, size)
{
if (size <= 0)
return -EINVAL;
return sys_epoll_create1(0);
}
/*
* The following function implements the controller interface for
* the eventpoll file that enables the insertion/removal/change of
* file descriptors inside the interest set.
*/
SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
struct epoll_event __user *, event)
{
int error;
int did_lock_epmutex = 0;
struct file *file, *tfile;
struct eventpoll *ep;
struct epitem *epi;
struct epoll_event epds;
error = -EFAULT;
if (ep_op_has_event(op) &&
copy_from_user(&epds, event, sizeof(struct epoll_event)))
goto error_return;
/* Get the "struct file *" for the eventpoll file */
error = -EBADF;
file = fget(epfd);
if (!file)
goto error_return;
/* Get the "struct file *" for the target file */
tfile = fget(fd);
if (!tfile)
goto error_fput;
/* The target file descriptor must support poll */
error = -EPERM;
if (!tfile->f_op || !tfile->f_op->poll)
goto error_tgt_fput;
/*
* We have to check that the file structure underneath the file descriptor
* the user passed to us _is_ an eventpoll file. And also we do not permit
* adding an epoll file descriptor inside itself.
*/
error = -EINVAL;
if (file == tfile || !is_file_epoll(file))
goto error_tgt_fput;
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
ep = file->private_data;
/*
* When we insert an epoll file descriptor, inside another epoll file
* descriptor, there is the chance of creating closed loops, which are
* better handled here than in more critical paths. While we are
* checking for loops we also determine the list of files reachable
* and hang them on the tfile_check_list, so we can check that we
* haven't created too many possible wakeup paths.
*
* We need to hold the epmutex across both ep_insert and ep_remove
* b/c we want to make sure we are looking at a coherent view of the
* epoll network.
*/
if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
mutex_lock(&epmutex);
did_lock_epmutex = 1;
}
if (op == EPOLL_CTL_ADD) {
if (is_file_epoll(tfile)) {
error = -ELOOP;
if (ep_loop_check(ep, tfile) != 0)
goto error_tgt_fput;
} else
list_add(&tfile->f_tfile_llink, &tfile_check_list);
}
mutex_lock_nested(&ep->mtx, 0);
/*
* Try to look up the file inside our RB tree. Since we grabbed "mtx"
* above, we can be sure to be able to use the item looked up by
* ep_find() till we release the mutex.
*/
epi = ep_find(ep, tfile, fd);
error = -EINVAL;
switch (op) {
case EPOLL_CTL_ADD:
if (!epi) {
epds.events |= POLLERR | POLLHUP;
error = ep_insert(ep, &epds, tfile, fd);
} else
error = -EEXIST;
clear_tfile_check_list();
break;
case EPOLL_CTL_DEL:
if (epi)
error = ep_remove(ep, epi);
else
error = -ENOENT;
break;
case EPOLL_CTL_MOD:
if (epi) {
epds.events |= POLLERR | POLLHUP;
error = ep_modify(ep, epi, &epds);
} else
error = -ENOENT;
break;
}
mutex_unlock(&ep->mtx);
error_tgt_fput:
if (did_lock_epmutex)
mutex_unlock(&epmutex);
fput(tfile);
error_fput:
fput(file);
error_return:
return error;
}
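/*
* Userspace usage sketch (illustrative only): registering a socket fd
* "sock" with the epoll instance "epfd" for input events:
*
*	struct epoll_event ev = { .events = EPOLLIN, .data.fd = sock };
*	if (epoll_ctl(epfd, EPOLL_CTL_ADD, sock, &ev) < 0)
*		perror("epoll_ctl");
*
* POLLERR and POLLHUP are reported even when not requested, since the
* ADD and MOD paths above OR them into epds.events unconditionally.
*/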
/*
* Implement the event wait interface for the eventpoll file. It is the kernel
* part of the user space epoll_wait(2).
*/
SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
int, maxevents, int, timeout)
{
int error;
struct file *file;
struct eventpoll *ep;
/* The maximum number of events must be greater than zero */
if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
return -EINVAL;
/* Verify that the area passed by the user is writeable */
if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) {
error = -EFAULT;
goto error_return;
}
/* Get the "struct file *" for the eventpoll file */
error = -EBADF;
file = fget(epfd);
if (!file)
goto error_return;
/*
* We have to check that the file structure underneath the fd
* the user passed to us _is_ an eventpoll file.
*/
error = -EINVAL;
if (!is_file_epoll(file))
goto error_fput;
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
ep = file->private_data;
/* Time to fish for events ... */
error = ep_poll(ep, events, maxevents, timeout);
error_fput:
fput(file);
error_return:
return error;
}
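/*
* Userspace usage sketch (illustrative only): a typical wait loop.
* handle_fd() is a hypothetical application callback.
*
*	struct epoll_event evs[64];
*	int i, n = epoll_wait(epfd, evs, 64, -1);
*	for (i = 0; i < n; i++)
*		handle_fd(evs[i].data.fd);
*
* A timeout of 0 polls without blocking; a negative timeout blocks
* until at least one event or a signal arrives (see ep_poll() above).
*/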
#ifdef HAVE_SET_RESTORE_SIGMASK
/*
* Implement the event wait interface for the eventpoll file. It is the kernel
* part of the user space epoll_pwait(2).
*/
SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
int, maxevents, int, timeout, const sigset_t __user *, sigmask,
size_t, sigsetsize)
{
int error;
sigset_t ksigmask, sigsaved;
/*
* If the caller wants a certain signal mask to be set during the wait,
* we apply it here.
*/
if (sigmask) {
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
return -EFAULT;
sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
}
error = sys_epoll_wait(epfd, events, maxevents, timeout);
/*
* If we changed the signal mask, we need to restore the original one.
* In case we've got a signal while waiting, we do not restore the
* signal mask yet, and we allow do_signal() to deliver the signal on
* the way back to userspace, before the signal mask is restored.
*/
if (sigmask) {
if (error == -EINTR) {
memcpy(&current->saved_sigmask, &sigsaved,
sizeof(sigsaved));
set_restore_sigmask();
} else
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
}
return error;
}
#endif /* HAVE_SET_RESTORE_SIGMASK */
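/*
* Userspace usage sketch for epoll_pwait() (illustrative only): with
* SIGINT normally blocked via sigprocmask(), passing an empty mask
* atomically unblocks it for the duration of the wait (evs as in the
* epoll_wait sketch above):
*
*	sigset_t emptymask;
*	sigemptyset(&emptymask);
*	int n = epoll_pwait(epfd, evs, 64, -1, &emptymask);
*
* This avoids the race of a separate sigprocmask()/epoll_wait() pair,
* which is what the sigsaved/set_restore_sigmask() logic above backs.
*/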
static int __init eventpoll_init(void)
{
struct sysinfo si;
si_meminfo(&si);
/*
* Allows top 4% of lomem to be allocated for epoll watches (per user).
*/
max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
EP_ITEM_COST;
BUG_ON(max_user_watches < 0);
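/*
* Worked example (illustrative): with 1 GiB of low memory, 4% is
* roughly 41 MiB; dividing by EP_ITEM_COST (the approximate kernel
* footprint of one watch) yields the per-user watch limit. The exact
* value depends on PAGE_SHIFT and the size of the epoll structures.
*/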
/*
* Initialize the structure used to perform epoll file descriptor
* inclusion loops checks.
*/
ep_nested_calls_init(&poll_loop_ncalls);
/* Initialize the structure used to perform safe poll wait head wake ups */
ep_nested_calls_init(&poll_safewake_ncalls);
/* Initialize the structure used to perform file's f_op->poll() calls */
ep_nested_calls_init(&poll_readywalk_ncalls);
/* Allocates slab cache used to allocate "struct epitem" items */
epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
/* Allocates slab cache used to allocate "struct eppoll_entry" */
pwq_cache = kmem_cache_create("eventpoll_pwq",
sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL);
return 0;
}
fs_initcall(eventpoll_init);
| gpl-2.0 |
TianyuanPan/mac82011_wifi_driver | drivers/net/wireless/orinoco/wext.c | 706 | 34746 | /* Wireless extensions support.
*
* See copyright notice in main.c
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <net/iw_handler.h>
#include <net/cfg80211.h>
#include <net/cfg80211-wext.h>
#include "hermes.h"
#include "hermes_rid.h"
#include "orinoco.h"
#include "hw.h"
#include "mic.h"
#include "scan.h"
#include "main.h"
#include "wext.h"
#define MAX_RID_LEN 1024
/* Helper routine to record keys
* It is called under orinoco_lock so it may not sleep */
static int orinoco_set_key(struct orinoco_private *priv, int index,
enum orinoco_alg alg, const u8 *key, int key_len,
const u8 *seq, int seq_len)
{
kzfree(priv->keys[index].key);
kzfree(priv->keys[index].seq);
if (key_len) {
priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
if (!priv->keys[index].key)
goto nomem;
} else
priv->keys[index].key = NULL;
if (seq_len) {
priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
if (!priv->keys[index].seq)
goto free_key;
} else
priv->keys[index].seq = NULL;
priv->keys[index].key_len = key_len;
priv->keys[index].seq_len = seq_len;
if (key_len)
memcpy((void *)priv->keys[index].key, key, key_len);
if (seq_len)
memcpy((void *)priv->keys[index].seq, seq, seq_len);
switch (alg) {
case ORINOCO_ALG_TKIP:
priv->keys[index].cipher = WLAN_CIPHER_SUITE_TKIP;
break;
case ORINOCO_ALG_WEP:
priv->keys[index].cipher = (key_len > SMALL_KEY_SIZE) ?
WLAN_CIPHER_SUITE_WEP104 : WLAN_CIPHER_SUITE_WEP40;
break;
case ORINOCO_ALG_NONE:
default:
priv->keys[index].cipher = 0;
break;
}
return 0;
free_key:
kfree(priv->keys[index].key);
priv->keys[index].key = NULL;
nomem:
priv->keys[index].key_len = 0;
priv->keys[index].seq_len = 0;
priv->keys[index].cipher = 0;
return -ENOMEM;
}
static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
{
struct orinoco_private *priv = ndev_priv(dev);
struct hermes *hw = &priv->hw;
struct iw_statistics *wstats = &priv->wstats;
int err;
unsigned long flags;
if (!netif_device_present(dev)) {
printk(KERN_WARNING "%s: get_wireless_stats() called while device not present\n",
dev->name);
return NULL; /* FIXME: Can we do better than this? */
}
/* If busy, return the old stats. Returning NULL may cause
* the interface to disappear from /proc/net/wireless */
if (orinoco_lock(priv, &flags) != 0)
return wstats;
/* We can't really wait for the tallies inquiry command to
* complete, so we just use the previous results and trigger
* a new tallies inquiry command for next time - Jean II */
/* FIXME: Really we should wait for the inquiry to come back -
* as it is the stats we give don't make a whole lot of sense.
* Unfortunately, it's not clear how to do that within the
* wireless extensions framework: I think we're in user
* context, but a lock seems to be held by the time we get in
* here so we're not safe to sleep here. */
hermes_inquire(hw, HERMES_INQ_TALLIES);
if (priv->iw_mode == NL80211_IFTYPE_ADHOC) {
memset(&wstats->qual, 0, sizeof(wstats->qual));
/* If a spy address is defined, we report stats of the
* first spy address - Jean II */
if (SPY_NUMBER(priv)) {
wstats->qual.qual = priv->spy_data.spy_stat[0].qual;
wstats->qual.level = priv->spy_data.spy_stat[0].level;
wstats->qual.noise = priv->spy_data.spy_stat[0].noise;
wstats->qual.updated =
priv->spy_data.spy_stat[0].updated;
}
} else {
struct {
__le16 qual, signal, noise, unused;
} __packed cq;
err = HERMES_READ_RECORD(hw, USER_BAP,
HERMES_RID_COMMSQUALITY, &cq);
if (!err) {
wstats->qual.qual = (int)le16_to_cpu(cq.qual);
wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95;
wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95;
wstats->qual.updated =
IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
}
}
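/*
* Worked example (illustrative): a raw cq.signal of 0x50 (80) yields
* 80 - 0x95 = -69 dBm, the scale that IW_QUAL_DBM advertises to
* userspace.
*/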
orinoco_unlock(priv, &flags);
return wstats;
}
/********************************************************************/
/* Wireless extensions */
/********************************************************************/
static int orinoco_ioctl_setwap(struct net_device *dev,
struct iw_request_info *info,
struct sockaddr *ap_addr,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
int err = -EINPROGRESS; /* Call commit handler */
unsigned long flags;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
/* Enable automatic roaming - no sanity checks are needed */
if (is_zero_ether_addr(ap_addr->sa_data) ||
is_broadcast_ether_addr(ap_addr->sa_data)) {
priv->bssid_fixed = 0;
memset(priv->desired_bssid, 0, ETH_ALEN);
/* "off" means keep existing connection */
if (ap_addr->sa_data[0] == 0) {
__orinoco_hw_set_wap(priv);
err = 0;
}
goto out;
}
if (priv->firmware_type == FIRMWARE_TYPE_AGERE) {
printk(KERN_WARNING "%s: Lucent/Agere firmware doesn't "
"support manual roaming\n",
dev->name);
err = -EOPNOTSUPP;
goto out;
}
if (priv->iw_mode != NL80211_IFTYPE_STATION) {
printk(KERN_WARNING "%s: Manual roaming supported only in "
"managed mode\n", dev->name);
err = -EOPNOTSUPP;
goto out;
}
/* Intersil firmware hangs without Desired ESSID */
if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL &&
strlen(priv->desired_essid) == 0) {
printk(KERN_WARNING "%s: Desired ESSID must be set for "
"manual roaming\n", dev->name);
err = -EOPNOTSUPP;
goto out;
}
/* Finally, enable manual roaming */
priv->bssid_fixed = 1;
memcpy(priv->desired_bssid, &ap_addr->sa_data, ETH_ALEN);
out:
orinoco_unlock(priv, &flags);
return err;
}
static int orinoco_ioctl_getwap(struct net_device *dev,
struct iw_request_info *info,
struct sockaddr *ap_addr,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
int err = 0;
unsigned long flags;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
ap_addr->sa_family = ARPHRD_ETHER;
err = orinoco_hw_get_current_bssid(priv, ap_addr->sa_data);
orinoco_unlock(priv, &flags);
return err;
}
static int orinoco_ioctl_setiwencode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *erq,
char *keybuf)
{
struct orinoco_private *priv = ndev_priv(dev);
int index = (erq->flags & IW_ENCODE_INDEX) - 1;
int setindex = priv->tx_key;
enum orinoco_alg encode_alg = priv->encode_alg;
int restricted = priv->wep_restrict;
int err = -EINPROGRESS; /* Call commit handler */
unsigned long flags;
if (!priv->has_wep)
return -EOPNOTSUPP;
if (erq->pointer) {
/* We actually have a key to set - check its length */
if (erq->length > LARGE_KEY_SIZE)
return -E2BIG;
if ((erq->length > SMALL_KEY_SIZE) && !priv->has_big_wep)
return -E2BIG;
}
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
/* Clear any TKIP key we have */
if ((priv->has_wpa) && (priv->encode_alg == ORINOCO_ALG_TKIP))
(void) orinoco_clear_tkip_key(priv, setindex);
if (erq->length > 0) {
if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
index = priv->tx_key;
/* Switch on WEP if off */
if (encode_alg != ORINOCO_ALG_WEP) {
setindex = index;
encode_alg = ORINOCO_ALG_WEP;
}
} else {
/* Important note : if the user does "iwconfig eth0 enc off",
* we will arrive here with an index of -1. This is valid
* but needs to be taken care of... Jean II */
if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) {
if ((index != -1) || (erq->flags == 0)) {
err = -EINVAL;
goto out;
}
} else {
/* Set the index : Check that the key is valid */
if (priv->keys[index].key_len == 0) {
err = -EINVAL;
goto out;
}
setindex = index;
}
}
if (erq->flags & IW_ENCODE_DISABLED)
encode_alg = ORINOCO_ALG_NONE;
if (erq->flags & IW_ENCODE_OPEN)
restricted = 0;
if (erq->flags & IW_ENCODE_RESTRICTED)
restricted = 1;
if (erq->pointer && erq->length > 0) {
err = orinoco_set_key(priv, index, ORINOCO_ALG_WEP, keybuf,
erq->length, NULL, 0);
}
priv->tx_key = setindex;
/* Try fast key change if connected and only keys are changed */
if ((priv->encode_alg == encode_alg) &&
(priv->wep_restrict == restricted) &&
netif_carrier_ok(dev)) {
err = __orinoco_hw_setup_wepkeys(priv);
/* No need to commit if successful */
goto out;
}
priv->encode_alg = encode_alg;
priv->wep_restrict = restricted;
out:
orinoco_unlock(priv, &flags);
return err;
}
static int orinoco_ioctl_getiwencode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *erq,
char *keybuf)
{
struct orinoco_private *priv = ndev_priv(dev);
int index = (erq->flags & IW_ENCODE_INDEX) - 1;
unsigned long flags;
if (!priv->has_wep)
return -EOPNOTSUPP;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
index = priv->tx_key;
erq->flags = 0;
if (!priv->encode_alg)
erq->flags |= IW_ENCODE_DISABLED;
erq->flags |= index + 1;
if (priv->wep_restrict)
erq->flags |= IW_ENCODE_RESTRICTED;
else
erq->flags |= IW_ENCODE_OPEN;
erq->length = priv->keys[index].key_len;
memcpy(keybuf, priv->keys[index].key, erq->length);
orinoco_unlock(priv, &flags);
return 0;
}
static int orinoco_ioctl_setessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *erq,
char *essidbuf)
{
struct orinoco_private *priv = ndev_priv(dev);
unsigned long flags;
/* Note : ESSID is ignored in Ad-Hoc demo mode, but we can set it
* anyway... - Jean II */
/* Hum... Should not use Wireless Extension constant (may change),
* should use our own... - Jean II */
if (erq->length > IW_ESSID_MAX_SIZE)
return -E2BIG;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
/* NULL the string (for NULL termination & ESSID = ANY) - Jean II */
memset(priv->desired_essid, 0, sizeof(priv->desired_essid));
/* If not ANY, get the new ESSID */
if (erq->flags)
memcpy(priv->desired_essid, essidbuf, erq->length);
orinoco_unlock(priv, &flags);
return -EINPROGRESS; /* Call commit handler */
}
static int orinoco_ioctl_getessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *erq,
char *essidbuf)
{
struct orinoco_private *priv = ndev_priv(dev);
int active;
int err = 0;
unsigned long flags;
if (netif_running(dev)) {
err = orinoco_hw_get_essid(priv, &active, essidbuf);
if (err < 0)
return err;
erq->length = err;
} else {
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
memcpy(essidbuf, priv->desired_essid, IW_ESSID_MAX_SIZE);
erq->length = strlen(priv->desired_essid);
orinoco_unlock(priv, &flags);
}
erq->flags = 1;
return 0;
}
static int orinoco_ioctl_setfreq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *frq,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
int chan = -1;
unsigned long flags;
int err = -EINPROGRESS; /* Call commit handler */
/* In infrastructure mode the AP sets the channel */
if (priv->iw_mode == NL80211_IFTYPE_STATION)
return -EBUSY;
if ((frq->e == 0) && (frq->m <= 1000)) {
/* Setting by channel number */
chan = frq->m;
} else {
/* Setting by frequency */
int denom = 1;
int i;
/* Calculate denominator to rescale to MHz */
for (i = 0; i < (6 - frq->e); i++)
denom *= 10;
chan = ieee80211_frequency_to_channel(frq->m / denom);
}
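/*
* Worked example (illustrative): frq->m = 2437, frq->e = 6 encodes
* 2437 MHz directly (denom = 1), while frq->m = 2437000, frq->e = 3 is
* rescaled by denom = 10^(6-3) = 1000 to the same 2437 MHz, which
* ieee80211_frequency_to_channel() maps to channel 6.
*/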
if ((chan < 1) || (chan > NUM_CHANNELS) ||
!(priv->channel_mask & (1 << (chan - 1))))
return -EINVAL;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
priv->channel = chan;
if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
/* Fast channel change - no commit if successful */
struct hermes *hw = &priv->hw;
err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
HERMES_TEST_SET_CHANNEL,
chan, NULL);
}
orinoco_unlock(priv, &flags);
return err;
}
static int orinoco_ioctl_getfreq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *frq,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
int tmp;
/* Locking done in there */
tmp = orinoco_hw_get_freq(priv);
if (tmp < 0)
return tmp;
frq->m = tmp * 100000;
frq->e = 1;
return 0;
}
static int orinoco_ioctl_getsens(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *srq,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
struct hermes *hw = &priv->hw;
u16 val;
int err;
unsigned long flags;
if (!priv->has_sensitivity)
return -EOPNOTSUPP;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
err = hermes_read_wordrec(hw, USER_BAP,
HERMES_RID_CNFSYSTEMSCALE, &val);
orinoco_unlock(priv, &flags);
if (err)
return err;
srq->value = val;
srq->fixed = 0; /* auto */
return 0;
}
static int orinoco_ioctl_setsens(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *srq,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
int val = srq->value;
unsigned long flags;
if (!priv->has_sensitivity)
return -EOPNOTSUPP;
if ((val < 1) || (val > 3))
return -EINVAL;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
priv->ap_density = val;
orinoco_unlock(priv, &flags);
return -EINPROGRESS; /* Call commit handler */
}
static int orinoco_ioctl_setrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rrq,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
int ratemode;
int bitrate; /* 100s of kilobits */
unsigned long flags;
/* As the user space doesn't know our highest rate, it uses -1
* to ask us to set the highest rate. Test it using "iwconfig
* ethX rate auto" - Jean II */
if (rrq->value == -1)
bitrate = 110;
else {
if (rrq->value % 100000)
return -EINVAL;
bitrate = rrq->value / 100000;
}
ratemode = orinoco_get_bitratemode(bitrate, !rrq->fixed);
if (ratemode == -1)
return -EINVAL;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
priv->bitratemode = ratemode;
orinoco_unlock(priv, &flags);
return -EINPROGRESS;
}
static int orinoco_ioctl_getrate(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *rrq,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
int err = 0;
int bitrate, automatic;
unsigned long flags;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
orinoco_get_ratemode_cfg(priv->bitratemode, &bitrate, &automatic);
/* If the interface is running we try to find out more about the
current mode */
if (netif_running(dev)) {
int act_bitrate;
int lerr;
/* Ignore errors if we can't get the actual bitrate */
lerr = orinoco_hw_get_act_bitrate(priv, &act_bitrate);
if (!lerr)
bitrate = act_bitrate;
}
orinoco_unlock(priv, &flags);
rrq->value = bitrate;
rrq->fixed = !automatic;
rrq->disabled = 0;
return err;
}
static int orinoco_ioctl_setpower(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *prq,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
int err = -EINPROGRESS; /* Call commit handler */
unsigned long flags;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
if (prq->disabled) {
priv->pm_on = 0;
} else {
switch (prq->flags & IW_POWER_MODE) {
case IW_POWER_UNICAST_R:
priv->pm_mcast = 0;
priv->pm_on = 1;
break;
case IW_POWER_ALL_R:
priv->pm_mcast = 1;
priv->pm_on = 1;
break;
case IW_POWER_ON:
/* No flags : but we may have a value - Jean II */
break;
default:
err = -EINVAL;
goto out;
}
if (prq->flags & IW_POWER_TIMEOUT) {
priv->pm_on = 1;
priv->pm_timeout = prq->value / 1000;
}
if (prq->flags & IW_POWER_PERIOD) {
priv->pm_on = 1;
priv->pm_period = prq->value / 1000;
}
/* It's valid to not have a value if we are just toggling
* the flags... Jean II */
if (!priv->pm_on) {
err = -EINVAL;
goto out;
}
}
out:
orinoco_unlock(priv, &flags);
return err;
}
static int orinoco_ioctl_getpower(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *prq,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
struct hermes *hw = &priv->hw;
int err = 0;
u16 enable, period, timeout, mcast;
unsigned long flags;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
err = hermes_read_wordrec(hw, USER_BAP,
HERMES_RID_CNFPMENABLED, &enable);
if (err)
goto out;
err = hermes_read_wordrec(hw, USER_BAP,
HERMES_RID_CNFMAXSLEEPDURATION, &period);
if (err)
goto out;
err = hermes_read_wordrec(hw, USER_BAP,
HERMES_RID_CNFPMHOLDOVERDURATION, &timeout);
if (err)
goto out;
err = hermes_read_wordrec(hw, USER_BAP,
HERMES_RID_CNFMULTICASTRECEIVE, &mcast);
if (err)
goto out;
prq->disabled = !enable;
/* Note : by default, display the period */
if ((prq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
prq->flags = IW_POWER_TIMEOUT;
prq->value = timeout * 1000;
} else {
prq->flags = IW_POWER_PERIOD;
prq->value = period * 1000;
}
if (mcast)
prq->flags |= IW_POWER_ALL_R;
else
prq->flags |= IW_POWER_UNICAST_R;
out:
orinoco_unlock(priv, &flags);
return err;
}
static int orinoco_ioctl_set_encodeext(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
int idx, alg = ext->alg, set_key = 1;
unsigned long flags;
int err = -EINVAL;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
/* Determine and validate the key index */
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
if ((idx < 1) || (idx > 4))
goto out;
idx--;
} else
idx = priv->tx_key;
if (encoding->flags & IW_ENCODE_DISABLED)
alg = IW_ENCODE_ALG_NONE;
if (priv->has_wpa && (alg != IW_ENCODE_ALG_TKIP)) {
/* Clear any TKIP TX key we had */
(void) orinoco_clear_tkip_key(priv, priv->tx_key);
}
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
priv->tx_key = idx;
set_key = ((alg == IW_ENCODE_ALG_TKIP) ||
(ext->key_len > 0)) ? 1 : 0;
}
if (set_key) {
/* Set the requested key first */
switch (alg) {
case IW_ENCODE_ALG_NONE:
priv->encode_alg = ORINOCO_ALG_NONE;
err = orinoco_set_key(priv, idx, ORINOCO_ALG_NONE,
NULL, 0, NULL, 0);
break;
case IW_ENCODE_ALG_WEP:
if (ext->key_len <= 0)
goto out;
priv->encode_alg = ORINOCO_ALG_WEP;
err = orinoco_set_key(priv, idx, ORINOCO_ALG_WEP,
ext->key, ext->key_len, NULL, 0);
break;
case IW_ENCODE_ALG_TKIP:
{
u8 *tkip_iv = NULL;
if (!priv->has_wpa ||
(ext->key_len > sizeof(struct orinoco_tkip_key)))
goto out;
priv->encode_alg = ORINOCO_ALG_TKIP;
if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
tkip_iv = &ext->rx_seq[0];
err = orinoco_set_key(priv, idx, ORINOCO_ALG_TKIP,
ext->key, ext->key_len, tkip_iv,
ORINOCO_SEQ_LEN);
err = __orinoco_hw_set_tkip_key(priv, idx,
ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY,
priv->keys[idx].key,
tkip_iv, ORINOCO_SEQ_LEN, NULL, 0);
if (err)
printk(KERN_ERR "%s: Error %d setting TKIP key"
"\n", dev->name, err);
goto out;
}
default:
goto out;
}
}
err = -EINPROGRESS;
out:
orinoco_unlock(priv, &flags);
return err;
}
static int orinoco_ioctl_get_encodeext(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
int idx, max_key_len;
unsigned long flags;
int err;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
err = -EINVAL;
max_key_len = encoding->length - sizeof(*ext);
if (max_key_len < 0)
goto out;
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
if ((idx < 1) || (idx > 4))
goto out;
idx--;
} else
idx = priv->tx_key;
encoding->flags = idx + 1;
memset(ext, 0, sizeof(*ext));
switch (priv->encode_alg) {
case ORINOCO_ALG_NONE:
ext->alg = IW_ENCODE_ALG_NONE;
ext->key_len = 0;
encoding->flags |= IW_ENCODE_DISABLED;
break;
case ORINOCO_ALG_WEP:
ext->alg = IW_ENCODE_ALG_WEP;
ext->key_len = min(priv->keys[idx].key_len, max_key_len);
memcpy(ext->key, priv->keys[idx].key, ext->key_len);
encoding->flags |= IW_ENCODE_ENABLED;
break;
case ORINOCO_ALG_TKIP:
ext->alg = IW_ENCODE_ALG_TKIP;
ext->key_len = min(priv->keys[idx].key_len, max_key_len);
memcpy(ext->key, priv->keys[idx].key, ext->key_len);
encoding->flags |= IW_ENCODE_ENABLED;
break;
}
err = 0;
out:
orinoco_unlock(priv, &flags);
return err;
}
static int orinoco_ioctl_set_auth(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
struct hermes *hw = &priv->hw;
struct iw_param *param = &wrqu->param;
unsigned long flags;
int ret = -EINPROGRESS;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
case IW_AUTH_PRIVACY_INVOKED:
case IW_AUTH_DROP_UNENCRYPTED:
/*
* orinoco does not use these parameters
*/
break;
case IW_AUTH_MFP:
/* Management Frame Protection not supported.
* Only fail if set to required.
*/
if (param->value == IW_AUTH_MFP_REQUIRED)
ret = -EINVAL;
break;
case IW_AUTH_KEY_MGMT:
/* wl_lkm implies value 2 == PSK for Hermes I
* which ties in with WEXT
* no other hints tho :(
*/
priv->key_mgmt = param->value;
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
/* When countermeasures are enabled, shut down the
* card; when disabled, re-enable the card. This must
* take effect immediately.
*
* TODO: Make sure that the EAPOL message is getting
* out before card disabled
*/
if (param->value) {
priv->tkip_cm_active = 1;
ret = hermes_disable_port(hw, 0);
} else {
priv->tkip_cm_active = 0;
ret = hermes_enable_port(hw, 0);
}
break;
case IW_AUTH_80211_AUTH_ALG:
if (param->value & IW_AUTH_ALG_SHARED_KEY)
priv->wep_restrict = 1;
else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM)
priv->wep_restrict = 0;
else
ret = -EINVAL;
break;
case IW_AUTH_WPA_ENABLED:
if (priv->has_wpa) {
priv->wpa_enabled = param->value ? 1 : 0;
} else {
if (param->value)
ret = -EOPNOTSUPP;
/* else silently accept disable of WPA */
priv->wpa_enabled = 0;
}
break;
default:
ret = -EOPNOTSUPP;
}
orinoco_unlock(priv, &flags);
return ret;
}
static int orinoco_ioctl_get_auth(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
struct iw_param *param = &wrqu->param;
unsigned long flags;
int ret = 0;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_KEY_MGMT:
param->value = priv->key_mgmt;
break;
case IW_AUTH_TKIP_COUNTERMEASURES:
param->value = priv->tkip_cm_active;
break;
case IW_AUTH_80211_AUTH_ALG:
if (priv->wep_restrict)
param->value = IW_AUTH_ALG_SHARED_KEY;
else
param->value = IW_AUTH_ALG_OPEN_SYSTEM;
break;
case IW_AUTH_WPA_ENABLED:
param->value = priv->wpa_enabled;
break;
default:
ret = -EOPNOTSUPP;
}
orinoco_unlock(priv, &flags);
return ret;
}
static int orinoco_ioctl_set_genie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
u8 *buf;
unsigned long flags;
/* cut off at IEEE80211_MAX_DATA_LEN */
if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) ||
(wrqu->data.length && (extra == NULL)))
return -EINVAL;
if (wrqu->data.length) {
buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
} else
buf = NULL;
if (orinoco_lock(priv, &flags) != 0) {
kfree(buf);
return -EBUSY;
}
kfree(priv->wpa_ie);
priv->wpa_ie = buf;
priv->wpa_ie_len = wrqu->data.length;
if (priv->wpa_ie) {
/* Looks like wl_lkm wants to check the auth alg, and
* somehow pass it to the firmware.
* Instead it just calls the key mgmt rid
* - we do this in set auth.
*/
}
orinoco_unlock(priv, &flags);
return 0;
}
static int orinoco_ioctl_get_genie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
unsigned long flags;
int err = 0;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
if ((priv->wpa_ie_len == 0) || (priv->wpa_ie == NULL)) {
wrqu->data.length = 0;
goto out;
}
if (wrqu->data.length < priv->wpa_ie_len) {
err = -E2BIG;
goto out;
}
wrqu->data.length = priv->wpa_ie_len;
memcpy(extra, priv->wpa_ie, priv->wpa_ie_len);
out:
orinoco_unlock(priv, &flags);
return err;
}
static int orinoco_ioctl_set_mlme(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
unsigned long flags;
int ret = 0;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
switch (mlme->cmd) {
case IW_MLME_DEAUTH:
/* silently ignore */
break;
case IW_MLME_DISASSOC:
ret = orinoco_hw_disassociate(priv, mlme->addr.sa_data,
mlme->reason_code);
break;
default:
ret = -EOPNOTSUPP;
}
orinoco_unlock(priv, &flags);
return ret;
}
static int orinoco_ioctl_reset(struct net_device *dev,
struct iw_request_info *info,
void *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (info->cmd == (SIOCIWFIRSTPRIV + 0x1)) {
printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name);
/* Firmware reset */
orinoco_reset(&priv->reset_work);
} else {
printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name);
schedule_work(&priv->reset_work);
}
return 0;
}
static int orinoco_ioctl_setibssport(struct net_device *dev,
struct iw_request_info *info,
void *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
int val = *((int *) extra);
unsigned long flags;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
priv->ibss_port = val;
/* Actually update the mode we are using */
set_port_type(priv);
orinoco_unlock(priv, &flags);
return -EINPROGRESS; /* Call commit handler */
}
static int orinoco_ioctl_getibssport(struct net_device *dev,
struct iw_request_info *info,
void *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
int *val = (int *) extra;
*val = priv->ibss_port;
return 0;
}
static int orinoco_ioctl_setport3(struct net_device *dev,
struct iw_request_info *info,
void *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
int val = *((int *) extra);
int err = 0;
unsigned long flags;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
switch (val) {
case 0: /* Try to do IEEE ad-hoc mode */
if (!priv->has_ibss) {
err = -EINVAL;
break;
}
priv->prefer_port3 = 0;
break;
case 1: /* Try to do Lucent proprietary ad-hoc mode */
if (!priv->has_port3) {
err = -EINVAL;
break;
}
priv->prefer_port3 = 1;
break;
default:
err = -EINVAL;
}
if (!err) {
/* Actually update the mode we are using */
set_port_type(priv);
err = -EINPROGRESS;
}
orinoco_unlock(priv, &flags);
return err;
}
static int orinoco_ioctl_getport3(struct net_device *dev,
struct iw_request_info *info,
void *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
int *val = (int *) extra;
*val = priv->prefer_port3;
return 0;
}
static int orinoco_ioctl_setpreamble(struct net_device *dev,
struct iw_request_info *info,
void *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
unsigned long flags;
int val;
if (!priv->has_preamble)
return -EOPNOTSUPP;
/* 802.11b has recently defined a short preamble.
* Basically, the PHY header has been reduced in size.
* This increases performance, especially at high rates
* (the preamble is transmitted at 1 Mb/s); unfortunately
* it causes compatibility trouble... - Jean II */
val = *((int *) extra);
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
if (val)
priv->preamble = 1;
else
priv->preamble = 0;
orinoco_unlock(priv, &flags);
return -EINPROGRESS; /* Call commit handler */
}
static int orinoco_ioctl_getpreamble(struct net_device *dev,
struct iw_request_info *info,
void *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
int *val = (int *) extra;
if (!priv->has_preamble)
return -EOPNOTSUPP;
*val = priv->preamble;
return 0;
}
/* ioctl interface to hermes_read_ltv()
* To use with iwpriv, pass the RID as the token argument, e.g.
* iwpriv get_rid [0xfc00]
* At least Wireless Tools 25 is required to use iwpriv.
* For Wireless Tools 25 and 26, append "dummy" at the end. */
static int orinoco_ioctl_getrid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
struct hermes *hw = &priv->hw;
int rid = data->flags;
u16 length;
int err;
unsigned long flags;
/* It's a "get" function, but we don't want users to access the
* WEP key and other raw firmware data */
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (rid < 0xfc00 || rid > 0xffff)
return -EINVAL;
if (orinoco_lock(priv, &flags) != 0)
return -EBUSY;
err = hw->ops->read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length,
extra);
if (err)
goto out;
data->length = min_t(u16, HERMES_RECLEN_TO_BYTES(length),
MAX_RID_LEN);
out:
orinoco_unlock(priv, &flags);
return err;
}
/* Commit handler, called after set operations */
static int orinoco_ioctl_commit(struct net_device *dev,
struct iw_request_info *info,
void *wrqu,
char *extra)
{
struct orinoco_private *priv = ndev_priv(dev);
unsigned long flags;
int err = 0;
if (!priv->open)
return 0;
if (orinoco_lock(priv, &flags) != 0)
return err;
err = orinoco_commit(priv);
orinoco_unlock(priv, &flags);
return err;
}
static const struct iw_priv_args orinoco_privtab[] = {
{ SIOCIWFIRSTPRIV + 0x0, 0, 0, "force_reset" },
{ SIOCIWFIRSTPRIV + 0x1, 0, 0, "card_reset" },
{ SIOCIWFIRSTPRIV + 0x2, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0, "set_port3" },
{ SIOCIWFIRSTPRIV + 0x3, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"get_port3" },
{ SIOCIWFIRSTPRIV + 0x4, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0, "set_preamble" },
{ SIOCIWFIRSTPRIV + 0x5, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"get_preamble" },
{ SIOCIWFIRSTPRIV + 0x6, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0, "set_ibssport" },
{ SIOCIWFIRSTPRIV + 0x7, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"get_ibssport" },
{ SIOCIWFIRSTPRIV + 0x9, 0, IW_PRIV_TYPE_BYTE | MAX_RID_LEN,
"get_rid" },
};
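/*
* Usage sketch (illustrative): each entry maps an iwpriv sub-ioctl by
* its SIOCIWFIRSTPRIV offset to a handler in orinoco_private_handler[]
* below, e.g.:
*
*	iwpriv eth0 set_port3 1
*	iwpriv eth0 get_rid 0xfc00 dummy
*/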
/*
* Structures to export the Wireless Handlers
*/
static const iw_handler orinoco_handler[] = {
IW_HANDLER(SIOCSIWCOMMIT, (iw_handler)orinoco_ioctl_commit),
IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
IW_HANDLER(SIOCSIWFREQ, (iw_handler)orinoco_ioctl_setfreq),
IW_HANDLER(SIOCGIWFREQ, (iw_handler)orinoco_ioctl_getfreq),
IW_HANDLER(SIOCSIWMODE, (iw_handler)cfg80211_wext_siwmode),
IW_HANDLER(SIOCGIWMODE, (iw_handler)cfg80211_wext_giwmode),
IW_HANDLER(SIOCSIWSENS, (iw_handler)orinoco_ioctl_setsens),
IW_HANDLER(SIOCGIWSENS, (iw_handler)orinoco_ioctl_getsens),
IW_HANDLER(SIOCGIWRANGE, (iw_handler)cfg80211_wext_giwrange),
IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
IW_HANDLER(SIOCSIWAP, (iw_handler)orinoco_ioctl_setwap),
IW_HANDLER(SIOCGIWAP, (iw_handler)orinoco_ioctl_getwap),
IW_HANDLER(SIOCSIWSCAN, (iw_handler)cfg80211_wext_siwscan),
IW_HANDLER(SIOCGIWSCAN, (iw_handler)cfg80211_wext_giwscan),
IW_HANDLER(SIOCSIWESSID, (iw_handler)orinoco_ioctl_setessid),
IW_HANDLER(SIOCGIWESSID, (iw_handler)orinoco_ioctl_getessid),
IW_HANDLER(SIOCSIWRATE, (iw_handler)orinoco_ioctl_setrate),
IW_HANDLER(SIOCGIWRATE, (iw_handler)orinoco_ioctl_getrate),
IW_HANDLER(SIOCSIWRTS, (iw_handler)cfg80211_wext_siwrts),
IW_HANDLER(SIOCGIWRTS, (iw_handler)cfg80211_wext_giwrts),
IW_HANDLER(SIOCSIWFRAG, (iw_handler)cfg80211_wext_siwfrag),
IW_HANDLER(SIOCGIWFRAG, (iw_handler)cfg80211_wext_giwfrag),
IW_HANDLER(SIOCGIWRETRY, (iw_handler)cfg80211_wext_giwretry),
IW_HANDLER(SIOCSIWENCODE, (iw_handler)orinoco_ioctl_setiwencode),
IW_HANDLER(SIOCGIWENCODE, (iw_handler)orinoco_ioctl_getiwencode),
IW_HANDLER(SIOCSIWPOWER, (iw_handler)orinoco_ioctl_setpower),
IW_HANDLER(SIOCGIWPOWER, (iw_handler)orinoco_ioctl_getpower),
IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth),
IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth),
IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
};
/*
Added typecasting since we no longer use iwreq_data -- Moustafa
*/
static const iw_handler orinoco_private_handler[] = {
[0] = (iw_handler)orinoco_ioctl_reset,
[1] = (iw_handler)orinoco_ioctl_reset,
[2] = (iw_handler)orinoco_ioctl_setport3,
[3] = (iw_handler)orinoco_ioctl_getport3,
[4] = (iw_handler)orinoco_ioctl_setpreamble,
[5] = (iw_handler)orinoco_ioctl_getpreamble,
[6] = (iw_handler)orinoco_ioctl_setibssport,
[7] = (iw_handler)orinoco_ioctl_getibssport,
[9] = (iw_handler)orinoco_ioctl_getrid,
};
const struct iw_handler_def orinoco_handler_def = {
.num_standard = ARRAY_SIZE(orinoco_handler),
.num_private = ARRAY_SIZE(orinoco_private_handler),
.num_private_args = ARRAY_SIZE(orinoco_privtab),
.standard = orinoco_handler,
.private = orinoco_private_handler,
.private_args = orinoco_privtab,
.get_wireless_stats = orinoco_get_wireless_stats,
};
| gpl-2.0 |
sohkis/android_kernel_motorola_shamu | arch/arm/mach-shmobile/setup-emev2.c | 1986 | 9326 | /*
* Emma Mobile EV2 processor support
*
* Copyright (C) 2012 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/platform_device.h>
#include <linux/platform_data/gpio-em.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/io.h>
#include <linux/irqchip/arm-gic.h>
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/emev2.h>
#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
static struct map_desc emev2_io_desc[] __initdata = {
#ifdef CONFIG_SMP
/* 128K identity map for 0xe0100000 (SMU) */
{
.virtual = 0xe0100000,
.pfn = __phys_to_pfn(0xe0100000),
.length = SZ_128K,
.type = MT_DEVICE
},
/* 2M mapping for SCU + L2 controller */
{
.virtual = 0xf0000000,
.pfn = __phys_to_pfn(0x1e000000),
.length = SZ_2M,
.type = MT_DEVICE
},
#endif
};
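/*
* Illustrative note: the first entry is an identity mapping (virtual ==
* physical) for the SMU block, while the second remaps 2 MiB starting
* at physical 0x1e000000 (SCU + L2 controller) to virtual 0xf0000000,
* so e.g. a register at physical 0x1e000100 is reached at 0xf0000100.
*/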
void __init emev2_map_io(void)
{
iotable_init(emev2_io_desc, ARRAY_SIZE(emev2_io_desc));
}
/* UART */
static struct resource uart0_resources[] = {
[0] = {
.start = 0xe1020000,
.end = 0xe1020037,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 40,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device uart0_device = {
.name = "serial8250-em",
.id = 0,
.num_resources = ARRAY_SIZE(uart0_resources),
.resource = uart0_resources,
};
static struct resource uart1_resources[] = {
[0] = {
.start = 0xe1030000,
.end = 0xe1030037,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 41,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device uart1_device = {
.name = "serial8250-em",
.id = 1,
.num_resources = ARRAY_SIZE(uart1_resources),
.resource = uart1_resources,
};
static struct resource uart2_resources[] = {
[0] = {
.start = 0xe1040000,
.end = 0xe1040037,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 42,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device uart2_device = {
.name = "serial8250-em",
.id = 2,
.num_resources = ARRAY_SIZE(uart2_resources),
.resource = uart2_resources,
};
static struct resource uart3_resources[] = {
[0] = {
.start = 0xe1050000,
.end = 0xe1050037,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 43,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device uart3_device = {
.name = "serial8250-em",
.id = 3,
.num_resources = ARRAY_SIZE(uart3_resources),
.resource = uart3_resources,
};
/* STI */
static struct resource sti_resources[] = {
[0] = {
.name = "STI",
.start = 0xe0180000,
.end = 0xe0180053,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 157,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device sti_device = {
.name = "em_sti",
.id = 0,
.resource = sti_resources,
.num_resources = ARRAY_SIZE(sti_resources),
};
/* GIO */
static struct gpio_em_config gio0_config = {
.gpio_base = 0,
.irq_base = EMEV2_GPIO_IRQ(0),
.number_of_pins = 32,
};
static struct resource gio0_resources[] = {
[0] = {
.name = "GIO_000",
.start = 0xe0050000,
.end = 0xe005002b,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "GIO_000",
.start = 0xe0050040,
.end = 0xe005005f,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = 99,
.flags = IORESOURCE_IRQ,
},
[3] = {
.start = 100,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device gio0_device = {
.name = "em_gio",
.id = 0,
.resource = gio0_resources,
.num_resources = ARRAY_SIZE(gio0_resources),
.dev = {
.platform_data = &gio0_config,
},
};
static struct gpio_em_config gio1_config = {
.gpio_base = 32,
.irq_base = EMEV2_GPIO_IRQ(32),
.number_of_pins = 32,
};
static struct resource gio1_resources[] = {
[0] = {
.name = "GIO_032",
.start = 0xe0050080,
.end = 0xe00500ab,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "GIO_032",
.start = 0xe00500c0,
.end = 0xe00500df,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = 101,
.flags = IORESOURCE_IRQ,
},
[3] = {
.start = 102,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device gio1_device = {
.name = "em_gio",
.id = 1,
.resource = gio1_resources,
.num_resources = ARRAY_SIZE(gio1_resources),
.dev = {
.platform_data = &gio1_config,
},
};
static struct gpio_em_config gio2_config = {
.gpio_base = 64,
.irq_base = EMEV2_GPIO_IRQ(64),
.number_of_pins = 32,
};
static struct resource gio2_resources[] = {
[0] = {
.name = "GIO_064",
.start = 0xe0050100,
.end = 0xe005012b,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "GIO_064",
.start = 0xe0050140,
.end = 0xe005015f,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = 103,
.flags = IORESOURCE_IRQ,
},
[3] = {
.start = 104,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device gio2_device = {
.name = "em_gio",
.id = 2,
.resource = gio2_resources,
.num_resources = ARRAY_SIZE(gio2_resources),
.dev = {
.platform_data = &gio2_config,
},
};
static struct gpio_em_config gio3_config = {
.gpio_base = 96,
.irq_base = EMEV2_GPIO_IRQ(96),
.number_of_pins = 32,
};
static struct resource gio3_resources[] = {
[0] = {
.name = "GIO_096",
.start = 0xe0050180,
.end = 0xe00501ab,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "GIO_096",
.start = 0xe00501c0,
.end = 0xe00501df,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = 105,
.flags = IORESOURCE_IRQ,
},
[3] = {
.start = 106,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device gio3_device = {
.name = "em_gio",
.id = 3,
.resource = gio3_resources,
.num_resources = ARRAY_SIZE(gio3_resources),
.dev = {
.platform_data = &gio3_config,
},
};
static struct gpio_em_config gio4_config = {
.gpio_base = 128,
.irq_base = EMEV2_GPIO_IRQ(128),
.number_of_pins = 31,
};
static struct resource gio4_resources[] = {
[0] = {
.name = "GIO_128",
.start = 0xe0050200,
.end = 0xe005022b,
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "GIO_128",
.start = 0xe0050240,
.end = 0xe005025f,
.flags = IORESOURCE_MEM,
},
[2] = {
.start = 107,
.flags = IORESOURCE_IRQ,
},
[3] = {
.start = 108,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device gio4_device = {
.name = "em_gio",
.id = 4,
.resource = gio4_resources,
.num_resources = ARRAY_SIZE(gio4_resources),
.dev = {
.platform_data = &gio4_config,
},
};
static struct resource pmu_resources[] = {
[0] = {
.start = 152,
.end = 152,
.flags = IORESOURCE_IRQ,
},
[1] = {
.start = 153,
.end = 153,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device pmu_device = {
.name = "arm-pmu",
.id = -1,
.num_resources = ARRAY_SIZE(pmu_resources),
.resource = pmu_resources,
};
static struct platform_device *emev2_early_devices[] __initdata = {
&uart0_device,
&uart1_device,
&uart2_device,
&uart3_device,
};
static struct platform_device *emev2_late_devices[] __initdata = {
&sti_device,
&gio0_device,
&gio1_device,
&gio2_device,
&gio3_device,
&gio4_device,
&pmu_device,
};
void __init emev2_add_standard_devices(void)
{
emev2_clock_init();
platform_add_devices(emev2_early_devices,
ARRAY_SIZE(emev2_early_devices));
platform_add_devices(emev2_late_devices,
ARRAY_SIZE(emev2_late_devices));
}
static void __init emev2_init_delay(void)
{
shmobile_setup_delay(533, 1, 3); /* Cortex-A9 @ 533MHz */
}
void __init emev2_add_early_devices(void)
{
emev2_init_delay();
early_platform_add_devices(emev2_early_devices,
ARRAY_SIZE(emev2_early_devices));
/* setup early console here as well */
shmobile_setup_console();
}
void __init emev2_init_irq(void)
{
void __iomem *gic_dist_base;
void __iomem *gic_cpu_base;
/* Static mappings, never released */
gic_dist_base = ioremap(0xe0028000, PAGE_SIZE);
gic_cpu_base = ioremap(0xe0020000, PAGE_SIZE);
BUG_ON(!gic_dist_base || !gic_cpu_base);
/* Use GIC to handle interrupts */
gic_init(0, 29, gic_dist_base, gic_cpu_base);
}
#ifdef CONFIG_USE_OF
static const struct of_dev_auxdata emev2_auxdata_lookup[] __initconst = {
{ }
};
static void __init emev2_add_standard_devices_dt(void)
{
of_platform_populate(NULL, of_default_bus_match_table,
emev2_auxdata_lookup, NULL);
}
static const char *emev2_boards_compat_dt[] __initdata = {
"renesas,emev2",
NULL,
};
DT_MACHINE_START(EMEV2_DT, "Generic Emma Mobile EV2 (Flattened Device Tree)")
.smp = smp_ops(emev2_smp_ops),
.init_early = emev2_init_delay,
.nr_irqs = NR_IRQS_LEGACY,
.init_irq = irqchip_init,
.init_machine = emev2_add_standard_devices_dt,
.dt_compat = emev2_boards_compat_dt,
MACHINE_END
#endif /* CONFIG_USE_OF */
| gpl-2.0 |
keecker/kernel_msm-3.10 | drivers/net/ethernet/dlink/dl2k.c | 2242 | 46301 | /* D-Link DL2000-based Gigabit Ethernet Adapter Linux driver */
/*
Copyright (c) 2001, 2002 by D-Link Corporation
Written by Edward Peng.<edward_peng@dlink.com.tw>
Created 03-May-2001, based on Linux' sundance.c.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
*/
#define DRV_NAME "DL2000/TC902x-based linux driver"
#define DRV_VERSION "v1.19"
#define DRV_RELDATE "2007/08/12"
#include "dl2k.h"
#include <linux/dma-mapping.h>
#define dw32(reg, val) iowrite32(val, ioaddr + (reg))
#define dw16(reg, val) iowrite16(val, ioaddr + (reg))
#define dw8(reg, val) iowrite8(val, ioaddr + (reg))
#define dr32(reg) ioread32(ioaddr + (reg))
#define dr16(reg) ioread16(ioaddr + (reg))
#define dr8(reg) ioread8(ioaddr + (reg))
static char version[] =
KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
#define MAX_UNITS 8
static int mtu[MAX_UNITS];
static int vlan[MAX_UNITS];
static int jumbo[MAX_UNITS];
static char *media[MAX_UNITS];
static int tx_flow=-1;
static int rx_flow=-1;
static int copy_thresh;
static int rx_coalesce=10; /* Rx frame count each interrupt */
static int rx_timeout=200; /* Rx DMA wait time in 640ns increments */
static int tx_coalesce=16; /* HW xmit count each TxDMAComplete */
MODULE_AUTHOR ("Edward Peng");
MODULE_DESCRIPTION ("D-Link DL2000-based Gigabit Ethernet Adapter");
MODULE_LICENSE("GPL");
module_param_array(mtu, int, NULL, 0);
module_param_array(media, charp, NULL, 0);
module_param_array(vlan, int, NULL, 0);
module_param_array(jumbo, int, NULL, 0);
module_param(tx_flow, int, 0);
module_param(rx_flow, int, 0);
module_param(copy_thresh, int, 0);
module_param(rx_coalesce, int, 0); /* Rx frame count each interrupt */
module_param(rx_timeout, int, 0); /* Rx DMA wait time in 640ns increments */
module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
/* Enable the default interrupts */
#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
UpdateStats | LinkEvent)
static void dl2k_enable_int(struct netdev_private *np)
{
void __iomem *ioaddr = np->ioaddr;
dw16(IntEnable, DEFAULT_INTR);
}
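/*
 * Illustrative sketch (not part of the original driver): the dw*()/dr*()
 * macros above expect a local `ioaddr' in scope, so any helper touching
 * registers must fetch it from netdev_private first. The name
 * dl2k_disable_int is hypothetical, chosen for symmetry with
 * dl2k_enable_int().
 */
static inline void dl2k_disable_int(struct netdev_private *np)
{
void __iomem *ioaddr = np->ioaddr;
/* Writing 0 masks every interrupt source at once. */
dw16(IntEnable, 0);
}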
static const int max_intrloop = 50;
static const int multicast_filter_limit = 0x40;
static int rio_open (struct net_device *dev);
static void rio_timer (unsigned long data);
static void rio_tx_timeout (struct net_device *dev);
static void alloc_list (struct net_device *dev);
static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rio_interrupt (int irq, void *dev_instance);
static void rio_free_tx (struct net_device *dev, int irq);
static void tx_error (struct net_device *dev, int tx_status);
static int receive_packet (struct net_device *dev);
static void rio_error (struct net_device *dev, int int_status);
static int change_mtu (struct net_device *dev, int new_mtu);
static void set_multicast (struct net_device *dev);
static struct net_device_stats *get_stats (struct net_device *dev);
static int clear_stats (struct net_device *dev);
static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int rio_close (struct net_device *dev);
static int find_miiphy (struct net_device *dev);
static int parse_eeprom (struct net_device *dev);
static int read_eeprom (struct netdev_private *, int eep_addr);
static int mii_wait_link (struct net_device *dev, int wait);
static int mii_set_media (struct net_device *dev);
static int mii_get_media (struct net_device *dev);
static int mii_set_media_pcs (struct net_device *dev);
static int mii_get_media_pcs (struct net_device *dev);
static int mii_read (struct net_device *dev, int phy_addr, int reg_num);
static int mii_write (struct net_device *dev, int phy_addr, int reg_num,
u16 data);
static const struct ethtool_ops ethtool_ops;
static const struct net_device_ops netdev_ops = {
.ndo_open = rio_open,
.ndo_start_xmit = start_xmit,
.ndo_stop = rio_close,
.ndo_get_stats = get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_rx_mode = set_multicast,
.ndo_do_ioctl = rio_ioctl,
.ndo_tx_timeout = rio_tx_timeout,
.ndo_change_mtu = change_mtu,
};
static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
struct netdev_private *np;
static int card_idx;
int chip_idx = ent->driver_data;
int err, irq;
void __iomem *ioaddr;
static int version_printed;
void *ring_space;
dma_addr_t ring_dma;
if (!version_printed++)
printk ("%s", version);
err = pci_enable_device (pdev);
if (err)
return err;
irq = pdev->irq;
err = pci_request_regions (pdev, "dl2k");
if (err)
goto err_out_disable;
pci_set_master (pdev);
err = -ENOMEM;
dev = alloc_etherdev (sizeof (*np));
if (!dev)
goto err_out_res;
SET_NETDEV_DEV(dev, &pdev->dev);
np = netdev_priv(dev);
/* IO registers range. */
ioaddr = pci_iomap(pdev, 0, 0);
if (!ioaddr)
goto err_out_dev;
np->eeprom_addr = ioaddr;
#ifdef MEM_MAPPING
/* MM registers range. */
ioaddr = pci_iomap(pdev, 1, 0);
if (!ioaddr)
goto err_out_iounmap;
#endif
np->ioaddr = ioaddr;
np->chip_id = chip_idx;
np->pdev = pdev;
spin_lock_init (&np->tx_lock);
spin_lock_init (&np->rx_lock);
/* Parse manual configuration */
np->an_enable = 1;
np->tx_coalesce = 1;
if (card_idx < MAX_UNITS) {
if (media[card_idx] != NULL) {
np->an_enable = 0;
if (strcmp (media[card_idx], "auto") == 0 ||
strcmp (media[card_idx], "autosense") == 0 ||
strcmp (media[card_idx], "0") == 0 ) {
np->an_enable = 2;
} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
strcmp (media[card_idx], "4") == 0) {
np->speed = 100;
np->full_duplex = 1;
} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
strcmp (media[card_idx], "3") == 0) {
np->speed = 100;
np->full_duplex = 0;
} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
strcmp (media[card_idx], "2") == 0) {
np->speed = 10;
np->full_duplex = 1;
} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
strcmp (media[card_idx], "1") == 0) {
np->speed = 10;
np->full_duplex = 0;
} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
strcmp (media[card_idx], "6") == 0) {
np->speed=1000;
np->full_duplex=1;
} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
strcmp (media[card_idx], "5") == 0) {
np->speed = 1000;
np->full_duplex = 0;
} else {
np->an_enable = 1;
}
}
if (jumbo[card_idx] != 0) {
np->jumbo = 1;
dev->mtu = MAX_JUMBO;
} else {
np->jumbo = 0;
if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
dev->mtu = mtu[card_idx];
}
np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
vlan[card_idx] : 0;
if (rx_coalesce > 0 && rx_timeout > 0) {
np->rx_coalesce = rx_coalesce;
np->rx_timeout = rx_timeout;
np->coalesce = 1;
}
np->tx_flow = (tx_flow == 0) ? 0 : 1;
np->rx_flow = (rx_flow == 0) ? 0 : 1;
if (tx_coalesce < 1)
tx_coalesce = 1;
else if (tx_coalesce > TX_RING_SIZE-1)
tx_coalesce = TX_RING_SIZE - 1;
}
dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
SET_ETHTOOL_OPS(dev, &ethtool_ops);
#if 0
dev->features = NETIF_F_IP_CSUM;
#endif
pci_set_drvdata (pdev, dev);
ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_iounmap;
np->tx_ring = ring_space;
np->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
np->rx_ring = ring_space;
np->rx_ring_dma = ring_dma;
/* Parse eeprom data */
parse_eeprom (dev);
/* Find PHY address */
err = find_miiphy (dev);
if (err)
goto err_out_unmap_rx;
/* Fiber device? */
np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
np->link_status = 0;
/* Set media and reset PHY */
if (np->phy_media) {
/* default to Auto-Negotiation for fiber devices */
if (np->an_enable == 2) {
np->an_enable = 1;
}
mii_set_media_pcs (dev);
} else {
/* Auto-Negotiation is mandatory for 1000BASE-T,
IEEE 802.3ab Annex 28D page 14 */
if (np->speed == 1000)
np->an_enable = 1;
mii_set_media (dev);
}
err = register_netdev (dev);
if (err)
goto err_out_unmap_rx;
card_idx++;
printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
dev->name, np->name, dev->dev_addr, irq);
if (tx_coalesce > 1)
printk(KERN_INFO "tx_coalesce:\t%d packets\n",
tx_coalesce);
if (np->coalesce)
printk(KERN_INFO
"rx_coalesce:\t%d packets\n"
"rx_timeout: \t%d ns\n",
np->rx_coalesce, np->rx_timeout*640);
if (np->vlan)
printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
return 0;
err_out_unmap_rx:
pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
pci_iounmap(pdev, np->ioaddr);
#endif
pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
free_netdev (dev);
err_out_res:
pci_release_regions (pdev);
err_out_disable:
pci_disable_device (pdev);
return err;
}
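/*
 * Editor's note on the unwind ladder above: each err_out_* label undoes
 * exactly the resources acquired before the failing step, in reverse
 * order of acquisition (rx ring, tx ring, iomaps, netdev, PCI regions,
 * PCI device), so jumping to the label that matches the first failure
 * releases everything already held and nothing more.
 */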
static int
find_miiphy (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
int i, phy_found = 0;
np->phy_addr = 1;
for (i = 31; i >= 0; i--) {
int mii_status = mii_read (dev, i, 1);
if (mii_status != 0xffff && mii_status != 0x0000) {
np->phy_addr = i;
phy_found++;
}
}
if (!phy_found) {
printk (KERN_ERR "%s: No MII PHY found!\n", dev->name);
return -ENODEV;
}
return 0;
}
static int
parse_eeprom (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
int i, j;
u8 sromdata[256];
u8 *psib;
u32 crc;
PSROM_t psrom = (PSROM_t) sromdata;
int cid, next;
for (i = 0; i < 128; i++)
((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));
if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */
/* Check CRC */
crc = ~ether_crc_le (256 - 4, sromdata);
if (psrom->crc != cpu_to_le32(crc)) {
printk (KERN_ERR "%s: EEPROM data CRC error.\n",
dev->name);
return -1;
}
}
/* Set MAC address */
for (i = 0; i < 6; i++)
dev->dev_addr[i] = psrom->mac_addr[i];
if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
return 0;
}
/* Parse Software Information Block */
i = 0x30;
psib = (u8 *) sromdata;
do {
cid = psib[i++];
next = psib[i++];
if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
printk (KERN_ERR "Cell data error\n");
return -1;
}
switch (cid) {
case 0: /* Format version */
break;
case 1: /* End of cell */
return 0;
case 2: /* Duplex Polarity */
np->duplex_polarity = psib[i];
dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
break;
case 3: /* Wake Polarity */
np->wake_polarity = psib[i];
break;
case 9: /* Adapter description */
j = (next - i > 255) ? 255 : next - i;
memcpy (np->name, &(psib[i]), j);
break;
case 4:
case 5:
case 6:
case 7:
case 8: /* Reserved */
break;
default: /* Unknown cell */
return -1;
}
i = next;
} while (1);
return 0;
}
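/*
 * Sketch of the Software Information Block layout walked by the loop in
 * parse_eeprom() above, reconstructed from the code rather than from a
 * datasheet: starting at offset 0x30, each cell is a one-byte id, a
 * one-byte offset of the next cell, then cell-specific payload. The
 * struct below is purely illustrative and unused by the driver.
 */
struct dl2k_sib_cell {
u8 cid; /* cell type: 0 = format version, 1 = end of list */
u8 next; /* absolute offset of the following cell */
u8 payload[]; /* cid-specific data, e.g. the adapter name for cid 9 */
};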
static int
rio_open (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
const int irq = np->pdev->irq;
int i;
u16 macctrl;
i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
if (i)
return i;
/* Reset all logic functions */
dw16(ASICCtrl + 2,
GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
mdelay(10);
/* DebugCtrl bits 4, 5 and 9 must be set */
dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
/* Jumbo frame */
if (np->jumbo != 0)
dw16(MaxFrameSize, MAX_JUMBO+14);
alloc_list (dev);
/* Get station address */
for (i = 0; i < 6; i++)
dw8(StationAddr0 + i, dev->dev_addr[i]);
set_multicast (dev);
if (np->coalesce) {
dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
}
/* Set RIO to poll every N*320nsec. */
dw8(RxDMAPollPeriod, 0x20);
dw8(TxDMAPollPeriod, 0xff);
dw8(RxDMABurstThresh, 0x30);
dw8(RxDMAUrgentThresh, 0x30);
dw32(RmonStatMask, 0x0007ffff);
/* clear statistics */
clear_stats (dev);
/* VLAN supported */
if (np->vlan) {
/* priority field in RxDMAIntCtrl */
dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
/* VLANId */
dw16(VLANId, np->vlan);
/* Length/Type should be 0x8100 */
dw32(VLANTag, 0x8100 << 16 | np->vlan);
/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
VLAN information is carried in the TFC's VID and CFI fields. */
dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
}
init_timer (&np->timer);
np->timer.expires = jiffies + 1*HZ;
np->timer.data = (unsigned long) dev;
np->timer.function = rio_timer;
add_timer (&np->timer);
/* Start Tx/Rx */
dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);
macctrl = 0;
macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
macctrl |= (np->full_duplex) ? DuplexSelect : 0;
macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
dw16(MACCtrl, macctrl);
netif_start_queue (dev);
dl2k_enable_int(np);
return 0;
}
static void
rio_timer (unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct netdev_private *np = netdev_priv(dev);
unsigned int entry;
int next_tick = 1*HZ;
unsigned long flags;
spin_lock_irqsave(&np->rx_lock, flags);
/* Recover rx ring exhausted error */
if (np->cur_rx - np->old_rx >= RX_RING_SIZE) {
printk(KERN_INFO "Try to recover rx ring exhausted...\n");
/* Re-allocate skbuffs to fill the descriptor ring */
for (; np->cur_rx - np->old_rx > 0; np->old_rx++) {
struct sk_buff *skb;
entry = np->old_rx % RX_RING_SIZE;
/* Dropped packets don't need to re-allocate */
if (np->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb_ip_align(dev,
np->rx_buf_sz);
if (skb == NULL) {
np->rx_ring[entry].fraginfo = 0;
printk (KERN_INFO
"%s: Still unable to re-allocate Rx skbuff.#%d\n",
dev->name, entry);
break;
}
np->rx_skbuff[entry] = skb;
np->rx_ring[entry].fraginfo =
cpu_to_le64 (pci_map_single
(np->pdev, skb->data, np->rx_buf_sz,
PCI_DMA_FROMDEVICE));
}
np->rx_ring[entry].fraginfo |=
cpu_to_le64((u64)np->rx_buf_sz << 48);
np->rx_ring[entry].status = 0;
} /* end for */
} /* end if */
spin_unlock_irqrestore (&np->rx_lock, flags);
np->timer.expires = jiffies + next_tick;
add_timer(&np->timer);
}
static void
rio_tx_timeout (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
dev->name, dr32(TxStatus));
rio_free_tx(dev, 0);
dev->if_port = 0;
dev->trans_start = jiffies; /* prevent tx timeout */
}
/* allocate and initialize Tx and Rx descriptors */
static void
alloc_list (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
int i;
np->cur_rx = np->cur_tx = 0;
np->old_rx = np->old_tx = 0;
np->rx_buf_sz = (dev->mtu <= 1500 ? PACKET_SIZE : dev->mtu + 32);
/* Initialize Tx descriptors; TFDListPtr itself is set in start_xmit(). */
for (i = 0; i < TX_RING_SIZE; i++) {
np->tx_skbuff[i] = NULL;
np->tx_ring[i].status = cpu_to_le64 (TFDDone);
np->tx_ring[i].next_desc = cpu_to_le64 (np->tx_ring_dma +
((i+1)%TX_RING_SIZE) *
sizeof (struct netdev_desc));
}
/* Initialize Rx descriptors */
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].next_desc = cpu_to_le64 (np->rx_ring_dma +
((i + 1) % RX_RING_SIZE) *
sizeof (struct netdev_desc));
np->rx_ring[i].status = 0;
np->rx_ring[i].fraginfo = 0;
np->rx_skbuff[i] = NULL;
}
/* Allocate the rx buffers */
for (i = 0; i < RX_RING_SIZE; i++) {
/* Allocated fixed size of skbuff */
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
np->rx_skbuff[i] = skb;
if (skb == NULL)
break;
/* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo =
cpu_to_le64 ( pci_map_single (
np->pdev, skb->data, np->rx_buf_sz,
PCI_DMA_FROMDEVICE));
np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
}
/* Set RFDListPtr */
dw32(RFDListPtr0, np->rx_ring_dma);
dw32(RFDListPtr1, 0);
}
static netdev_tx_t
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
struct netdev_desc *txdesc;
unsigned entry;
u64 tfc_vlan_tag = 0;
if (np->link_status == 0) { /* Link Down */
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
entry = np->cur_tx % TX_RING_SIZE;
np->tx_skbuff[entry] = skb;
txdesc = &np->tx_ring[entry];
#if 0
if (skb->ip_summed == CHECKSUM_PARTIAL) {
txdesc->status |=
cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
IPChecksumEnable);
}
#endif
if (np->vlan) {
tfc_vlan_tag = VLANTagInsert |
((u64)np->vlan << 32) |
((u64)skb->priority << 45);
}
txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
skb->len,
PCI_DMA_TODEVICE));
txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);
/* DL2K bug: DMA fails to get the next descriptor ptr in 10Mbps mode.
* Workaround: always use 1 descriptor in 10Mbps mode */
if (entry % np->tx_coalesce == 0 || np->speed == 10)
txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
WordAlignDisable |
TxDMAIndicate |
(1 << FragCountShift));
else
txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
WordAlignDisable |
(1 << FragCountShift));
/* TxDMAPollNow */
dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
/* Schedule ISR */
dw32(CountDown, 10000);
np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
< TX_QUEUE_LEN - 1 && np->speed != 10) {
/* do nothing */
} else if (!netif_queue_stopped(dev)) {
netif_stop_queue (dev);
}
/* The first TFDListPtr */
if (!dr32(TFDListPtr0)) {
dw32(TFDListPtr0, np->tx_ring_dma +
entry * sizeof (struct netdev_desc));
dw32(TFDListPtr1, 0);
}
return NETDEV_TX_OK;
}
static irqreturn_t
rio_interrupt (int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
unsigned int_status;
int cnt = max_intrloop;
int handled = 0;
while (1) {
int_status = dr16(IntStatus);
dw16(IntStatus, int_status);
int_status &= DEFAULT_INTR;
if (int_status == 0 || --cnt < 0)
break;
handled = 1;
/* Processing received packets */
if (int_status & RxDMAComplete)
receive_packet (dev);
/* TxDMAComplete interrupt */
if ((int_status & (TxDMAComplete|IntRequested))) {
int tx_status;
tx_status = dr32(TxStatus);
if (tx_status & 0x01)
tx_error (dev, tx_status);
/* Free used tx skbuffs */
rio_free_tx (dev, 1);
}
/* Handle uncommon events */
if (int_status &
(HostError | LinkEvent | UpdateStats))
rio_error (dev, int_status);
}
if (np->cur_tx != np->old_tx)
dw32(CountDown, 100);
return IRQ_RETVAL(handled);
}
static inline dma_addr_t desc_to_dma(struct netdev_desc *desc)
{
return le64_to_cpu(desc->fraginfo) & DMA_BIT_MASK(48);
}
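/*
 * Editor's sketch (assumption inferred from desc_to_dma() and the ring
 * setup code, not from a datasheet): fraginfo packs a 48-bit bus address
 * in its low bits and the fragment length in the top 16 bits. This
 * hypothetical helper is the inverse of desc_to_dma() and mirrors the
 * "| cpu_to_le64((u64)len << 48)" pattern used when filling descriptors.
 */
static inline __le64 pack_fraginfo(dma_addr_t addr, unsigned int len)
{
return cpu_to_le64(((u64)addr & DMA_BIT_MASK(48)) | ((u64)len << 48));
}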
static void
rio_free_tx (struct net_device *dev, int irq)
{
struct netdev_private *np = netdev_priv(dev);
int entry = np->old_tx % TX_RING_SIZE;
int tx_use = 0;
unsigned long flag = 0;
if (irq)
spin_lock(&np->tx_lock);
else
spin_lock_irqsave(&np->tx_lock, flag);
/* Free used tx skbuffs */
while (entry != np->cur_tx) {
struct sk_buff *skb;
if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
break;
skb = np->tx_skbuff[entry];
pci_unmap_single (np->pdev,
desc_to_dma(&np->tx_ring[entry]),
skb->len, PCI_DMA_TODEVICE);
if (irq)
dev_kfree_skb_irq (skb);
else
dev_kfree_skb (skb);
np->tx_skbuff[entry] = NULL;
entry = (entry + 1) % TX_RING_SIZE;
tx_use++;
}
if (irq)
spin_unlock(&np->tx_lock);
else
spin_unlock_irqrestore(&np->tx_lock, flag);
np->old_tx = entry;
/* If the ring is no longer full, clear tx_full and
call netif_wake_queue() */
if (netif_queue_stopped(dev) &&
((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
< TX_QUEUE_LEN - 1 || np->speed == 10)) {
netif_wake_queue (dev);
}
}
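/*
 * Hypothetical helper mirroring the ring-occupancy arithmetic used in
 * start_xmit() and rio_free_tx() above: adding TX_RING_SIZE before the
 * modulo keeps the result non-negative once cur_tx has wrapped past
 * old_tx, so the comparison against TX_QUEUE_LEN - 1 stays valid.
 */
static inline unsigned int tx_ring_used(const struct netdev_private *np)
{
return (np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE;
}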
static void
tx_error (struct net_device *dev, int tx_status)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
int frame_id;
int i;
frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id);
np->stats.tx_errors++;
/* Transmit Underrun */
if (tx_status & 0x10) {
np->stats.tx_fifo_errors++;
dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
/* A transmit underrun requires TxReset, DMAReset and FIFOReset */
dw16(ASICCtrl + 2,
TxReset | DMAReset | FIFOReset | NetworkReset);
/* Wait for ResetBusy bit clear */
for (i = 50; i > 0; i--) {
if (!(dr16(ASICCtrl + 2) & ResetBusy))
break;
mdelay (1);
}
rio_free_tx (dev, 1);
/* Reset TFDListPtr */
dw32(TFDListPtr0, np->tx_ring_dma +
np->old_tx * sizeof (struct netdev_desc));
dw32(TFDListPtr1, 0);
/* Let TxStartThresh stay default value */
}
/* Late Collision */
if (tx_status & 0x04) {
np->stats.tx_fifo_errors++;
/* TxReset and clear FIFO */
dw16(ASICCtrl + 2, TxReset | FIFOReset);
/* Wait reset done */
for (i = 50; i > 0; i--) {
if (!(dr16(ASICCtrl + 2) & ResetBusy))
break;
mdelay (1);
}
/* Let TxStartThresh stay default value */
}
/* Maximum Collisions */
#ifdef ETHER_STATS
if (tx_status & 0x08)
np->stats.collisions16++;
#else
if (tx_status & 0x08)
np->stats.collisions++;
#endif
/* Restart the Tx */
dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}
static int
receive_packet (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
int entry = np->cur_rx % RX_RING_SIZE;
int cnt = 30;
/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
while (1) {
struct netdev_desc *desc = &np->rx_ring[entry];
int pkt_len;
u64 frame_status;
if (!(desc->status & cpu_to_le64(RFDDone)) ||
!(desc->status & cpu_to_le64(FrameStart)) ||
!(desc->status & cpu_to_le64(FrameEnd)))
break;
/* Chip omits the CRC. */
frame_status = le64_to_cpu(desc->status);
pkt_len = frame_status & 0xffff;
if (--cnt < 0)
break;
/* Update rx error statistics, drop packet. */
if (frame_status & RFS_Errors) {
np->stats.rx_errors++;
if (frame_status & (RxRuntFrame | RxLengthError))
np->stats.rx_length_errors++;
if (frame_status & RxFCSError)
np->stats.rx_crc_errors++;
if (frame_status & RxAlignmentError && np->speed != 1000)
np->stats.rx_frame_errors++;
if (frame_status & RxFIFOOverrun)
np->stats.rx_fifo_errors++;
} else {
struct sk_buff *skb;
/* Small skbuffs for short packets */
if (pkt_len > copy_thresh) {
pci_unmap_single (np->pdev,
desc_to_dma(desc),
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
pci_dma_sync_single_for_cpu(np->pdev,
desc_to_dma(desc),
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
skb_copy_to_linear_data (skb,
np->rx_skbuff[entry]->data,
pkt_len);
skb_put (skb, pkt_len);
pci_dma_sync_single_for_device(np->pdev,
desc_to_dma(desc),
np->rx_buf_sz,
PCI_DMA_FROMDEVICE);
}
skb->protocol = eth_type_trans (skb, dev);
#if 0
/* Checksum done by hw, but csum value unavailable. */
if (np->pdev->pci_rev_id >= 0x0c &&
!(frame_status & (TCPError | UDPError | IPError))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
#endif
netif_rx (skb);
}
entry = (entry + 1) % RX_RING_SIZE;
}
spin_lock(&np->rx_lock);
np->cur_rx = entry;
/* Re-allocate skbuffs to fill the descriptor ring */
entry = np->old_rx;
while (entry != np->cur_rx) {
struct sk_buff *skb;
/* Dropped packets don't need to re-allocate */
if (np->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
if (skb == NULL) {
np->rx_ring[entry].fraginfo = 0;
printk (KERN_INFO
"%s: receive_packet: "
"Unable to re-allocate Rx skbuff.#%d\n",
dev->name, entry);
break;
}
np->rx_skbuff[entry] = skb;
np->rx_ring[entry].fraginfo =
cpu_to_le64 (pci_map_single
(np->pdev, skb->data, np->rx_buf_sz,
PCI_DMA_FROMDEVICE));
}
np->rx_ring[entry].fraginfo |=
cpu_to_le64((u64)np->rx_buf_sz << 48);
np->rx_ring[entry].status = 0;
entry = (entry + 1) % RX_RING_SIZE;
}
np->old_rx = entry;
spin_unlock(&np->rx_lock);
return 0;
}
static void
rio_error (struct net_device *dev, int int_status)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
u16 macctrl;
/* Link change event */
if (int_status & LinkEvent) {
if (mii_wait_link (dev, 10) == 0) {
printk (KERN_INFO "%s: Link up\n", dev->name);
if (np->phy_media)
mii_get_media_pcs (dev);
else
mii_get_media (dev);
if (np->speed == 1000)
np->tx_coalesce = tx_coalesce;
else
np->tx_coalesce = 1;
macctrl = 0;
macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
macctrl |= (np->full_duplex) ? DuplexSelect : 0;
macctrl |= (np->tx_flow) ?
TxFlowControlEnable : 0;
macctrl |= (np->rx_flow) ?
RxFlowControlEnable : 0;
dw16(MACCtrl, macctrl);
np->link_status = 1;
netif_carrier_on(dev);
} else {
printk (KERN_INFO "%s: Link off\n", dev->name);
np->link_status = 0;
netif_carrier_off(dev);
}
}
/* UpdateStats statistics registers */
if (int_status & UpdateStats) {
get_stats (dev);
}
/* PCI Error: a catastrophic error related to the bus interface
occurred; set GlobalReset and HostReset to recover. */
if (int_status & HostError) {
printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
dev->name, int_status);
dw16(ASICCtrl + 2, GlobalReset | HostReset);
mdelay (500);
}
}
static struct net_device_stats *
get_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
int i;
#endif
unsigned int stat_reg;
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
np->stats.rx_packets += dr32(FramesRcvOk);
np->stats.tx_packets += dr32(FramesXmtOk);
np->stats.rx_bytes += dr32(OctetRcvOk);
np->stats.tx_bytes += dr32(OctetXmtOk);
np->stats.multicast = dr32(McstFramesRcvdOk);
np->stats.collisions += dr32(SingleColFrames)
+ dr32(MultiColFrames);
/* detailed tx errors */
stat_reg = dr16(FramesAbortXSColls);
np->stats.tx_aborted_errors += stat_reg;
np->stats.tx_errors += stat_reg;
stat_reg = dr16(CarrierSenseErrors);
np->stats.tx_carrier_errors += stat_reg;
np->stats.tx_errors += stat_reg;
/* Clear all other statistic register. */
dr32(McstOctetXmtOk);
dr16(BcstFramesXmtdOk);
dr32(McstFramesXmtdOk);
dr16(BcstFramesRcvdOk);
dr16(MacControlFramesRcvd);
dr16(FrameTooLongErrors);
dr16(InRangeLengthErrors);
dr16(FramesCheckSeqErrors);
dr16(FramesLostRxErrors);
dr32(McstOctetXmtOk);
dr32(BcstOctetXmtOk);
dr32(McstFramesXmtdOk);
dr32(FramesWDeferredXmt);
dr32(LateCollisions);
dr16(BcstFramesXmtdOk);
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
#ifdef MEM_MAPPING
for (i = 0x100; i <= 0x150; i += 4)
dr32(i);
#endif
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
dr16(UDPCheckSumErrors);
dr16(IPCheckSumErrors);
return &np->stats;
}
static int
clear_stats (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
int i;
#endif
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */
dr32(FramesRcvOk);
dr32(FramesXmtOk);
dr32(OctetRcvOk);
dr32(OctetXmtOk);
dr32(McstFramesRcvdOk);
dr32(SingleColFrames);
dr32(MultiColFrames);
dr32(LateCollisions);
/* detailed rx errors */
dr16(FrameTooLongErrors);
dr16(InRangeLengthErrors);
dr16(FramesCheckSeqErrors);
dr16(FramesLostRxErrors);
/* detailed tx errors */
dr16(FramesAbortXSColls);
dr16(CarrierSenseErrors);
/* Clear all other statistic register. */
dr32(McstOctetXmtOk);
dr16(BcstFramesXmtdOk);
dr32(McstFramesXmtdOk);
dr16(BcstFramesRcvdOk);
dr16(MacControlFramesRcvd);
dr32(McstOctetXmtOk);
dr32(BcstOctetXmtOk);
dr32(McstFramesXmtdOk);
dr32(FramesWDeferredXmt);
dr16(BcstFramesXmtdOk);
dr16(MacControlFramesXmtd);
dr16(FramesWEXDeferal);
#ifdef MEM_MAPPING
for (i = 0x100; i <= 0x150; i += 4)
dr32(i);
#endif
dr16(TxJumboFrames);
dr16(RxJumboFrames);
dr16(TCPCheckSumErrors);
dr16(UDPCheckSumErrors);
dr16(IPCheckSumErrors);
return 0;
}
static int
change_mtu (struct net_device *dev, int new_mtu)
{
struct netdev_private *np = netdev_priv(dev);
int max = (np->jumbo) ? MAX_JUMBO : 1536;
if ((new_mtu < 68) || (new_mtu > max)) {
return -EINVAL;
}
dev->mtu = new_mtu;
return 0;
}
static void
set_multicast (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
u32 hash_table[2];
u16 rx_mode = 0;
hash_table[0] = hash_table[1] = 0;
/* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
hash_table[1] |= 0x02000000;
if (dev->flags & IFF_PROMISC) {
/* Receive all frames promiscuously. */
rx_mode = ReceiveAllFrames;
} else if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > multicast_filter_limit)) {
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
} else if (!netdev_mc_empty(dev)) {
struct netdev_hw_addr *ha;
/* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
netdev_for_each_mc_addr(ha, dev) {
int bit, index = 0;
int crc = ether_crc_le(ETH_ALEN, ha->addr);
/* The top 6 bits of the CRC, bit-reversed, are
used as the index into the hash table */
for (bit = 0; bit < 6; bit++)
if (crc & (1 << (31 - bit)))
index |= (1 << bit);
hash_table[index / 32] |= (1 << (index % 32));
}
} else {
rx_mode = ReceiveBroadcast | ReceiveUnicast;
}
if (np->vlan) {
/* ReceiveVLANMatch field in ReceiveMode */
rx_mode |= ReceiveVLANMatch;
}
dw32(HashTable0, hash_table[0]);
dw32(HashTable1, hash_table[1]);
dw16(ReceiveMode, rx_mode);
}
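/*
 * Stand-alone version (illustrative only; the driver computes this
 * inline above) of the multicast hash: the low 6 bits of the result are
 * the top 6 bits of the little-endian CRC of the address, bit-reversed,
 * selecting one of 64 positions across the two 32-bit hash registers.
 */
static inline int dl2k_mc_hash_index(const u8 *addr)
{
int bit, index = 0;
int crc = ether_crc_le(ETH_ALEN, addr);
for (bit = 0; bit < 6; bit++)
if (crc & (1 << (31 - bit)))
index |= (1 << bit);
return index;
}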
static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
strlcpy(info->driver, "dl2k", sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct netdev_private *np = netdev_priv(dev);
if (np->phy_media) {
/* fiber device */
cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE;
cmd->port = PORT_FIBRE;
cmd->transceiver = XCVR_INTERNAL;
} else {
/* copper device */
cmd->supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
| SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg | SUPPORTED_MII;
cmd->advertising = ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full|
ADVERTISED_Autoneg | ADVERTISED_MII;
cmd->port = PORT_MII;
cmd->transceiver = XCVR_INTERNAL;
}
if ( np->link_status ) {
ethtool_cmd_speed_set(cmd, np->speed);
cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
} else {
ethtool_cmd_speed_set(cmd, -1);
cmd->duplex = -1;
}
if ( np->an_enable)
cmd->autoneg = AUTONEG_ENABLE;
else
cmd->autoneg = AUTONEG_DISABLE;
cmd->phy_address = np->phy_addr;
return 0;
}
static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct netdev_private *np = netdev_priv(dev);
netif_carrier_off(dev);
if (cmd->autoneg == AUTONEG_ENABLE) {
if (np->an_enable)
return 0;
else {
np->an_enable = 1;
mii_set_media(dev);
return 0;
}
} else {
np->an_enable = 0;
if (np->speed == 1000) {
ethtool_cmd_speed_set(cmd, SPEED_100);
cmd->duplex = DUPLEX_FULL;
printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
}
switch (ethtool_cmd_speed(cmd)) {
case SPEED_10:
np->speed = 10;
np->full_duplex = (cmd->duplex == DUPLEX_FULL);
break;
case SPEED_100:
np->speed = 100;
np->full_duplex = (cmd->duplex == DUPLEX_FULL);
break;
case SPEED_1000: /* not supported */
default:
return -EINVAL;
}
mii_set_media(dev);
}
return 0;
}
static u32 rio_get_link(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
return np->link_status;
}
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = rio_get_drvinfo,
.get_settings = rio_get_settings,
.set_settings = rio_set_settings,
.get_link = rio_get_link,
};
static int
rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
int phy_addr;
struct netdev_private *np = netdev_priv(dev);
struct mii_ioctl_data *miidata = if_mii(rq);
phy_addr = np->phy_addr;
switch (cmd) {
case SIOCGMIIPHY:
miidata->phy_id = phy_addr;
break;
case SIOCGMIIREG:
miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
break;
case SIOCSMIIREG:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
#define EEP_READ 0x0200
#define EEP_BUSY 0x8000
/* Read the EEPROM word */
/* We use I/O instructions to read/write the EEPROM to avoid failures on some machines */
static int read_eeprom(struct netdev_private *np, int eep_addr)
{
void __iomem *ioaddr = np->eeprom_addr;
int i = 1000;
dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
while (i-- > 0) {
if (!(dr16(EepromCtrl) & EEP_BUSY))
return dr16(EepromData);
}
return 0;
}
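/* Editor's note: the poll above spins up to 1000 iterations on EEP_BUSY
 * with no delay between reads, and a timeout silently returns 0; the
 * CRC check in parse_eeprom() is what ultimately catches a dead or
 * absent EEPROM on D-Link boards. */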
enum phy_ctrl_bits {
MII_READ = 0x00, MII_CLK = 0x01, MII_DATA1 = 0x02, MII_WRITE = 0x04,
MII_DUPLEX = 0x08,
};
#define mii_delay() dr8(PhyCtrl)
static void
mii_sendbit (struct net_device *dev, u32 data)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
dw8(PhyCtrl, data);
mii_delay ();
dw8(PhyCtrl, data | MII_CLK);
mii_delay ();
}
static int
mii_getbit (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
u8 data;
data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
dw8(PhyCtrl, data);
mii_delay ();
dw8(PhyCtrl, data | MII_CLK);
mii_delay ();
return (dr8(PhyCtrl) >> 1) & 1;
}
static void
mii_send_bits (struct net_device *dev, u32 data, int len)
{
int i;
for (i = len - 1; i >= 0; i--) {
mii_sendbit (dev, data & (1 << i));
}
}
static int
mii_read (struct net_device *dev, int phy_addr, int reg_num)
{
u32 cmd;
int i;
u32 retval = 0;
/* Preamble */
mii_send_bits (dev, 0xffffffff, 32);
/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
/* ST,OP = 0110'b for read operation */
cmd = (0x06 << 10 | phy_addr << 5 | reg_num);
mii_send_bits (dev, cmd, 14);
/* Turnaround */
if (mii_getbit (dev))
goto err_out;
/* Read data */
for (i = 0; i < 16; i++) {
retval |= mii_getbit (dev);
retval <<= 1;
}
/* End cycle */
mii_getbit (dev);
return (retval >> 1) & 0xffff;
err_out:
return 0;
}
static int
mii_write (struct net_device *dev, int phy_addr, int reg_num, u16 data)
{
u32 cmd;
/* Preamble */
mii_send_bits (dev, 0xffffffff, 32);
/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
cmd = (0x5002 << 16) | (phy_addr << 23) | (reg_num << 18) | data;
mii_send_bits (dev, cmd, 32);
/* End cycle */
mii_getbit (dev);
return 0;
}
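/*
 * Worked example for the bit-banged MDIO framing above (illustrative):
 * reading register 1 of PHY 3 makes mii_read() build the 14-bit header
 *   (0x06 << 10) | (3 << 5) | 1 == 0x1861,
 * i.e. binary 01 10 00011 00001 — ST=01, OP=10 (read), ADDR=00011,
 * REG=00001 — after which the bus turns around and 16 data bits are
 * clocked in by mii_getbit().
 */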
static int
mii_wait_link (struct net_device *dev, int wait)
{
__u16 bmsr;
int phy_addr;
struct netdev_private *np;
np = netdev_priv(dev);
phy_addr = np->phy_addr;
do {
bmsr = mii_read (dev, phy_addr, MII_BMSR);
if (bmsr & BMSR_LSTATUS)
return 0;
mdelay (1);
} while (--wait > 0);
return -1;
}
static int
mii_get_media (struct net_device *dev)
{
__u16 negotiate;
__u16 bmsr;
__u16 mscr;
__u16 mssr;
int phy_addr;
struct netdev_private *np;
np = netdev_priv(dev);
phy_addr = np->phy_addr;
bmsr = mii_read (dev, phy_addr, MII_BMSR);
if (np->an_enable) {
if (!(bmsr & BMSR_ANEGCOMPLETE)) {
/* Auto-Negotiation not completed */
return -1;
}
negotiate = mii_read (dev, phy_addr, MII_ADVERTISE) &
mii_read (dev, phy_addr, MII_LPA);
mscr = mii_read (dev, phy_addr, MII_CTRL1000);
mssr = mii_read (dev, phy_addr, MII_STAT1000);
if (mscr & ADVERTISE_1000FULL && mssr & LPA_1000FULL) {
np->speed = 1000;
np->full_duplex = 1;
printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
} else if (mscr & ADVERTISE_1000HALF && mssr & LPA_1000HALF) {
np->speed = 1000;
np->full_duplex = 0;
printk (KERN_INFO "Auto 1000 Mbps, Half duplex\n");
} else if (negotiate & ADVERTISE_100FULL) {
np->speed = 100;
np->full_duplex = 1;
printk (KERN_INFO "Auto 100 Mbps, Full duplex\n");
} else if (negotiate & ADVERTISE_100HALF) {
np->speed = 100;
np->full_duplex = 0;
printk (KERN_INFO "Auto 100 Mbps, Half duplex\n");
} else if (negotiate & ADVERTISE_10FULL) {
np->speed = 10;
np->full_duplex = 1;
printk (KERN_INFO "Auto 10 Mbps, Full duplex\n");
} else if (negotiate & ADVERTISE_10HALF) {
np->speed = 10;
np->full_duplex = 0;
printk (KERN_INFO "Auto 10 Mbps, Half duplex\n");
}
if (negotiate & ADVERTISE_PAUSE_CAP) {
np->tx_flow &= 1;
np->rx_flow &= 1;
} else if (negotiate & ADVERTISE_PAUSE_ASYM) {
np->tx_flow = 0;
np->rx_flow &= 1;
}
/* else tx_flow, rx_flow = user select */
} else {
__u16 bmcr = mii_read (dev, phy_addr, MII_BMCR);
switch (bmcr & (BMCR_SPEED100 | BMCR_SPEED1000)) {
case BMCR_SPEED1000:
printk (KERN_INFO "Operating at 1000 Mbps, ");
break;
case BMCR_SPEED100:
printk (KERN_INFO "Operating at 100 Mbps, ");
break;
case 0:
printk (KERN_INFO "Operating at 10 Mbps, ");
}
if (bmcr & BMCR_FULLDPLX) {
printk (KERN_CONT "Full duplex\n");
} else {
printk (KERN_CONT "Half duplex\n");
}
}
if (np->tx_flow)
printk(KERN_INFO "Enable Tx Flow Control\n");
else
printk(KERN_INFO "Disable Tx Flow Control\n");
if (np->rx_flow)
printk(KERN_INFO "Enable Rx Flow Control\n");
else
printk(KERN_INFO "Disable Rx Flow Control\n");
return 0;
}
static int
mii_set_media (struct net_device *dev)
{
__u16 pscr;
__u16 bmcr;
__u16 bmsr;
__u16 anar;
int phy_addr;
struct netdev_private *np;
np = netdev_priv(dev);
phy_addr = np->phy_addr;
/* Does user set speed? */
if (np->an_enable) {
/* Advertise capabilities */
bmsr = mii_read (dev, phy_addr, MII_BMSR);
anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
~(ADVERTISE_100FULL | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_10HALF |
ADVERTISE_100BASE4);
if (bmsr & BMSR_100FULL)
anar |= ADVERTISE_100FULL;
if (bmsr & BMSR_100HALF)
anar |= ADVERTISE_100HALF;
if (bmsr & BMSR_100BASE4)
anar |= ADVERTISE_100BASE4;
if (bmsr & BMSR_10FULL)
anar |= ADVERTISE_10FULL;
if (bmsr & BMSR_10HALF)
anar |= ADVERTISE_10HALF;
anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
mii_write (dev, phy_addr, MII_ADVERTISE, anar);
/* Enable Auto crossover */
pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
pscr |= 3 << 5; /* 11'b */
mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
/* Soft reset PHY */
mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
mii_write (dev, phy_addr, MII_BMCR, bmcr);
mdelay(1);
} else {
/* Force speed setting */
/* 1) Disable Auto crossover */
pscr = mii_read (dev, phy_addr, MII_PHY_SCR);
pscr &= ~(3 << 5);
mii_write (dev, phy_addr, MII_PHY_SCR, pscr);
/* 2) PHY Reset */
bmcr = mii_read (dev, phy_addr, MII_BMCR);
bmcr |= BMCR_RESET;
mii_write (dev, phy_addr, MII_BMCR, bmcr);
/* 3) Power Down */
bmcr = 0x1940; /* must be 0x1940 */
mii_write (dev, phy_addr, MII_BMCR, bmcr);
mdelay (100); /* wait a certain time */
/* 4) Advertise nothing */
mii_write (dev, phy_addr, MII_ADVERTISE, 0);
/* 5) Set media and Power Up */
bmcr = BMCR_PDOWN;
if (np->speed == 100) {
bmcr |= BMCR_SPEED100;
printk (KERN_INFO "Manual 100 Mbps, ");
} else if (np->speed == 10) {
printk (KERN_INFO "Manual 10 Mbps, ");
}
if (np->full_duplex) {
bmcr |= BMCR_FULLDPLX;
printk (KERN_CONT "Full duplex\n");
} else {
printk (KERN_CONT "Half duplex\n");
}
#if 0
/* Set 1000BaseT Master/Slave setting */
mscr = mii_read (dev, phy_addr, MII_CTRL1000);
mscr |= MII_MSCR_CFG_ENABLE;
mscr &= ~MII_MSCR_CFG_VALUE = 0;
#endif
mii_write (dev, phy_addr, MII_BMCR, bmcr);
mdelay(10);
}
return 0;
}
static int
mii_get_media_pcs (struct net_device *dev)
{
__u16 negotiate;
__u16 bmsr;
int phy_addr;
struct netdev_private *np;
np = netdev_priv(dev);
phy_addr = np->phy_addr;
bmsr = mii_read (dev, phy_addr, PCS_BMSR);
if (np->an_enable) {
if (!(bmsr & BMSR_ANEGCOMPLETE)) {
/* Auto-Negotiation not completed */
return -1;
}
negotiate = mii_read (dev, phy_addr, PCS_ANAR) &
mii_read (dev, phy_addr, PCS_ANLPAR);
np->speed = 1000;
if (negotiate & PCS_ANAR_FULL_DUPLEX) {
printk (KERN_INFO "Auto 1000 Mbps, Full duplex\n");
np->full_duplex = 1;
} else {
printk (KERN_INFO "Auto 1000 Mbps, half duplex\n");
np->full_duplex = 0;
}
if (negotiate & PCS_ANAR_PAUSE) {
np->tx_flow &= 1;
np->rx_flow &= 1;
} else if (negotiate & PCS_ANAR_ASYMMETRIC) {
np->tx_flow = 0;
np->rx_flow &= 1;
}
/* else tx_flow, rx_flow = user select */
} else {
__u16 bmcr = mii_read (dev, phy_addr, PCS_BMCR);
printk (KERN_INFO "Operating at 1000 Mbps, ");
if (bmcr & BMCR_FULLDPLX) {
printk (KERN_CONT "Full duplex\n");
} else {
printk (KERN_CONT "Half duplex\n");
}
}
if (np->tx_flow)
printk(KERN_INFO "Enable Tx Flow Control\n");
else
printk(KERN_INFO "Disable Tx Flow Control\n");
if (np->rx_flow)
printk(KERN_INFO "Enable Rx Flow Control\n");
else
printk(KERN_INFO "Disable Rx Flow Control\n");
return 0;
}
static int
mii_set_media_pcs (struct net_device *dev)
{
__u16 bmcr;
__u16 esr;
__u16 anar;
int phy_addr;
struct netdev_private *np;
np = netdev_priv(dev);
phy_addr = np->phy_addr;
/* Auto-Negotiation? */
if (np->an_enable) {
/* Advertise capabilities */
esr = mii_read (dev, phy_addr, PCS_ESR);
anar = mii_read (dev, phy_addr, MII_ADVERTISE) &
~PCS_ANAR_HALF_DUPLEX &
~PCS_ANAR_FULL_DUPLEX;
if (esr & (MII_ESR_1000BT_HD | MII_ESR_1000BX_HD))
anar |= PCS_ANAR_HALF_DUPLEX;
if (esr & (MII_ESR_1000BT_FD | MII_ESR_1000BX_FD))
anar |= PCS_ANAR_FULL_DUPLEX;
anar |= PCS_ANAR_PAUSE | PCS_ANAR_ASYMMETRIC;
mii_write (dev, phy_addr, MII_ADVERTISE, anar);
/* Soft reset PHY */
mii_write (dev, phy_addr, MII_BMCR, BMCR_RESET);
bmcr = BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET;
mii_write (dev, phy_addr, MII_BMCR, bmcr);
mdelay(1);
} else {
/* Force speed setting */
/* PHY Reset */
bmcr = BMCR_RESET;
mii_write (dev, phy_addr, MII_BMCR, bmcr);
mdelay(10);
if (np->full_duplex) {
bmcr = BMCR_FULLDPLX;
printk (KERN_INFO "Manual full duplex\n");
} else {
bmcr = 0;
printk (KERN_INFO "Manual half duplex\n");
}
mii_write (dev, phy_addr, MII_BMCR, bmcr);
mdelay(10);
/* Advertise nothing */
mii_write (dev, phy_addr, MII_ADVERTISE, 0);
}
return 0;
}
static int
rio_close (struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->ioaddr;
struct pci_dev *pdev = np->pdev;
struct sk_buff *skb;
int i;
netif_stop_queue (dev);
/* Disable interrupts */
dw16(IntEnable, 0);
/* Stop Tx and Rx logics */
dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
free_irq(pdev->irq, dev);
del_timer_sync (&np->timer);
/* Free all the skbuffs in the queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
skb = np->rx_skbuff[i];
if (skb) {
pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
skb->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb (skb);
np->rx_skbuff[i] = NULL;
}
np->rx_ring[i].status = 0;
np->rx_ring[i].fraginfo = 0;
}
for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i];
if (skb) {
pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb (skb);
np->tx_skbuff[i] = NULL;
}
}
return 0;
}
static void
rio_remove1 (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
if (dev) {
struct netdev_private *np = netdev_priv(dev);
unregister_netdev (dev);
pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
np->rx_ring_dma);
pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
np->tx_ring_dma);
#ifdef MEM_MAPPING
pci_iounmap(pdev, np->ioaddr);
#endif
pci_iounmap(pdev, np->eeprom_addr);
free_netdev (dev);
pci_release_regions (pdev);
pci_disable_device (pdev);
}
pci_set_drvdata (pdev, NULL);
}
static struct pci_driver rio_driver = {
.name = "dl2k",
.id_table = rio_pci_tbl,
.probe = rio_probe1,
.remove = rio_remove1,
};
module_pci_driver(rio_driver);
/*
Compile command:
gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c
Read Documentation/networking/dl2k.txt for details.
*/
| gpl-2.0 |
AOSParadox/android_kernel_cyanogen_msm8916 | arch/powerpc/platforms/powermac/pic.c | 2242 | 17785 | /*
* Support for the interrupt controllers found on Power Macintosh,
* currently Apple's "Grand Central" interrupt controller in all
* its incarnations. OpenPIC support used on newer machines is
* in a separate file
*
* Copyright (C) 1997 Paul Mackerras (paulus@samba.org)
* Copyright (C) 2005 Benjamin Herrenschmidt (benh@kernel.crashing.org)
* IBM, Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/time.h>
#include <asm/pmac_feature.h>
#include <asm/mpic.h>
#include <asm/xmon.h>
#include "pmac.h"
#ifdef CONFIG_PPC32
struct pmac_irq_hw {
unsigned int event;
unsigned int enable;
unsigned int ack;
unsigned int level;
};
/* Workaround flags for 32bit powermac machines */
unsigned int of_irq_workarounds;
struct device_node *of_irq_dflt_pic;
/* Default addresses */
static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4];
static int max_irqs;
static int max_real_irqs;
static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
/* The max irq number this driver deals with is 128; see max_irqs */
static DECLARE_BITMAP(ppc_lost_interrupts, 128);
static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
static int pmac_irq_cascade = -1;
static struct irq_domain *pmac_pic_host;
static void __pmac_retrigger(unsigned int irq_nr)
{
if (irq_nr >= max_real_irqs && pmac_irq_cascade > 0) {
__set_bit(irq_nr, ppc_lost_interrupts);
irq_nr = pmac_irq_cascade;
mb();
}
if (!__test_and_set_bit(irq_nr, ppc_lost_interrupts)) {
atomic_inc(&ppc_n_lost_interrupts);
set_dec(1);
}
}
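/*
 * Editor's sketch of the replay mechanism: set_dec(1) above arms the
 * decrementer to fire almost immediately, and the 32-bit timer
 * interrupt path consults ppc_n_lost_interrupts to replay the sources
 * recorded in ppc_lost_interrupts — which is how an interrupt noted
 * here while masked gets re-delivered once it is unmasked.
 */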
static void pmac_mask_and_ack_irq(struct irq_data *d)
{
unsigned int src = irqd_to_hwirq(d);
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
unsigned long flags;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__clear_bit(src, ppc_cached_irq_mask);
if (__test_and_clear_bit(src, ppc_lost_interrupts))
atomic_dec(&ppc_n_lost_interrupts);
out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
out_le32(&pmac_irq_hw[i]->ack, bit);
do {
/* make sure ack gets to controller before we enable
interrupts */
mb();
} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
!= (ppc_cached_irq_mask[i] & bit));
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static void pmac_ack_irq(struct irq_data *d)
{
unsigned int src = irqd_to_hwirq(d);
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
unsigned long flags;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
if (__test_and_clear_bit(src, ppc_lost_interrupts))
atomic_dec(&ppc_n_lost_interrupts);
out_le32(&pmac_irq_hw[i]->ack, bit);
(void)in_le32(&pmac_irq_hw[i]->ack);
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost)
{
unsigned long bit = 1UL << (irq_nr & 0x1f);
int i = irq_nr >> 5;
if ((unsigned)irq_nr >= max_irqs)
return;
/* enable unmasked interrupts */
out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]);
do {
/* make sure mask gets to controller before we
return to user */
mb();
} while((in_le32(&pmac_irq_hw[i]->enable) & bit)
!= (ppc_cached_irq_mask[i] & bit));
/*
* Unfortunately, setting the bit in the enable register
* when the device interrupt is already on *doesn't* set
* the bit in the flag register or request another interrupt.
*/
if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level))
__pmac_retrigger(irq_nr);
}
/* When an irq gets requested for the first client, if it's an
* edge interrupt, we clear any previous one on the controller
*/
static unsigned int pmac_startup_irq(struct irq_data *d)
{
unsigned long flags;
unsigned int src = irqd_to_hwirq(d);
unsigned long bit = 1UL << (src & 0x1f);
int i = src >> 5;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
if (!irqd_is_level_type(d))
out_le32(&pmac_irq_hw[i]->ack, bit);
__set_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 0);
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
return 0;
}
static void pmac_mask_irq(struct irq_data *d)
{
unsigned long flags;
unsigned int src = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__clear_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 1);
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static void pmac_unmask_irq(struct irq_data *d)
{
unsigned long flags;
unsigned int src = irqd_to_hwirq(d);
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__set_bit(src, ppc_cached_irq_mask);
__pmac_set_irq_mask(src, 0);
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
}
static int pmac_retrigger(struct irq_data *d)
{
unsigned long flags;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
__pmac_retrigger(irqd_to_hwirq(d));
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
return 1;
}
static struct irq_chip pmac_pic = {
.name = "PMAC-PIC",
.irq_startup = pmac_startup_irq,
.irq_mask = pmac_mask_irq,
.irq_ack = pmac_ack_irq,
.irq_mask_ack = pmac_mask_and_ack_irq,
.irq_unmask = pmac_unmask_irq,
.irq_retrigger = pmac_retrigger,
};
static irqreturn_t gatwick_action(int cpl, void *dev_id)
{
unsigned long flags;
int irq, bits;
int rc = IRQ_NONE;
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) {
int i = irq >> 5;
bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
bits |= in_le32(&pmac_irq_hw[i]->level);
bits &= ppc_cached_irq_mask[i];
if (bits == 0)
continue;
irq += __ilog2(bits);
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
generic_handle_irq(irq);
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
rc = IRQ_HANDLED;
}
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
return rc;
}
static unsigned int pmac_pic_get_irq(void)
{
int irq;
unsigned long bits = 0;
unsigned long flags;
#ifdef CONFIG_PPC_PMAC32_PSURGE
/* IPI's are a hack on the powersurge -- Cort */
if (smp_processor_id() != 0) {
return psurge_secondary_virq;
}
#endif /* CONFIG_PPC_PMAC32_PSURGE */
raw_spin_lock_irqsave(&pmac_pic_lock, flags);
for (irq = max_real_irqs; (irq -= 32) >= 0; ) {
int i = irq >> 5;
bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i];
bits |= in_le32(&pmac_irq_hw[i]->level);
bits &= ppc_cached_irq_mask[i];
if (bits == 0)
continue;
irq += __ilog2(bits);
break;
}
raw_spin_unlock_irqrestore(&pmac_pic_lock, flags);
if (unlikely(irq < 0))
return NO_IRQ;
return irq_linear_revmap(pmac_pic_host, irq);
}
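/*
 * Editor's note: both scan loops above treat the controllers as an
 * array of 32-bit banks — hwirq n lives in bank n >> 5, bit n & 0x1f —
 * and __ilog2(bits) picks the highest-numbered pending source in a
 * bank, so higher interrupt numbers win within each bank.
 */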
#ifdef CONFIG_XMON
static struct irqaction xmon_action = {
.handler = xmon_irq,
.flags = 0,
.name = "NMI - XMON"
};
#endif
static struct irqaction gatwick_cascade_action = {
.handler = gatwick_action,
.name = "cascade",
};
static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node)
{
/* We match all, we don't always have a node anyway */
return 1;
}
static int pmac_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
if (hw >= max_irqs)
return -EINVAL;
/* Mark level interrupts, set delayed disable for edge ones and set
* handlers
*/
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &pmac_pic, handle_level_irq);
return 0;
}
static const struct irq_domain_ops pmac_pic_host_ops = {
.match = pmac_pic_host_match,
.map = pmac_pic_host_map,
.xlate = irq_domain_xlate_onecell,
};
static void __init pmac_pic_probe_oldstyle(void)
{
int i;
struct device_node *master = NULL;
struct device_node *slave = NULL;
u8 __iomem *addr;
struct resource r;
/* Set our get_irq function */
ppc_md.get_irq = pmac_pic_get_irq;
/*
* Find the interrupt controller type & node
*/
if ((master = of_find_node_by_name(NULL, "gc")) != NULL) {
max_irqs = max_real_irqs = 32;
} else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) {
max_irqs = max_real_irqs = 32;
/* We might have a second cascaded ohare */
slave = of_find_node_by_name(NULL, "pci106b,7");
if (slave)
max_irqs = 64;
} else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) {
max_irqs = max_real_irqs = 64;
/* We might have a second cascaded heathrow */
slave = of_find_node_by_name(master, "mac-io");
/* Check ordering of master & slave */
if (of_device_is_compatible(master, "gatwick")) {
struct device_node *tmp;
BUG_ON(slave == NULL);
tmp = master;
master = slave;
slave = tmp;
}
/* We found a slave */
if (slave)
max_irqs = 128;
}
BUG_ON(master == NULL);
/*
* Allocate an irq host
*/
pmac_pic_host = irq_domain_add_linear(master, max_irqs,
&pmac_pic_host_ops, NULL);
BUG_ON(pmac_pic_host == NULL);
irq_set_default_host(pmac_pic_host);
/* Get addresses of first controller if we have a node for it */
BUG_ON(of_address_to_resource(master, 0, &r));
/* Map interrupts of primary controller */
addr = (u8 __iomem *) ioremap(r.start, 0x40);
i = 0;
pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
(addr + 0x20);
if (max_real_irqs > 32)
pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
(addr + 0x10);
printk(KERN_INFO "irq: Found primary Apple PIC %s for %d irqs\n",
master->full_name, max_real_irqs);
of_node_put(master);
/* Map interrupts of cascaded controller */
if (slave && !of_address_to_resource(slave, 0, &r)) {
addr = (u8 __iomem *)ioremap(r.start, 0x40);
pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *)
(addr + 0x20);
if (max_irqs > 64)
pmac_irq_hw[i++] =
(volatile struct pmac_irq_hw __iomem *)
(addr + 0x10);
pmac_irq_cascade = irq_of_parse_and_map(slave, 0);
printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs"
" cascade: %d\n", slave->full_name,
max_irqs - max_real_irqs, pmac_irq_cascade);
}
of_node_put(slave);
/* Disable all interrupts in all controllers */
for (i = 0; i * 32 < max_irqs; ++i)
out_le32(&pmac_irq_hw[i]->enable, 0);
/* Hookup cascade irq */
if (slave && pmac_irq_cascade != NO_IRQ)
setup_irq(pmac_irq_cascade, &gatwick_cascade_action);
printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
#ifdef CONFIG_XMON
setup_irq(irq_create_mapping(NULL, 20), &xmon_action);
#endif
}
int of_irq_map_oldworld(struct device_node *device, int index,
struct of_irq *out_irq)
{
const u32 *ints = NULL;
int intlen;
/*
* Old machines just have a list of interrupt numbers
* and no interrupt-controller nodes. We also have dodgy
* cases where the APPL,interrupts property is completely
* missing behind pci-pci bridges and we have to get it
* from the parent (the bridge itself, as apple just wired
* everything together on these)
*/
while (device) {
ints = of_get_property(device, "AAPL,interrupts", &intlen);
if (ints != NULL)
break;
device = device->parent;
if (device && strcmp(device->type, "pci") != 0)
break;
}
if (ints == NULL)
return -EINVAL;
intlen /= sizeof(u32);
if (index >= intlen)
return -EINVAL;
out_irq->controller = NULL;
out_irq->specifier[0] = ints[index];
out_irq->size = 1;
return 0;
}
#endif /* CONFIG_PPC32 */
static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
{
#if defined(CONFIG_XMON) && defined(CONFIG_PPC32)
struct device_node* pswitch;
int nmi_irq;
pswitch = of_find_node_by_name(NULL, "programmer-switch");
if (pswitch) {
nmi_irq = irq_of_parse_and_map(pswitch, 0);
if (nmi_irq != NO_IRQ) {
mpic_irq_set_priority(nmi_irq, 9);
setup_irq(nmi_irq, &xmon_action);
}
of_node_put(pswitch);
}
#endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */
}
static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
int master)
{
const char *name = master ? " MPIC 1 " : " MPIC 2 ";
struct mpic *mpic;
unsigned int flags = master ? 0 : MPIC_SECONDARY;
pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);
if (of_get_property(np, "big-endian", NULL))
flags |= MPIC_BIG_ENDIAN;
/* Primary Big Endian means HT interrupts. This is quite dodgy
* but works until I find a better way
*/
if (master && (flags & MPIC_BIG_ENDIAN))
flags |= MPIC_U3_HT_IRQS;
mpic = mpic_alloc(np, 0, flags, 0, 0, name);
if (mpic == NULL)
return NULL;
mpic_init(mpic);
return mpic;
}
static int __init pmac_pic_probe_mpic(void)
{
struct mpic *mpic1, *mpic2;
struct device_node *np, *master = NULL, *slave = NULL;
/* We can have up to 2 MPICs cascaded */
for (np = NULL; (np = of_find_node_by_type(np, "open-pic"))
!= NULL;) {
if (master == NULL &&
of_get_property(np, "interrupts", NULL) == NULL)
master = of_node_get(np);
else if (slave == NULL)
slave = of_node_get(np);
if (master && slave)
break;
}
/* Check for bogus setups */
if (master == NULL && slave != NULL) {
master = slave;
slave = NULL;
}
/* Not found, default to good old pmac pic */
if (master == NULL)
return -ENODEV;
/* Set master handler */
ppc_md.get_irq = mpic_get_irq;
/* Setup master */
mpic1 = pmac_setup_one_mpic(master, 1);
BUG_ON(mpic1 == NULL);
/* Install NMI if any */
pmac_pic_setup_mpic_nmi(mpic1);
of_node_put(master);
/* Set up a cascaded controller, if present */
if (slave) {
mpic2 = pmac_setup_one_mpic(slave, 0);
if (mpic2 == NULL)
printk(KERN_ERR "Failed to setup slave MPIC\n");
of_node_put(slave);
}
return 0;
}
void __init pmac_pic_init(void)
{
/* We configure the OF parsing based on our oldworld vs. newworld
* platform type and whether we were booted by BootX.
*/
#ifdef CONFIG_PPC32
if (!pmac_newworld)
of_irq_workarounds |= OF_IMAP_OLDWORLD_MAC;
if (of_get_property(of_chosen, "linux,bootx", NULL) != NULL)
of_irq_workarounds |= OF_IMAP_NO_PHANDLE;
/* If we don't have phandles on a newworld, then try to locate a
* default interrupt controller (happens when booting with BootX).
* We do a first match here, hopefully, that only ever happens on
* machines with one controller.
*/
if (pmac_newworld && (of_irq_workarounds & OF_IMAP_NO_PHANDLE)) {
struct device_node *np;
for_each_node_with_property(np, "interrupt-controller") {
/* Skip /chosen/interrupt-controller */
if (strcmp(np->name, "chosen") == 0)
continue;
/* It seems like at least one person wants
* to use BootX on a machine with an AppleKiwi
* controller which happens to pretend to be an
* interrupt controller too. */
if (strcmp(np->name, "AppleKiwi") == 0)
continue;
/* I think we found one ! */
of_irq_dflt_pic = np;
break;
}
}
#endif /* CONFIG_PPC32 */
/* We first try to detect Apple's new Core99 chipset, since mac-io
* is quite different on those machines and contains an IBM MPIC2.
*/
if (pmac_pic_probe_mpic() == 0)
return;
#ifdef CONFIG_PPC32
pmac_pic_probe_oldstyle();
#endif
}
#if defined(CONFIG_PM) && defined(CONFIG_PPC32)
/*
* These procedures are used in implementing sleep on the powerbooks.
* pmacpic_suspend() saves the states of all interrupt enables
* and disables all interrupts except for the nominated one.
* pmacpic_resume() restores the states of all interrupt enables.
*/
unsigned long sleep_save_mask[2];
/* This used to be passed by the PMU driver but that link got
* broken with the new driver model. We use this tweak for now...
* We really want to do things differently though...
*/
static int pmacpic_find_viaint(void)
{
int viaint = -1;
#ifdef CONFIG_ADB_PMU
struct device_node *np;
if (pmu_get_model() != PMU_OHARE_BASED)
goto not_found;
np = of_find_node_by_name(NULL, "via-pmu");
if (np == NULL)
goto not_found;
viaint = irq_of_parse_and_map(np, 0);
not_found:
#endif /* CONFIG_ADB_PMU */
return viaint;
}
static int pmacpic_suspend(void)
{
int viaint = pmacpic_find_viaint();
sleep_save_mask[0] = ppc_cached_irq_mask[0];
sleep_save_mask[1] = ppc_cached_irq_mask[1];
ppc_cached_irq_mask[0] = 0;
ppc_cached_irq_mask[1] = 0;
if (viaint > 0)
set_bit(viaint, ppc_cached_irq_mask);
out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]);
if (max_real_irqs > 32)
out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]);
(void)in_le32(&pmac_irq_hw[0]->event);
/* make sure mask gets to controller before we return to caller */
mb();
(void)in_le32(&pmac_irq_hw[0]->enable);
return 0;
}
static void pmacpic_resume(void)
{
int i;
out_le32(&pmac_irq_hw[0]->enable, 0);
if (max_real_irqs > 32)
out_le32(&pmac_irq_hw[1]->enable, 0);
mb();
for (i = 0; i < max_real_irqs; ++i)
if (test_bit(i, sleep_save_mask))
pmac_unmask_irq(irq_get_irq_data(i));
}
static struct syscore_ops pmacpic_syscore_ops = {
.suspend = pmacpic_suspend,
.resume = pmacpic_resume,
};
static int __init init_pmacpic_syscore(void)
{
if (pmac_irq_hw[0])
register_syscore_ops(&pmacpic_syscore_ops);
return 0;
}
machine_subsys_initcall(powermac, init_pmacpic_syscore);
#endif /* CONFIG_PM && CONFIG_PPC32 */
| gpl-2.0 |
371816210/kk44 | fs/ext3/symlink.c | 4290 | 1397 | /*
* linux/fs/ext3/symlink.c
*
* Only fast symlinks left here - the rest is done by generic code. AV, 1999
*
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
*
* from
*
* linux/fs/minix/symlink.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* ext3 symlink handling code
*/
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/ext3_fs.h>
#include <linux/namei.h>
#include "xattr.h"
static void * ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct ext3_inode_info *ei = EXT3_I(dentry->d_inode);
nd_set_link(nd, (char*)ei->i_data);
return NULL;
}
const struct inode_operations ext3_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.setattr = ext3_setattr,
#ifdef CONFIG_EXT3_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = ext3_listxattr,
.removexattr = generic_removexattr,
#endif
};
const struct inode_operations ext3_fast_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = ext3_follow_link,
.setattr = ext3_setattr,
#ifdef CONFIG_EXT3_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
.listxattr = ext3_listxattr,
.removexattr = generic_removexattr,
#endif
};
| gpl-2.0 |
CyanogenMod/android_kernel_bn_acclaim | net/rose/rose_in.c | 5058 | 7665 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*
* Most of this code is based on the SDL diagrams published in the 7th ARRL
* Computer Networking Conference papers. The diagrams have mistakes in them,
* but are mostly correct. Before you modify the code could you read the SDL
* diagrams as the code is not obvious and probably very easy to break.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/rose.h>
/*
* State machine for state 1, Awaiting Call Accepted State.
* The handling of the timer(s) is in file rose_timer.c.
* Handling of state 0 and connection release is in af_rose.c.
*/
static int rose_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
struct rose_sock *rose = rose_sk(sk);
switch (frametype) {
case ROSE_CALL_ACCEPTED:
rose_stop_timer(sk);
rose_start_idletimer(sk);
rose->condition = 0x00;
rose->vs = 0;
rose->va = 0;
rose->vr = 0;
rose->vl = 0;
rose->state = ROSE_STATE_3;
sk->sk_state = TCP_ESTABLISHED;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_state_change(sk);
break;
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
rose->neighbour->use--;
break;
default:
break;
}
return 0;
}
/*
* State machine for state 2, Awaiting Clear Confirmation State.
* The handling of the timer(s) is in file rose_timer.c
* Handling of state 0 and connection release is in af_rose.c.
*/
static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
struct rose_sock *rose = rose_sk(sk);
switch (frametype) {
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
rose->neighbour->use--;
break;
case ROSE_CLEAR_CONFIRMATION:
rose_disconnect(sk, 0, -1, -1);
rose->neighbour->use--;
break;
default:
break;
}
return 0;
}
/*
* State machine for state 3, Connected State.
* The handling of the timer(s) is in file rose_timer.c
* Handling of state 0 and connection release is in af_rose.c.
*/
static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
{
struct rose_sock *rose = rose_sk(sk);
int queued = 0;
switch (frametype) {
case ROSE_RESET_REQUEST:
rose_stop_timer(sk);
rose_start_idletimer(sk);
rose_write_internal(sk, ROSE_RESET_CONFIRMATION);
rose->condition = 0x00;
rose->vs = 0;
rose->vr = 0;
rose->va = 0;
rose->vl = 0;
rose_requeue_frames(sk);
break;
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
rose->neighbour->use--;
break;
case ROSE_RR:
case ROSE_RNR:
if (!rose_validate_nr(sk, nr)) {
rose_write_internal(sk, ROSE_RESET_REQUEST);
rose->condition = 0x00;
rose->vs = 0;
rose->vr = 0;
rose->va = 0;
rose->vl = 0;
rose->state = ROSE_STATE_4;
rose_start_t2timer(sk);
rose_stop_idletimer(sk);
} else {
rose_frames_acked(sk, nr);
if (frametype == ROSE_RNR) {
rose->condition |= ROSE_COND_PEER_RX_BUSY;
} else {
rose->condition &= ~ROSE_COND_PEER_RX_BUSY;
}
}
break;
case ROSE_DATA: /* XXX */
rose->condition &= ~ROSE_COND_PEER_RX_BUSY;
if (!rose_validate_nr(sk, nr)) {
rose_write_internal(sk, ROSE_RESET_REQUEST);
rose->condition = 0x00;
rose->vs = 0;
rose->vr = 0;
rose->va = 0;
rose->vl = 0;
rose->state = ROSE_STATE_4;
rose_start_t2timer(sk);
rose_stop_idletimer(sk);
break;
}
rose_frames_acked(sk, nr);
if (ns == rose->vr) {
rose_start_idletimer(sk);
if (sock_queue_rcv_skb(sk, skb) == 0) {
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
queued = 1;
} else {
/* Should never happen! */
rose_write_internal(sk, ROSE_RESET_REQUEST);
rose->condition = 0x00;
rose->vs = 0;
rose->vr = 0;
rose->va = 0;
rose->vl = 0;
rose->state = ROSE_STATE_4;
rose_start_t2timer(sk);
rose_stop_idletimer(sk);
break;
}
if (atomic_read(&sk->sk_rmem_alloc) >
(sk->sk_rcvbuf >> 1))
rose->condition |= ROSE_COND_OWN_RX_BUSY;
}
/*
* If the window is full, ack the frame, else start the
* acknowledge hold back timer.
*/
if (((rose->vl + sysctl_rose_window_size) % ROSE_MODULUS) == rose->vr) {
rose->condition &= ~ROSE_COND_ACK_PENDING;
rose_stop_timer(sk);
rose_enquiry_response(sk);
} else {
rose->condition |= ROSE_COND_ACK_PENDING;
rose_start_hbtimer(sk);
}
break;
default:
printk(KERN_WARNING "ROSE: unknown %02X in state 3\n", frametype);
break;
}
return queued;
}
/*
* State machine for state 4, Awaiting Reset Confirmation State.
* The handling of the timer(s) is in file rose_timer.c
* Handling of state 0 and connection release is in af_rose.c.
*/
static int rose_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
struct rose_sock *rose = rose_sk(sk);
switch (frametype) {
case ROSE_RESET_REQUEST:
rose_write_internal(sk, ROSE_RESET_CONFIRMATION);
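/* fall through: after confirming the reset request, reinitialise
* exactly as for a received reset confirmation */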
case ROSE_RESET_CONFIRMATION:
rose_stop_timer(sk);
rose_start_idletimer(sk);
rose->condition = 0x00;
rose->va = 0;
rose->vr = 0;
rose->vs = 0;
rose->vl = 0;
rose->state = ROSE_STATE_3;
rose_requeue_frames(sk);
break;
case ROSE_CLEAR_REQUEST:
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
rose->neighbour->use--;
break;
default:
break;
}
return 0;
}
/*
* State machine for state 5, Awaiting Call Acceptance State.
* The handling of the timer(s) is in file rose_timer.c
* Handling of state 0 and connection release is in af_rose.c.
*/
static int rose_state5_machine(struct sock *sk, struct sk_buff *skb, int frametype)
{
if (frametype == ROSE_CLEAR_REQUEST) {
rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
rose_sk(sk)->neighbour->use--;
}
return 0;
}
/* Higher level upcall for a LAPB frame */
int rose_process_rx_frame(struct sock *sk, struct sk_buff *skb)
{
struct rose_sock *rose = rose_sk(sk);
int queued = 0, frametype, ns, nr, q, d, m;
if (rose->state == ROSE_STATE_0)
return 0;
frametype = rose_decode(skb, &ns, &nr, &q, &d, &m);
switch (rose->state) {
case ROSE_STATE_1:
queued = rose_state1_machine(sk, skb, frametype);
break;
case ROSE_STATE_2:
queued = rose_state2_machine(sk, skb, frametype);
break;
case ROSE_STATE_3:
queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
break;
case ROSE_STATE_4:
queued = rose_state4_machine(sk, skb, frametype);
break;
case ROSE_STATE_5:
queued = rose_state5_machine(sk, skb, frametype);
break;
}
rose_kick(sk);
return queued;
}
| gpl-2.0 |
sub77-bkp/android_kernel_samsung_matissewifi | drivers/mfd/stmpe-i2c.c | 5058 | 2647 | /*
* ST Microelectronics MFD: stmpe's i2c client specific driver
*
* Copyright (C) ST-Ericsson SA 2010
* Copyright (C) ST Microelectronics SA 2011
*
* License Terms: GNU General Public License, version 2
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
* Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
*/
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include "stmpe.h"
static int i2c_reg_read(struct stmpe *stmpe, u8 reg)
{
struct i2c_client *i2c = stmpe->client;
return i2c_smbus_read_byte_data(i2c, reg);
}
static int i2c_reg_write(struct stmpe *stmpe, u8 reg, u8 val)
{
struct i2c_client *i2c = stmpe->client;
return i2c_smbus_write_byte_data(i2c, reg, val);
}
static int i2c_block_read(struct stmpe *stmpe, u8 reg, u8 length, u8 *values)
{
struct i2c_client *i2c = stmpe->client;
return i2c_smbus_read_i2c_block_data(i2c, reg, length, values);
}
static int i2c_block_write(struct stmpe *stmpe, u8 reg, u8 length,
const u8 *values)
{
struct i2c_client *i2c = stmpe->client;
return i2c_smbus_write_i2c_block_data(i2c, reg, length, values);
}
static struct stmpe_client_info i2c_ci = {
.read_byte = i2c_reg_read,
.write_byte = i2c_reg_write,
.read_block = i2c_block_read,
.write_block = i2c_block_write,
};
static int __devinit
stmpe_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
i2c_ci.data = (void *)id;
i2c_ci.irq = i2c->irq;
i2c_ci.client = i2c;
i2c_ci.dev = &i2c->dev;
return stmpe_probe(&i2c_ci, id->driver_data);
}
static int __devexit stmpe_i2c_remove(struct i2c_client *i2c)
{
struct stmpe *stmpe = dev_get_drvdata(&i2c->dev);
return stmpe_remove(stmpe);
}
static const struct i2c_device_id stmpe_i2c_id[] = {
{ "stmpe610", STMPE610 },
{ "stmpe801", STMPE801 },
{ "stmpe811", STMPE811 },
{ "stmpe1601", STMPE1601 },
{ "stmpe2401", STMPE2401 },
{ "stmpe2403", STMPE2403 },
{ }
};
MODULE_DEVICE_TABLE(i2c, stmpe_i2c_id);
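/*
* Boards instantiate the device via i2c_board_info; a hypothetical
* sketch (the 0x41 address is illustrative only -- the real address is
* board specific):
*
*	static struct i2c_board_info stmpe_info __initdata = {
*		I2C_BOARD_INFO("stmpe811", 0x41),
*	};
*/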
static struct i2c_driver stmpe_i2c_driver = {
.driver.name = "stmpe-i2c",
.driver.owner = THIS_MODULE,
#ifdef CONFIG_PM
.driver.pm = &stmpe_dev_pm_ops,
#endif
.probe = stmpe_i2c_probe,
.remove = __devexit_p(stmpe_i2c_remove),
.id_table = stmpe_i2c_id,
};
static int __init stmpe_init(void)
{
return i2c_add_driver(&stmpe_i2c_driver);
}
subsys_initcall(stmpe_init);
static void __exit stmpe_exit(void)
{
i2c_del_driver(&stmpe_i2c_driver);
}
module_exit(stmpe_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMPE MFD I2C Interface Driver");
MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
| gpl-2.0 |
mzueger/linux-phycore-mpc5200b | drivers/gpu/drm/drm_ioc32.c | 195 | 32632 | /**
* \file drm_ioc32.c
*
* 32-bit ioctl compatibility routines for the DRM.
*
* \author Paul Mackerras <paulus@samba.org>
*
* Copyright (C) Paul Mackerras 2005.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include "drmP.h"
#include "drm_core.h"
#define DRM_IOCTL_VERSION32 DRM_IOWR(0x00, drm_version32_t)
#define DRM_IOCTL_GET_UNIQUE32 DRM_IOWR(0x01, drm_unique32_t)
#define DRM_IOCTL_GET_MAP32 DRM_IOWR(0x04, drm_map32_t)
#define DRM_IOCTL_GET_CLIENT32 DRM_IOWR(0x05, drm_client32_t)
#define DRM_IOCTL_GET_STATS32 DRM_IOR( 0x06, drm_stats32_t)
#define DRM_IOCTL_SET_UNIQUE32 DRM_IOW( 0x10, drm_unique32_t)
#define DRM_IOCTL_ADD_MAP32 DRM_IOWR(0x15, drm_map32_t)
#define DRM_IOCTL_ADD_BUFS32 DRM_IOWR(0x16, drm_buf_desc32_t)
#define DRM_IOCTL_MARK_BUFS32 DRM_IOW( 0x17, drm_buf_desc32_t)
#define DRM_IOCTL_INFO_BUFS32 DRM_IOWR(0x18, drm_buf_info32_t)
#define DRM_IOCTL_MAP_BUFS32 DRM_IOWR(0x19, drm_buf_map32_t)
#define DRM_IOCTL_FREE_BUFS32 DRM_IOW( 0x1a, drm_buf_free32_t)
#define DRM_IOCTL_RM_MAP32 DRM_IOW( 0x1b, drm_map32_t)
#define DRM_IOCTL_SET_SAREA_CTX32 DRM_IOW( 0x1c, drm_ctx_priv_map32_t)
#define DRM_IOCTL_GET_SAREA_CTX32 DRM_IOWR(0x1d, drm_ctx_priv_map32_t)
#define DRM_IOCTL_RES_CTX32 DRM_IOWR(0x26, drm_ctx_res32_t)
#define DRM_IOCTL_DMA32 DRM_IOWR(0x29, drm_dma32_t)
#define DRM_IOCTL_AGP_ENABLE32 DRM_IOW( 0x32, drm_agp_mode32_t)
#define DRM_IOCTL_AGP_INFO32 DRM_IOR( 0x33, drm_agp_info32_t)
#define DRM_IOCTL_AGP_ALLOC32 DRM_IOWR(0x34, drm_agp_buffer32_t)
#define DRM_IOCTL_AGP_FREE32 DRM_IOW( 0x35, drm_agp_buffer32_t)
#define DRM_IOCTL_AGP_BIND32 DRM_IOW( 0x36, drm_agp_binding32_t)
#define DRM_IOCTL_AGP_UNBIND32 DRM_IOW( 0x37, drm_agp_binding32_t)
#define DRM_IOCTL_SG_ALLOC32 DRM_IOW( 0x38, drm_scatter_gather32_t)
#define DRM_IOCTL_SG_FREE32 DRM_IOW( 0x39, drm_scatter_gather32_t)
#define DRM_IOCTL_UPDATE_DRAW32 DRM_IOW( 0x3f, drm_update_draw32_t)
#define DRM_IOCTL_WAIT_VBLANK32 DRM_IOWR(0x3a, drm_wait_vblank32_t)
typedef struct drm_version_32 {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
int version_patchlevel; /**< Patch level */
u32 name_len; /**< Length of name buffer */
u32 name; /**< Name of driver */
u32 date_len; /**< Length of date buffer */
u32 date; /**< User-space buffer to hold date */
u32 desc_len; /**< Length of desc buffer */
u32 desc; /**< User-space buffer to hold desc */
} drm_version32_t;
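/*
* Every compat handler below follows the same pattern: copy the packed
* 32-bit argument in from userspace, build the native structure in a
* compat_alloc_user_space() area, forward that to drm_ioctl(), then
* copy any results back into the 32-bit layout.
*/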
static int compat_drm_version(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_version32_t v32;
struct drm_version __user *version;
int err;
if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
return -EFAULT;
version = compat_alloc_user_space(sizeof(*version));
if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
return -EFAULT;
if (__put_user(v32.name_len, &version->name_len)
|| __put_user((void __user *)(unsigned long)v32.name,
&version->name)
|| __put_user(v32.date_len, &version->date_len)
|| __put_user((void __user *)(unsigned long)v32.date,
&version->date)
|| __put_user(v32.desc_len, &version->desc_len)
|| __put_user((void __user *)(unsigned long)v32.desc,
&version->desc))
return -EFAULT;
err = drm_ioctl(file,
DRM_IOCTL_VERSION, (unsigned long)version);
if (err)
return err;
if (__get_user(v32.version_major, &version->version_major)
|| __get_user(v32.version_minor, &version->version_minor)
|| __get_user(v32.version_patchlevel, &version->version_patchlevel)
|| __get_user(v32.name_len, &version->name_len)
|| __get_user(v32.date_len, &version->date_len)
|| __get_user(v32.desc_len, &version->desc_len))
return -EFAULT;
if (copy_to_user((void __user *)arg, &v32, sizeof(v32)))
return -EFAULT;
return 0;
}
typedef struct drm_unique32 {
u32 unique_len; /**< Length of unique */
u32 unique; /**< Unique name for driver instantiation */
} drm_unique32_t;
static int compat_drm_getunique(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_unique32_t uq32;
struct drm_unique __user *u;
int err;
if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
return -EFAULT;
u = compat_alloc_user_space(sizeof(*u));
if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
return -EFAULT;
if (__put_user(uq32.unique_len, &u->unique_len)
|| __put_user((void __user *)(unsigned long)uq32.unique,
&u->unique))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
if (err)
return err;
if (__get_user(uq32.unique_len, &u->unique_len))
return -EFAULT;
if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32)))
return -EFAULT;
return 0;
}
static int compat_drm_setunique(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_unique32_t uq32;
struct drm_unique __user *u;
if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
return -EFAULT;
u = compat_alloc_user_space(sizeof(*u));
if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
return -EFAULT;
if (__put_user(uq32.unique_len, &u->unique_len)
|| __put_user((void __user *)(unsigned long)uq32.unique,
&u->unique))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
}
typedef struct drm_map32 {
u32 offset; /**< Requested physical address (0 for SAREA)*/
u32 size; /**< Requested physical size (bytes) */
enum drm_map_type type; /**< Type of memory to map */
enum drm_map_flags flags; /**< Flags */
u32 handle; /**< User-space: "Handle" to pass to mmap() */
int mtrr; /**< MTRR slot used */
} drm_map32_t;
static int compat_drm_getmap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_map32_t __user *argp = (void __user *)arg;
drm_map32_t m32;
struct drm_map __user *map;
int idx, err;
void *handle;
if (get_user(idx, &argp->offset))
return -EFAULT;
map = compat_alloc_user_space(sizeof(*map));
if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
return -EFAULT;
if (__put_user(idx, &map->offset))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map);
if (err)
return err;
if (__get_user(m32.offset, &map->offset)
|| __get_user(m32.size, &map->size)
|| __get_user(m32.type, &map->type)
|| __get_user(m32.flags, &map->flags)
|| __get_user(handle, &map->handle)
|| __get_user(m32.mtrr, &map->mtrr))
return -EFAULT;
m32.handle = (unsigned long)handle;
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;
return 0;
}
static int compat_drm_addmap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_map32_t __user *argp = (void __user *)arg;
drm_map32_t m32;
struct drm_map __user *map;
int err;
void *handle;
if (copy_from_user(&m32, argp, sizeof(m32)))
return -EFAULT;
map = compat_alloc_user_space(sizeof(*map));
if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
return -EFAULT;
if (__put_user(m32.offset, &map->offset)
|| __put_user(m32.size, &map->size)
|| __put_user(m32.type, &map->type)
|| __put_user(m32.flags, &map->flags))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
if (err)
return err;
if (__get_user(m32.offset, &map->offset)
|| __get_user(m32.mtrr, &map->mtrr)
|| __get_user(handle, &map->handle))
return -EFAULT;
m32.handle = (unsigned long)handle;
if (m32.handle != (unsigned long)handle)
printk_ratelimited(KERN_ERR "compat_drm_addmap truncated handle"
" %p for type %d offset %x\n",
handle, m32.type, m32.offset);
if (copy_to_user(argp, &m32, sizeof(m32)))
return -EFAULT;
return 0;
}
static int compat_drm_rmmap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_map32_t __user *argp = (void __user *)arg;
struct drm_map __user *map;
u32 handle;
if (get_user(handle, &argp->handle))
return -EFAULT;
map = compat_alloc_user_space(sizeof(*map));
if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
return -EFAULT;
if (__put_user((void *)(unsigned long)handle, &map->handle))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map);
}
typedef struct drm_client32 {
int idx; /**< Which client desired? */
int auth; /**< Is client authenticated? */
u32 pid; /**< Process ID */
u32 uid; /**< User ID */
u32 magic; /**< Magic */
u32 iocs; /**< Ioctl count */
} drm_client32_t;
static int compat_drm_getclient(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_client32_t c32;
drm_client32_t __user *argp = (void __user *)arg;
struct drm_client __user *client;
int idx, err;
if (get_user(idx, &argp->idx))
return -EFAULT;
client = compat_alloc_user_space(sizeof(*client));
if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
return -EFAULT;
if (__put_user(idx, &client->idx))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client);
if (err)
return err;
if (__get_user(c32.auth, &client->auth)
|| __get_user(c32.pid, &client->pid)
|| __get_user(c32.uid, &client->uid)
|| __get_user(c32.magic, &client->magic)
|| __get_user(c32.iocs, &client->iocs))
return -EFAULT;
if (copy_to_user(argp, &c32, sizeof(c32)))
return -EFAULT;
return 0;
}
typedef struct drm_stats32 {
u32 count;
struct {
u32 value;
enum drm_stat_type type;
} data[15];
} drm_stats32_t;
static int compat_drm_getstats(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_stats32_t s32;
drm_stats32_t __user *argp = (void __user *)arg;
struct drm_stats __user *stats;
int i, err;
stats = compat_alloc_user_space(sizeof(*stats));
if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
if (err)
return err;
if (__get_user(s32.count, &stats->count))
return -EFAULT;
for (i = 0; i < 15; ++i)
if (__get_user(s32.data[i].value, &stats->data[i].value)
|| __get_user(s32.data[i].type, &stats->data[i].type))
return -EFAULT;
if (copy_to_user(argp, &s32, sizeof(s32)))
return -EFAULT;
return 0;
}
typedef struct drm_buf_desc32 {
int count; /**< Number of buffers of this size */
int size; /**< Size in bytes */
int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
int flags;
u32 agp_start; /**< Start address in the AGP aperture */
} drm_buf_desc32_t;
static int compat_drm_addbufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_desc32_t __user *argp = (void __user *)arg;
struct drm_buf_desc __user *buf;
int err;
unsigned long agp_start;
buf = compat_alloc_user_space(sizeof(*buf));
if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
|| !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
return -EFAULT;
if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
|| __get_user(agp_start, &argp->agp_start)
|| __put_user(agp_start, &buf->agp_start))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
if (err)
return err;
if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start))
|| __get_user(agp_start, &buf->agp_start)
|| __put_user(agp_start, &argp->agp_start))
return -EFAULT;
return 0;
}
static int compat_drm_markbufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_desc32_t b32;
drm_buf_desc32_t __user *argp = (void __user *)arg;
struct drm_buf_desc __user *buf;
if (copy_from_user(&b32, argp, sizeof(b32)))
return -EFAULT;
buf = compat_alloc_user_space(sizeof(*buf));
if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
return -EFAULT;
if (__put_user(b32.size, &buf->size)
|| __put_user(b32.low_mark, &buf->low_mark)
|| __put_user(b32.high_mark, &buf->high_mark))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
}
typedef struct drm_buf_info32 {
int count; /**< Entries in list */
u32 list;
} drm_buf_info32_t;
static int compat_drm_infobufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_info32_t req32;
drm_buf_info32_t __user *argp = (void __user *)arg;
drm_buf_desc32_t __user *to;
struct drm_buf_info __user *request;
struct drm_buf_desc __user *list;
size_t nbytes;
int i, err;
int count, actual;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
count = req32.count;
to = (drm_buf_desc32_t __user *) (unsigned long)req32.list;
if (count < 0)
count = 0;
if (count > 0
&& !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
return -EFAULT;
nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
request = compat_alloc_user_space(nbytes);
if (!access_ok(VERIFY_WRITE, request, nbytes))
return -EFAULT;
list = (struct drm_buf_desc *) (request + 1);
if (__put_user(count, &request->count)
|| __put_user(list, &request->list))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request);
if (err)
return err;
if (__get_user(actual, &request->count))
return -EFAULT;
if (count >= actual)
for (i = 0; i < actual; ++i)
if (__copy_in_user(&to[i], &list[i],
offsetof(struct drm_buf_desc, flags)))
return -EFAULT;
if (__put_user(actual, &argp->count))
return -EFAULT;
return 0;
}
typedef struct drm_buf_pub32 {
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
u32 address; /**< Address of buffer */
} drm_buf_pub32_t;
typedef struct drm_buf_map32 {
int count; /**< Length of the buffer list */
u32 virtual; /**< Mmap'd area in user-virtual */
u32 list; /**< Buffer information */
} drm_buf_map32_t;
static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_map32_t __user *argp = (void __user *)arg;
drm_buf_map32_t req32;
drm_buf_pub32_t __user *list32;
struct drm_buf_map __user *request;
struct drm_buf_pub __user *list;
int i, err;
int count, actual;
size_t nbytes;
void __user *addr;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
count = req32.count;
list32 = (void __user *)(unsigned long)req32.list;
if (count < 0)
return -EINVAL;
nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
request = compat_alloc_user_space(nbytes);
if (!access_ok(VERIFY_WRITE, request, nbytes))
return -EFAULT;
list = (struct drm_buf_pub *) (request + 1);
if (__put_user(count, &request->count)
|| __put_user(list, &request->list))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request);
if (err)
return err;
if (__get_user(actual, &request->count))
return -EFAULT;
if (count >= actual)
for (i = 0; i < actual; ++i)
if (__copy_in_user(&list32[i], &list[i],
offsetof(struct drm_buf_pub, address))
|| __get_user(addr, &list[i].address)
|| __put_user((unsigned long)addr,
&list32[i].address))
return -EFAULT;
if (__put_user(actual, &argp->count)
|| __get_user(addr, &request->virtual)
|| __put_user((unsigned long)addr, &argp->virtual))
return -EFAULT;
return 0;
}
typedef struct drm_buf_free32 {
int count;
u32 list;
} drm_buf_free32_t;
static int compat_drm_freebufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_free32_t req32;
struct drm_buf_free __user *request;
drm_buf_free32_t __user *argp = (void __user *)arg;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
return -EFAULT;
if (__put_user(req32.count, &request->count)
|| __put_user((int __user *)(unsigned long)req32.list,
&request->list))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request);
}
typedef struct drm_ctx_priv_map32 {
unsigned int ctx_id; /**< Context requesting private mapping */
u32 handle; /**< Handle of map */
} drm_ctx_priv_map32_t;
static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_ctx_priv_map32_t req32;
struct drm_ctx_priv_map __user *request;
drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
return -EFAULT;
if (__put_user(req32.ctx_id, &request->ctx_id)
|| __put_user((void *)(unsigned long)req32.handle,
&request->handle))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
}
static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct drm_ctx_priv_map __user *request;
drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
int err;
unsigned int ctx_id;
void *handle;
if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp))
|| __get_user(ctx_id, &argp->ctx_id))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
return -EFAULT;
if (__put_user(ctx_id, &request->ctx_id))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
if (err)
return err;
if (__get_user(handle, &request->handle)
|| __put_user((unsigned long)handle, &argp->handle))
return -EFAULT;
return 0;
}
typedef struct drm_ctx_res32 {
int count;
u32 contexts;
} drm_ctx_res32_t;
static int compat_drm_resctx(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_ctx_res32_t __user *argp = (void __user *)arg;
drm_ctx_res32_t res32;
struct drm_ctx_res __user *res;
int err;
if (copy_from_user(&res32, argp, sizeof(res32)))
return -EFAULT;
res = compat_alloc_user_space(sizeof(*res));
if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
return -EFAULT;
if (__put_user(res32.count, &res->count)
|| __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
&res->contexts))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res);
if (err)
return err;
if (__get_user(res32.count, &res->count)
|| __put_user(res32.count, &argp->count))
return -EFAULT;
return 0;
}
typedef struct drm_dma32 {
int context; /**< Context handle */
int send_count; /**< Number of buffers to send */
u32 send_indices; /**< List of handles to buffers */
u32 send_sizes; /**< Lengths of data to send */
enum drm_dma_flags flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
u32 request_indices; /**< Buffer information */
u32 request_sizes;
int granted_count; /**< Number of buffers granted */
} drm_dma32_t;
static int compat_drm_dma(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_dma32_t d32;
drm_dma32_t __user *argp = (void __user *)arg;
struct drm_dma __user *d;
int err;
if (copy_from_user(&d32, argp, sizeof(d32)))
return -EFAULT;
d = compat_alloc_user_space(sizeof(*d));
if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
return -EFAULT;
if (__put_user(d32.context, &d->context)
|| __put_user(d32.send_count, &d->send_count)
|| __put_user((int __user *)(unsigned long)d32.send_indices,
&d->send_indices)
|| __put_user((int __user *)(unsigned long)d32.send_sizes,
&d->send_sizes)
|| __put_user(d32.flags, &d->flags)
|| __put_user(d32.request_count, &d->request_count)
|| __put_user((int __user *)(unsigned long)d32.request_indices,
&d->request_indices)
|| __put_user((int __user *)(unsigned long)d32.request_sizes,
&d->request_sizes))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d);
if (err)
return err;
if (__get_user(d32.request_size, &d->request_size)
|| __get_user(d32.granted_count, &d->granted_count)
|| __put_user(d32.request_size, &argp->request_size)
|| __put_user(d32.granted_count, &argp->granted_count))
return -EFAULT;
return 0;
}
#if __OS_HAS_AGP
typedef struct drm_agp_mode32 {
u32 mode; /**< AGP mode */
} drm_agp_mode32_t;
static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_mode32_t __user *argp = (void __user *)arg;
drm_agp_mode32_t m32;
struct drm_agp_mode __user *mode;
if (get_user(m32.mode, &argp->mode))
return -EFAULT;
mode = compat_alloc_user_space(sizeof(*mode));
if (put_user(m32.mode, &mode->mode))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
}
typedef struct drm_agp_info32 {
int agp_version_major;
int agp_version_minor;
u32 mode;
u32 aperture_base; /* physical address */
u32 aperture_size; /* bytes */
u32 memory_allowed; /* bytes */
u32 memory_used;
/* PCI information */
unsigned short id_vendor;
unsigned short id_device;
} drm_agp_info32_t;
static int compat_drm_agp_info(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_info32_t __user *argp = (void __user *)arg;
drm_agp_info32_t i32;
struct drm_agp_info __user *info;
int err;
info = compat_alloc_user_space(sizeof(*info));
if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
if (err)
return err;
if (__get_user(i32.agp_version_major, &info->agp_version_major)
|| __get_user(i32.agp_version_minor, &info->agp_version_minor)
|| __get_user(i32.mode, &info->mode)
|| __get_user(i32.aperture_base, &info->aperture_base)
|| __get_user(i32.aperture_size, &info->aperture_size)
|| __get_user(i32.memory_allowed, &info->memory_allowed)
|| __get_user(i32.memory_used, &info->memory_used)
|| __get_user(i32.id_vendor, &info->id_vendor)
|| __get_user(i32.id_device, &info->id_device))
return -EFAULT;
if (copy_to_user(argp, &i32, sizeof(i32)))
return -EFAULT;
return 0;
}
typedef struct drm_agp_buffer32 {
u32 size; /**< In bytes -- will round to page boundary */
u32 handle; /**< Used for binding / unbinding */
u32 type; /**< Type of memory to allocate */
u32 physical; /**< Physical used by i810 */
} drm_agp_buffer32_t;
static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_buffer32_t __user *argp = (void __user *)arg;
drm_agp_buffer32_t req32;
struct drm_agp_buffer __user *request;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user(req32.size, &request->size)
|| __put_user(req32.type, &request->type))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
if (err)
return err;
if (__get_user(req32.handle, &request->handle)
|| __get_user(req32.physical, &request->physical)
|| copy_to_user(argp, &req32, sizeof(req32))) {
drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
return -EFAULT;
}
return 0;
}
static int compat_drm_agp_free(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_buffer32_t __user *argp = (void __user *)arg;
struct drm_agp_buffer __user *request;
u32 handle;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| get_user(handle, &argp->handle)
|| __put_user(handle, &request->handle))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
}
typedef struct drm_agp_binding32 {
u32 handle; /**< From drm_agp_buffer */
u32 offset; /**< In bytes -- will round to page boundary */
} drm_agp_binding32_t;
static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_binding32_t __user *argp = (void __user *)arg;
drm_agp_binding32_t req32;
struct drm_agp_binding __user *request;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user(req32.handle, &request->handle)
|| __put_user(req32.offset, &request->offset))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request);
}
static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_binding32_t __user *argp = (void __user *)arg;
struct drm_agp_binding __user *request;
u32 handle;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| get_user(handle, &argp->handle)
|| __put_user(handle, &request->handle))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
}
#endif /* __OS_HAS_AGP */
typedef struct drm_scatter_gather32 {
u32 size; /**< In bytes -- will round to page boundary */
u32 handle; /**< Used for mapping / unmapping */
} drm_scatter_gather32_t;
static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_scatter_gather32_t __user *argp = (void __user *)arg;
struct drm_scatter_gather __user *request;
int err;
unsigned long x;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
|| __get_user(x, &argp->size)
|| __put_user(x, &request->size))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request);
if (err)
return err;
/* XXX not sure about the handle conversion here... */
if (__get_user(x, &request->handle)
|| __put_user(x >> PAGE_SHIFT, &argp->handle))
return -EFAULT;
return 0;
}
static int compat_drm_sg_free(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_scatter_gather32_t __user *argp = (void __user *)arg;
struct drm_scatter_gather __user *request;
unsigned long x;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
|| __get_user(x, &argp->handle)
|| __put_user(x << PAGE_SHIFT, &request->handle))
return -EFAULT;
return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request);
}
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
typedef struct drm_update_draw32 {
drm_drawable_t handle;
unsigned int type;
unsigned int num;
/* 64-bit version has a 32-bit pad here */
u64 data; /**< Pointer */
} __attribute__((packed)) drm_update_draw32_t;
static int compat_drm_update_draw(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_update_draw32_t update32;
struct drm_update_draw __user *request;
int err;
if (copy_from_user(&update32, (void __user *)arg, sizeof(update32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
__put_user(update32.handle, &request->handle) ||
__put_user(update32.type, &request->type) ||
__put_user(update32.num, &request->num) ||
__put_user(update32.data, &request->data))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
return err;
}
#endif
struct drm_wait_vblank_request32 {
enum drm_vblank_seq_type type;
unsigned int sequence;
u32 signal;
};
struct drm_wait_vblank_reply32 {
enum drm_vblank_seq_type type;
unsigned int sequence;
s32 tval_sec;
s32 tval_usec;
};
typedef union drm_wait_vblank32 {
struct drm_wait_vblank_request32 request;
struct drm_wait_vblank_reply32 reply;
} drm_wait_vblank32_t;
static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_wait_vblank32_t __user *argp = (void __user *)arg;
drm_wait_vblank32_t req32;
union drm_wait_vblank __user *request;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
|| __put_user(req32.request.type, &request->request.type)
|| __put_user(req32.request.sequence, &request->request.sequence)
|| __put_user(req32.request.signal, &request->request.signal))
return -EFAULT;
err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
if (err)
return err;
if (__get_user(req32.reply.type, &request->reply.type)
|| __get_user(req32.reply.sequence, &request->reply.sequence)
|| __get_user(req32.reply.tval_sec, &request->reply.tval_sec)
|| __get_user(req32.reply.tval_usec, &request->reply.tval_usec))
return -EFAULT;
if (copy_to_user(argp, &req32, sizeof(req32)))
return -EFAULT;
return 0;
}
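/*
* Compat handlers indexed by ioctl number. Slots left NULL fall back
* to the native drm_ioctl() path in drm_compat_ioctl() below.
*/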
drm_ioctl_compat_t *drm_compat_ioctls[] = {
[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient,
[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats,
[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique,
[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap,
[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs,
[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs,
[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs,
[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs,
[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs,
[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap,
[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx,
[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
[DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
#if __OS_HAS_AGP
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free,
[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind,
[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind,
#endif
[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
#endif
[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
};
/**
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/drm.
*
* \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument.
* \return zero on success or negative number on failure.
*/
long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
drm_ioctl_compat_t *fn;
int ret;
/* Assume that ioctls without an explicit compat routine will just
* work. This may not always be a good assumption, but it's better
* than always failing.
*/
if (nr >= ARRAY_SIZE(drm_compat_ioctls))
return drm_ioctl(filp, cmd, arg);
fn = drm_compat_ioctls[nr];
if (fn != NULL)
ret = (*fn) (filp, cmd, arg);
else
ret = drm_ioctl(filp, cmd, arg);
return ret;
}
EXPORT_SYMBOL(drm_compat_ioctl);
| gpl-2.0 |
mwoodward/Amaze-ics-sense | arch/arm/mach-msm/peripheral-loader.c | 195 | 10141 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/elf.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <mach/socinfo.h>
#include <asm/uaccess.h>
#include <asm/setup.h>
#include "peripheral-loader.h"
struct pil_device {
struct pil_desc *desc;
int count;
struct mutex lock;
struct list_head list;
};
static DEFINE_MUTEX(pil_list_lock);
static LIST_HEAD(pil_list);
static struct pil_device *__find_peripheral(const char *str)
{
struct pil_device *dev;
list_for_each_entry(dev, &pil_list, list)
if (!strcmp(dev->desc->name, str))
return dev;
return NULL;
}
static struct pil_device *find_peripheral(const char *str)
{
struct pil_device *dev;
if (!str)
return NULL;
mutex_lock(&pil_list_lock);
dev = __find_peripheral(str);
mutex_unlock(&pil_list_lock);
return dev;
}
#define IOMAP_SIZE SZ_4M
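/*
* Firmware segments are copied through ioremap() windows of at most
* IOMAP_SIZE bytes, so even a very large segment never needs a single
* huge mapping.
*/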
static int load_segment(const struct elf32_phdr *phdr, unsigned num,
struct pil_device *pil)
{
int ret, count, paddr;
char fw_name[30];
const struct firmware *fw = NULL;
const u8 *data;
if (memblock_is_region_memory(phdr->p_paddr, phdr->p_memsz)) {
dev_err(pil->desc->dev, "Kernel memory would be overwritten");
return -EPERM;
}
if (phdr->p_filesz) {
snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
pil->desc->name, num);
ret = request_firmware(&fw, fw_name, pil->desc->dev);
if (ret) {
dev_err(pil->desc->dev, "Failed to locate blob %s\n",
fw_name);
return ret;
}
if (fw->size != phdr->p_filesz) {
dev_err(pil->desc->dev,
"Blob size %u doesn't match %u\n", fw->size,
phdr->p_filesz);
ret = -EPERM;
goto release_fw;
}
}
/* Load the segment into memory */
count = phdr->p_filesz;
paddr = phdr->p_paddr;
data = fw ? fw->data : NULL;
while (count > 0) {
int size;
u8 __iomem *buf;
size = min_t(size_t, IOMAP_SIZE, count);
buf = ioremap(paddr, size);
if (!buf) {
dev_err(pil->desc->dev, "Failed to map memory\n");
ret = -ENOMEM;
goto release_fw;
}
memcpy(buf, data, size);
iounmap(buf);
count -= size;
paddr += size;
data += size;
}
/* Zero out trailing memory */
count = phdr->p_memsz - phdr->p_filesz;
while (count > 0) {
int size;
u8 __iomem *buf;
size = min_t(size_t, IOMAP_SIZE, count);
buf = ioremap(paddr, size);
if (!buf) {
dev_err(pil->desc->dev, "Failed to map memory\n");
ret = -ENOMEM;
goto release_fw;
}
memset(buf, 0, size);
iounmap(buf);
count -= size;
paddr += size;
}
ret = pil->desc->ops->verify_blob(pil->desc, phdr->p_paddr,
phdr->p_memsz);
if (ret)
dev_err(pil->desc->dev, "Blob %u failed verification\n", num);
release_fw:
release_firmware(fw);
return ret;
}
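/*
* Bits 26:24 of p_flags carry an MSM-specific segment type; type 0x2
* marks a hash segment, which is skipped by the loader (see
* segment_is_loadable() below).
*/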
#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))
static int segment_is_loadable(const struct elf32_phdr *p)
{
return (p->p_type & PT_LOAD) && !segment_is_hash(p->p_flags);
}
static int load_image(struct pil_device *pil)
{
int i, ret;
char fw_name[30];
struct elf32_hdr *ehdr;
const struct elf32_phdr *phdr;
const struct firmware *fw;
snprintf(fw_name, sizeof(fw_name), "%s.mdt", pil->desc->name);
ret = request_firmware(&fw, fw_name, pil->desc->dev);
if (ret) {
dev_err(pil->desc->dev, "Failed to locate %s\n", fw_name);
goto out;
}
if (fw->size < sizeof(*ehdr)) {
dev_err(pil->desc->dev, "Not big enough to be an elf header\n");
ret = -EIO;
goto release_fw;
}
ehdr = (struct elf32_hdr *)fw->data;
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
dev_err(pil->desc->dev, "Not an elf header\n");
ret = -EIO;
goto release_fw;
}
if (ehdr->e_phnum == 0) {
dev_err(pil->desc->dev, "No loadable segments\n");
ret = -EIO;
goto release_fw;
}
if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
sizeof(struct elf32_hdr) > fw->size) {
dev_err(pil->desc->dev, "Program headers not within mdt\n");
ret = -EIO;
goto release_fw;
}
ret = pil->desc->ops->init_image(pil->desc, fw->data, fw->size);
if (ret) {
dev_err(pil->desc->dev, "Invalid firmware metadata\n");
goto release_fw;
}
phdr = (const struct elf32_phdr *)(fw->data + sizeof(struct elf32_hdr));
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
if (!segment_is_loadable(phdr))
continue;
ret = load_segment(phdr, i, pil);
if (ret) {
dev_err(pil->desc->dev, "Failed to load segment %d\n",
i);
goto release_fw;
}
}
ret = pil->desc->ops->auth_and_reset(pil->desc);
if (ret) {
dev_err(pil->desc->dev, "Failed to bring out of reset\n");
goto release_fw;
}
dev_info(pil->desc->dev, "brought out of reset\n");
release_fw:
release_firmware(fw);
out:
return ret;
}
/**
* pil_get() - Load a peripheral into memory and take it out of reset
* @name: pointer to a string containing the name of the peripheral to load
*
* This function returns a pointer if it succeeds. If an error occurs an
* ERR_PTR is returned.
*
* If PIL is not enabled in the kernel, the value %NULL will be returned.
*/
void *pil_get(const char *name)
{
int ret;
struct pil_device *pil;
struct pil_device *pil_d;
void *retval;
/* PIL is not yet supported on 8064. */
if (cpu_is_apq8064())
return NULL;
pil = retval = find_peripheral(name);
if (!pil)
return ERR_PTR(-ENODEV);
pil_d = find_peripheral(pil->desc->depends_on);
if (pil_d) {
void *p = pil_get(pil_d->desc->name);
if (IS_ERR(p))
return p;
}
mutex_lock(&pil->lock);
if (pil->count) {
pil->count++;
goto unlock;
}
ret = load_image(pil);
if (ret) {
retval = ERR_PTR(ret);
goto unlock;
}
pil->count++;
unlock:
mutex_unlock(&pil->lock);
return retval;
}
EXPORT_SYMBOL(pil_get);
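/*
* Hypothetical caller, for illustration only (the "modem" name is an
* example, not necessarily a registered peripheral):
*
*	void *handle = pil_get("modem");
*	if (IS_ERR(handle))
*		return PTR_ERR(handle);
*	...
*	pil_put(handle);
*/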
/**
* pil_put() - Inform PIL the peripheral no longer needs to be active
* @peripheral_handle: pointer from a previous call to pil_get()
*
* This doesn't imply that a peripheral is shut down or in reset, since another
* driver could be using the peripheral.
*/
void pil_put(void *peripheral_handle)
{
struct pil_device *pil_d;
struct pil_device *pil = peripheral_handle;
if (!pil || IS_ERR(pil))
return;
mutex_lock(&pil->lock);
WARN(!pil->count, "%s: Reference count mismatch\n", __func__);
/* TODO: Peripheral shutdown support */
if (pil->count == 1)
goto unlock;
if (pil->count)
pil->count--;
if (pil->count == 0)
pil->desc->ops->shutdown(pil->desc);
unlock:
mutex_unlock(&pil->lock);
pil_d = find_peripheral(pil->desc->depends_on);
if (pil_d)
pil_put(pil_d);
}
EXPORT_SYMBOL(pil_put);
void pil_force_shutdown(const char *name)
{
struct pil_device *pil;
pil = find_peripheral(name);
if (!pil)
return;
mutex_lock(&pil->lock);
if (!WARN(!pil->count, "%s: Reference count mismatch\n", __func__))
pil->desc->ops->shutdown(pil->desc);
mutex_unlock(&pil->lock);
}
EXPORT_SYMBOL(pil_force_shutdown);
int pil_force_boot(const char *name)
{
int ret = -EINVAL;
struct pil_device *pil;
pil = find_peripheral(name);
if (!pil)
return -EINVAL;
mutex_lock(&pil->lock);
if (!WARN(!pil->count, "%s: Reference count mismatch\n", __func__))
ret = load_image(pil);
mutex_unlock(&pil->lock);
return ret;
}
EXPORT_SYMBOL(pil_force_boot);
#ifdef CONFIG_DEBUG_FS
static int msm_pil_debugfs_open(struct inode *inode, struct file *filp)
{
filp->private_data = inode->i_private;
return 0;
}
static ssize_t msm_pil_debugfs_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
int r;
char buf[40];
struct pil_device *pil = filp->private_data;
mutex_lock(&pil->lock);
r = snprintf(buf, sizeof(buf), "%d\n", pil->count);
mutex_unlock(&pil->lock);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t msm_pil_debugfs_write(struct file *filp,
const char __user *ubuf, size_t cnt, loff_t *ppos)
{
struct pil_device *pil = filp->private_data;
char buf[4];
if (cnt > sizeof(buf))
return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
if (!strncmp(buf, "get", 3)) {
if (IS_ERR(pil_get(pil->desc->name)))
return -EIO;
} else if (!strncmp(buf, "put", 3))
pil_put(pil);
else
return -EINVAL;
return cnt;
}
static const struct file_operations msm_pil_debugfs_fops = {
.open = msm_pil_debugfs_open,
.read = msm_pil_debugfs_read,
.write = msm_pil_debugfs_write,
};
static struct dentry *pil_base_dir;
static int msm_pil_debugfs_init(void)
{
pil_base_dir = debugfs_create_dir("pil", NULL);
if (!pil_base_dir)
return -ENOMEM;
return 0;
}
arch_initcall(msm_pil_debugfs_init);
static int msm_pil_debugfs_add(struct pil_device *pil)
{
if (!pil_base_dir)
return -ENOMEM;
if (!debugfs_create_file(pil->desc->name, S_IRUGO | S_IWUSR,
pil_base_dir, pil, &msm_pil_debugfs_fops))
return -ENOMEM;
return 0;
}
#else
static int msm_pil_debugfs_add(struct pil_device *pil) { return 0; }
#endif
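/*
* Shut every registered peripheral down once at late init so they all
* start from a known reset state before drivers boot them on demand.
*/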
static int msm_pil_shutdown_at_boot(void)
{
struct pil_device *pil;
mutex_lock(&pil_list_lock);
list_for_each_entry(pil, &pil_list, list)
pil->desc->ops->shutdown(pil->desc);
mutex_unlock(&pil_list_lock);
return 0;
}
late_initcall(msm_pil_shutdown_at_boot);
int msm_pil_register(struct pil_desc *desc)
{
struct pil_device *pil = kzalloc(sizeof(*pil), GFP_KERNEL);
if (!pil)
return -ENOMEM;
mutex_init(&pil->lock);
INIT_LIST_HEAD(&pil->list);
pil->desc = desc;
mutex_lock(&pil_list_lock);
list_add(&pil->list, &pil_list);
mutex_unlock(&pil_list_lock);
return msm_pil_debugfs_add(pil);
}
EXPORT_SYMBOL(msm_pil_register);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Load peripheral images and bring peripherals out of reset");
| gpl-2.0 |
kref/linux-oxnas | drivers/scsi/ibmvscsi/ibmvfc.c | 451 | 141190 | /*
* ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
*
* Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
*
* Copyright (C) IBM Corporation, 2008
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/stringify.h>
#include <asm/firmware.h>
#include <asm/irq.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include "ibmvfc.h"
static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static unsigned int max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;
MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);
module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
"[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_timeout,
"Default timeout in seconds for initialization and EH commands. "
"[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
"[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
module_param_named(max_lun, max_lun, uint, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
"[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
"[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
"[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
"[Default=" __stringify(IBMVFC_DEBUG) "]");
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
"[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
static const struct {
u16 status;
u16 error;
u8 result;
u8 retry;
int log;
char *name;
} cmd_status [] = {
{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
};
static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static const char *unknown_error = "unknown error";
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
* ibmvfc_trc_start - Log a start trace entry
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_trc_start(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
struct ibmvfc_trace_entry *entry;
entry = &vhost->trace[vhost->trace_index++];
entry->evt = evt;
entry->time = jiffies;
entry->fmt = evt->crq.format;
entry->type = IBMVFC_TRC_START;
switch (entry->fmt) {
case IBMVFC_CMD_FORMAT:
entry->op_code = vfc_cmd->iu.cdb[0];
entry->scsi_id = vfc_cmd->tgt_scsi_id;
entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
entry->tmf_flags = vfc_cmd->iu.tmf_flags;
entry->u.start.xfer_len = vfc_cmd->iu.xfer_len;
break;
case IBMVFC_MAD_FORMAT:
entry->op_code = mad->opcode;
break;
default:
break;
}
}
/**
* ibmvfc_trc_end - Log an end trace entry
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_trc_end(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
entry->evt = evt;
entry->time = jiffies;
entry->fmt = evt->crq.format;
entry->type = IBMVFC_TRC_END;
switch (entry->fmt) {
case IBMVFC_CMD_FORMAT:
entry->op_code = vfc_cmd->iu.cdb[0];
entry->scsi_id = vfc_cmd->tgt_scsi_id;
entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
entry->tmf_flags = vfc_cmd->iu.tmf_flags;
entry->u.end.status = vfc_cmd->status;
entry->u.end.error = vfc_cmd->error;
entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
break;
case IBMVFC_MAD_FORMAT:
entry->op_code = mad->opcode;
entry->u.end.status = mad->status;
break;
default:
break;
}
}
#else
#define ibmvfc_trc_start(evt) do { } while (0)
#define ibmvfc_trc_end(evt) do { } while (0)
#endif
/**
* ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
* @status: status / error class
* @error: error
*
* Return value:
* index into cmd_status / -EINVAL on failure
**/
static int ibmvfc_get_err_index(u16 status, u16 error)
{
int i;
for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
if ((cmd_status[i].status & status) == cmd_status[i].status &&
cmd_status[i].error == error)
return i;
return -EINVAL;
}
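/*
 * Editor's note: a minimal, inert sketch of the table-driven lookup above.
 * The helper name row_index and the row values are hypothetical; the real
 * (status, error) pairs live in the cmd_status[] table at the top of this
 * file.
 */
#if 0
struct err_row { u16 status; u16 error; const char *name; };

static const struct err_row rows[] = {
	{ 0x0001, 0x0003, "command timeout" },	/* hypothetical codes */
	{ 0x0002, 0x0001, "CRQ failure" },	/* hypothetical codes */
};

static int row_index(u16 status, u16 error)
{
	int i;
	/* Same test as ibmvfc_get_err_index(): the table's status bits must
	 * all be present in the reported status, and the error must match. */
	for (i = 0; i < ARRAY_SIZE(rows); i++)
		if ((rows[i].status & status) == rows[i].status &&
		    rows[i].error == error)
			return i;
	return -EINVAL;
}
#endif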
/**
* ibmvfc_get_cmd_error - Find the error description for the fcp response
* @status: status / error class
* @error: error
*
* Return value:
* error description string
**/
static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
{
int rc = ibmvfc_get_err_index(status, error);
if (rc >= 0)
return cmd_status[rc].name;
return unknown_error;
}
/**
* ibmvfc_get_err_result - Find the scsi status to return for the fcp response
* @vfc_cmd: ibmvfc command struct
*
* Return value:
* SCSI result value to return for completed command
**/
static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
{
int err;
struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
int fc_rsp_len = rsp->fcp_rsp_len;
if ((rsp->flags & FCP_RSP_LEN_VALID) &&
((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
rsp->data.info.rsp_code))
return DID_ERROR << 16;
err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
if (err >= 0)
return rsp->scsi_status | (cmd_status[err].result << 16);
return rsp->scsi_status | (DID_ERROR << 16);
}
/**
* ibmvfc_retry_cmd - Determine if error status is retryable
* @status: status / error class
* @error: error
*
* Return value:
* 1 if error should be retried / 0 if it should not
**/
static int ibmvfc_retry_cmd(u16 status, u16 error)
{
int rc = ibmvfc_get_err_index(status, error);
if (rc >= 0)
return cmd_status[rc].retry;
return 1;
}
static const char *unknown_fc_explain = "unknown fc explain";
static const struct {
u16 fc_explain;
char *name;
} ls_explain [] = {
{ 0x00, "no additional explanation" },
{ 0x01, "service parameter error - options" },
{ 0x03, "service parameter error - initiator control" },
{ 0x05, "service parameter error - recipient control" },
{ 0x07, "service parameter error - received data field size" },
{ 0x09, "service parameter error - concurrent seq" },
{ 0x0B, "service parameter error - credit" },
{ 0x0D, "invalid N_Port/F_Port_Name" },
{ 0x0E, "invalid node/Fabric Name" },
{ 0x0F, "invalid common service parameters" },
{ 0x11, "invalid association header" },
{ 0x13, "association header required" },
{ 0x15, "invalid originator S_ID" },
{ 0x17, "invalid OX_ID-RX-ID combination" },
{ 0x19, "command (request) already in progress" },
{ 0x1E, "N_Port Login requested" },
{ 0x1F, "Invalid N_Port_ID" },
};
static const struct {
u16 fc_explain;
char *name;
} gs_explain [] = {
{ 0x00, "no additional explanation" },
{ 0x01, "port identifier not registered" },
{ 0x02, "port name not registered" },
{ 0x03, "node name not registered" },
{ 0x04, "class of service not registered" },
{ 0x06, "initial process associator not registered" },
{ 0x07, "FC-4 TYPEs not registered" },
{ 0x08, "symbolic port name not registered" },
{ 0x09, "symbolic node name not registered" },
{ 0x0A, "port type not registered" },
{ 0xF0, "authorization exception" },
{ 0xF1, "authentication exception" },
{ 0xF2, "data base full" },
{ 0xF3, "data base empty" },
{ 0xF4, "processing request" },
{ 0xF5, "unable to verify connection" },
{ 0xF6, "devices not in a common zone" },
};
/**
* ibmvfc_get_ls_explain - Return the FC Explain description text
* @status: FC Explain status
*
* Returns:
* error string
**/
static const char *ibmvfc_get_ls_explain(u16 status)
{
int i;
for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
if (ls_explain[i].fc_explain == status)
return ls_explain[i].name;
return unknown_fc_explain;
}
/**
* ibmvfc_get_gs_explain - Return the FC Explain description text
* @status: FC Explain status
*
* Returns:
* error string
**/
static const char *ibmvfc_get_gs_explain(u16 status)
{
int i;
for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
if (gs_explain[i].fc_explain == status)
return gs_explain[i].name;
return unknown_fc_explain;
}
static const struct {
enum ibmvfc_fc_type fc_type;
char *name;
} fc_type [] = {
{ IBMVFC_FABRIC_REJECT, "fabric reject" },
{ IBMVFC_PORT_REJECT, "port reject" },
{ IBMVFC_LS_REJECT, "ELS reject" },
{ IBMVFC_FABRIC_BUSY, "fabric busy" },
{ IBMVFC_PORT_BUSY, "port busy" },
{ IBMVFC_BASIC_REJECT, "basic reject" },
};
static const char *unknown_fc_type = "unknown fc type";
/**
* ibmvfc_get_fc_type - Return the FC Type description text
* @status: FC Type error status
*
* Returns:
* error string
**/
static const char *ibmvfc_get_fc_type(u16 status)
{
int i;
for (i = 0; i < ARRAY_SIZE(fc_type); i++)
if (fc_type[i].fc_type == status)
return fc_type[i].name;
return unknown_fc_type;
}
/**
* ibmvfc_set_tgt_action - Set the next init action for the target
* @tgt: ibmvfc target struct
* @action: action to perform
*
**/
static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
enum ibmvfc_target_action action)
{
switch (tgt->action) {
case IBMVFC_TGT_ACTION_DEL_RPORT:
if (action == IBMVFC_TGT_ACTION_DELETED_RPORT)
tgt->action = action;
/* fall through */
case IBMVFC_TGT_ACTION_DELETED_RPORT:
break;
default:
if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
tgt->add_rport = 0;
tgt->action = action;
break;
}
}
/**
* ibmvfc_set_host_state - Set the state for the host
* @vhost: ibmvfc host struct
* @state: state to set host to
*
* Returns:
* 0 if state changed / non-zero if not changed
**/
static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
enum ibmvfc_host_state state)
{
int rc = 0;
switch (vhost->state) {
case IBMVFC_HOST_OFFLINE:
rc = -EINVAL;
break;
default:
vhost->state = state;
break;
}
return rc;
}
/**
* ibmvfc_set_host_action - Set the next init action for the host
* @vhost: ibmvfc host struct
* @action: action to perform
*
**/
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
enum ibmvfc_host_action action)
{
switch (action) {
case IBMVFC_HOST_ACTION_ALLOC_TGTS:
if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
vhost->action = action;
break;
case IBMVFC_HOST_ACTION_LOGO_WAIT:
if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
vhost->action = action;
break;
case IBMVFC_HOST_ACTION_INIT_WAIT:
if (vhost->action == IBMVFC_HOST_ACTION_INIT)
vhost->action = action;
break;
case IBMVFC_HOST_ACTION_QUERY:
switch (vhost->action) {
case IBMVFC_HOST_ACTION_INIT_WAIT:
case IBMVFC_HOST_ACTION_NONE:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
vhost->action = action;
break;
default:
break;
}
break;
case IBMVFC_HOST_ACTION_TGT_INIT:
if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
vhost->action = action;
break;
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_TGT_DEL:
switch (vhost->action) {
case IBMVFC_HOST_ACTION_RESET:
case IBMVFC_HOST_ACTION_REENABLE:
break;
default:
vhost->action = action;
break;
}
break;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_NONE:
case IBMVFC_HOST_ACTION_RESET:
case IBMVFC_HOST_ACTION_REENABLE:
default:
vhost->action = action;
break;
}
}
/**
* ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
* @vhost: ibmvfc host struct
*
* Return value:
* nothing
**/
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
}
} else
vhost->reinit = 1;
wake_up(&vhost->work_wait_q);
}
/**
* ibmvfc_link_down - Handle a link down event from the adapter
* @vhost: ibmvfc host struct
* @state: ibmvfc host state to enter
*
**/
static void ibmvfc_link_down(struct ibmvfc_host *vhost,
enum ibmvfc_host_state state)
{
struct ibmvfc_target *tgt;
ENTER;
scsi_block_requests(vhost->host);
list_for_each_entry(tgt, &vhost->targets, queue)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
ibmvfc_set_host_state(vhost, state);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
wake_up(&vhost->work_wait_q);
LEAVE;
}
/**
* ibmvfc_init_host - Start host initialization
* @vhost: ibmvfc host struct
*
* Return value:
* nothing
**/
static void ibmvfc_init_host(struct ibmvfc_host *vhost)
{
struct ibmvfc_target *tgt;
if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
dev_err(vhost->dev,
"Host initialization retries exceeded. Taking adapter offline\n");
ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
return;
}
}
if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
vhost->async_crq.cur = 0;
list_for_each_entry(tgt, &vhost->targets, queue)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
vhost->job_step = ibmvfc_npiv_login;
wake_up(&vhost->work_wait_q);
}
}
/**
* ibmvfc_send_crq - Send a CRQ
* @vhost: ibmvfc host struct
* @word1: the first 64 bits of the data
* @word2: the second 64 bits of the data
*
* Return value:
* 0 on success / other on failure
**/
static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
{
struct vio_dev *vdev = to_vio_dev(vhost->dev);
return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}
/**
* ibmvfc_send_crq_init - Send a CRQ init message
* @vhost: ibmvfc host struct
*
* Return value:
* 0 on success / other on failure
**/
static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
{
ibmvfc_dbg(vhost, "Sending CRQ init\n");
return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
}
/**
* ibmvfc_send_crq_init_complete - Send a CRQ init complete message
* @vhost: ibmvfc host struct
*
* Return value:
* 0 on success / other on failure
**/
static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
{
ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
}
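/*
 * Editor's note (hedged): in the PAPR CRQ handshake, as also used by the
 * ibmvscsi driver, the top byte 0xC0 of word1 marks an initialization
 * message and the next byte selects the step (0x01 init request, 0x02 init
 * complete), which is where the two magic constants above come from. An
 * inert sketch of composing word1 from those fields; crq_init_word is a
 * hypothetical helper, not part of the driver:
 */
#if 0
static u64 crq_init_word(u8 step)
{
	/* step: 0x01 = init request, 0x02 = init complete */
	return (0xC0ULL << 56) | ((u64)step << 48);
}
/* crq_init_word(0x01) == 0xC001000000000000ULL, as sent above */
#endif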
/**
* ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
* @vhost: ibmvfc host struct
*
* Frees irq, deallocates a page for messages, unmaps dma, and unregisters
* the crq with the hypervisor.
**/
static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
long rc = 0;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
struct ibmvfc_crq_queue *crq = &vhost->crq;
ibmvfc_dbg(vhost, "Releasing CRQ\n");
free_irq(vdev->irq, vhost);
tasklet_kill(&vhost->tasklet);
do {
if (rc)
msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
free_page((unsigned long)crq->msgs);
}
/**
* ibmvfc_reenable_crq_queue - reenables the CRQ
* @vhost: ibmvfc host struct
*
* Return value:
* 0 on success / other on failure
**/
static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
{
int rc = 0;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
/* Re-enable the CRQ */
do {
if (rc)
msleep(100);
rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
if (rc)
dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
return rc;
}
/**
* ibmvfc_reset_crq - resets a crq after a failure
* @vhost: ibmvfc host struct
*
* Return value:
* 0 on success / other on failure
**/
static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
{
int rc = 0;
unsigned long flags;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
struct ibmvfc_crq_queue *crq = &vhost->crq;
/* Close the CRQ */
do {
if (rc)
msleep(100);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
spin_lock_irqsave(vhost->host->host_lock, flags);
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
/* Clean out the queue */
memset(crq->msgs, 0, PAGE_SIZE);
crq->cur = 0;
/* And re-open it again */
rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
crq->msg_token, PAGE_SIZE);
if (rc == H_CLOSED)
/* Adapter is good, but other end is not ready */
dev_warn(vhost->dev, "Partner adapter not ready\n");
else if (rc != 0)
dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return rc;
}
/**
* ibmvfc_valid_event - Determines if event is valid.
* @pool: event_pool that contains the event
* @evt: ibmvfc event to be checked for validity
*
* Return value:
* 1 if event is valid / 0 if event is not valid
**/
static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
struct ibmvfc_event *evt)
{
int index = evt - pool->events;
if (index < 0 || index >= pool->size) /* outside of bounds */
return 0;
if (evt != pool->events + index) /* unaligned */
return 0;
return 1;
}
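/*
 * Editor's note: the validity test above leans on C pointer subtraction
 * being in element-sized units, so re-multiplying the index only reproduces
 * the pointer when it sat exactly on an element boundary (in practice; a
 * mid-element pointer is formally undefined, so this is a sanity net, not a
 * proof). An inert restatement, with the hypothetical helper name in_pool:
 */
#if 0
static int in_pool(struct ibmvfc_event_pool *pool, struct ibmvfc_event *evt)
{
	long index = evt - pool->events;	/* element units; truncates if
						 * evt points inside an element */
	return index >= 0 && index < pool->size &&
	       evt == pool->events + index;	/* boundary check */
}
#endif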
/**
* ibmvfc_free_event - Free the specified event
* @evt: ibmvfc_event to be freed
*
**/
static void ibmvfc_free_event(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_event_pool *pool = &vhost->pool;
BUG_ON(!ibmvfc_valid_event(pool, evt));
BUG_ON(atomic_inc_return(&evt->free) != 1);
list_add_tail(&evt->queue, &vhost->free);
}
/**
* ibmvfc_scsi_eh_done - EH done function for queuecommand commands
* @evt: ibmvfc event struct
*
* This function does not set up any error status; that must be done
* before it is called.
**/
static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
{
struct scsi_cmnd *cmnd = evt->cmnd;
if (cmnd) {
scsi_dma_unmap(cmnd);
cmnd->scsi_done(cmnd);
}
if (evt->eh_comp)
complete(evt->eh_comp);
ibmvfc_free_event(evt);
}
/**
* ibmvfc_fail_request - Fail request with specified error code
* @evt: ibmvfc event struct
* @error_code: error code to fail request with
*
* Return value:
* none
**/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{
if (evt->cmnd) {
evt->cmnd->result = (error_code << 16);
evt->done = ibmvfc_scsi_eh_done;
} else
evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED;
list_del(&evt->queue);
del_timer(&evt->timer);
ibmvfc_trc_end(evt);
evt->done(evt);
}
/**
* ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
* @vhost: ibmvfc host struct
* @error_code: error code to fail requests with
*
* Return value:
* none
**/
static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
{
struct ibmvfc_event *evt, *pos;
ibmvfc_dbg(vhost, "Purging all requests\n");
list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
ibmvfc_fail_request(evt, error_code);
}
/**
* ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
* @vhost: struct ibmvfc host to reset
**/
static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
{
ibmvfc_purge_requests(vhost, DID_ERROR);
ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
}
/**
* __ibmvfc_reset_host - Reset the connection to the server (no locking)
* @vhost: struct ibmvfc host to reset
**/
static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
vhost->job_step = ibmvfc_npiv_logout;
wake_up(&vhost->work_wait_q);
} else
ibmvfc_hard_reset_host(vhost);
}
/**
* ibmvfc_reset_host - Reset the connection to the server
* @vhost: ibmvfc host struct
**/
static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
unsigned long flags;
spin_lock_irqsave(vhost->host->host_lock, flags);
__ibmvfc_reset_host(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
/**
* ibmvfc_retry_host_init - Retry host initialization if allowed
* @vhost: ibmvfc host struct
*
* Returns: 1 if init will be retried / 0 if not
*
**/
static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
{
int retry = 0;
if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
vhost->delay_init = 1;
if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
dev_err(vhost->dev,
"Host initialization retries exceeded. Taking adapter offline\n");
ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
__ibmvfc_reset_host(vhost);
else {
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
retry = 1;
}
}
wake_up(&vhost->work_wait_q);
return retry;
}
/**
* __ibmvfc_get_target - Find the specified scsi_target (no locking)
* @starget: scsi target struct
*
* Return value:
* ibmvfc_target struct / NULL if not found
**/
static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ibmvfc_host *vhost = shost_priv(shost);
struct ibmvfc_target *tgt;
list_for_each_entry(tgt, &vhost->targets, queue)
if (tgt->target_id == starget->id) {
kref_get(&tgt->kref);
return tgt;
}
return NULL;
}
/**
* ibmvfc_get_target - Find the specified scsi_target
* @starget: scsi target struct
*
* Return value:
* ibmvfc_target struct / NULL if not found
**/
static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ibmvfc_target *tgt;
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
tgt = __ibmvfc_get_target(starget);
spin_unlock_irqrestore(shost->host_lock, flags);
return tgt;
}
/**
* ibmvfc_get_host_speed - Get host port speed
* @shost: scsi host struct
*
* Return value:
* none
**/
static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
{
struct ibmvfc_host *vhost = shost_priv(shost);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
if (vhost->state == IBMVFC_ACTIVE) {
switch (vhost->login_buf->resp.link_speed / 100) {
case 1:
fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
break;
case 2:
fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
break;
case 4:
fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
break;
case 8:
fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
break;
case 10:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
case 16:
fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
break;
default:
ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
vhost->login_buf->resp.link_speed / 100);
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
} else
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
spin_unlock_irqrestore(shost->host_lock, flags);
}
/**
* ibmvfc_get_host_port_state - Get host port state
* @shost: scsi host struct
*
* Return value:
* none
**/
static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
{
struct ibmvfc_host *vhost = shost_priv(shost);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
switch (vhost->state) {
case IBMVFC_INITIALIZING:
case IBMVFC_ACTIVE:
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
break;
case IBMVFC_LINK_DOWN:
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
break;
case IBMVFC_LINK_DEAD:
case IBMVFC_HOST_OFFLINE:
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
break;
case IBMVFC_HALTED:
fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
break;
case IBMVFC_NO_CRQ:
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
break;
default:
ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
break;
}
spin_unlock_irqrestore(shost->host_lock, flags);
}
/**
* ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
* @rport: rport struct
* @timeout: timeout value
*
* Return value:
* none
**/
static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
if (timeout)
rport->dev_loss_tmo = timeout;
else
rport->dev_loss_tmo = 1;
}
/**
* ibmvfc_release_tgt - Free memory allocated for a target
* @kref: kref struct
*
**/
static void ibmvfc_release_tgt(struct kref *kref)
{
struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
kfree(tgt);
}
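/*
 * Editor's note: ibmvfc_release_tgt() is a standard kref release callback;
 * it runs only when the final kref_put() drops the count to zero. An inert
 * sketch of the get/put pairing used by the starget attribute getters below:
 */
#if 0
	tgt = ibmvfc_get_target(starget);	/* takes a kref on match */
	if (tgt) {
		/* ... use tgt ... */
		kref_put(&tgt->kref, ibmvfc_release_tgt); /* frees on zero */
	}
#endif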
/**
* ibmvfc_get_starget_node_name - Get SCSI target's node name
* @starget: scsi target struct
*
* Return value:
* none
**/
static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
{
struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
if (tgt)
kref_put(&tgt->kref, ibmvfc_release_tgt);
}
/**
* ibmvfc_get_starget_port_name - Get SCSI target's port name
* @starget: scsi target struct
*
* Return value:
* none
**/
static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
{
struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
if (tgt)
kref_put(&tgt->kref, ibmvfc_release_tgt);
}
/**
* ibmvfc_get_starget_port_id - Get SCSI target's port ID
* @starget: scsi target struct
*
* Return value:
* none
**/
static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
{
struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
if (tgt)
kref_put(&tgt->kref, ibmvfc_release_tgt);
}
/**
* ibmvfc_wait_while_resetting - Wait while the host resets
* @vhost: ibmvfc host struct
*
* Return value:
* 0 on success / other on failure
**/
static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
{
long timeout = wait_event_timeout(vhost->init_wait_q,
((vhost->state == IBMVFC_ACTIVE ||
vhost->state == IBMVFC_HOST_OFFLINE ||
vhost->state == IBMVFC_LINK_DEAD) &&
vhost->action == IBMVFC_HOST_ACTION_NONE),
(init_timeout * HZ));
return timeout ? 0 : -EIO;
}
/**
* ibmvfc_issue_fc_host_lip - Re-initiate link initialization
* @shost: scsi host struct
*
* Return value:
* 0 on success / other on failure
**/
static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
{
struct ibmvfc_host *vhost = shost_priv(shost);
dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
ibmvfc_reset_host(vhost);
return ibmvfc_wait_while_resetting(vhost);
}
/**
* ibmvfc_gather_partition_info - Gather info about the LPAR
* @vhost: ibmvfc host struct
*
* Return value:
* none
**/
static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
{
struct device_node *rootdn;
const char *name;
const unsigned int *num;
rootdn = of_find_node_by_path("/");
if (!rootdn)
return;
name = of_get_property(rootdn, "ibm,partition-name", NULL);
if (name)
strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
num = of_get_property(rootdn, "ibm,partition-no", NULL);
if (num)
vhost->partition_number = *num;
of_node_put(rootdn);
}
/**
* ibmvfc_set_login_info - Setup info for NPIV login
* @vhost: ibmvfc host struct
*
* Return value:
* none
**/
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
struct ibmvfc_npiv_login *login_info = &vhost->login_info;
struct device_node *of_node = vhost->dev->of_node;
const char *location;
memset(login_info, 0, sizeof(*login_info));
login_info->ostype = IBMVFC_OS_LINUX;
login_info->max_dma_len = IBMVFC_MAX_SECTORS << 9;
login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu);
login_info->max_response = sizeof(struct ibmvfc_fcp_rsp);
login_info->partition_num = vhost->partition_number;
login_info->vfc_frame_version = 1;
login_info->fcp_version = 3;
login_info->flags = IBMVFC_FLUSH_ON_HALT;
if (vhost->client_migrated)
login_info->flags |= IBMVFC_CLIENT_MIGRATED;
login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
login_info->capabilities = IBMVFC_CAN_MIGRATE;
login_info->async.va = vhost->async_crq.msg_token;
login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
strncpy(login_info->device_name,
dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
location = of_get_property(of_node, "ibm,loc-code", NULL);
location = location ? location : dev_name(vhost->dev);
strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
}
/**
* ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
* @vhost: ibmvfc host who owns the event pool
*
* Returns zero on success.
**/
static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
{
int i;
struct ibmvfc_event_pool *pool = &vhost->pool;
ENTER;
pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
if (!pool->events)
return -ENOMEM;
pool->iu_storage = dma_alloc_coherent(vhost->dev,
pool->size * sizeof(*pool->iu_storage),
&pool->iu_token, 0);
if (!pool->iu_storage) {
kfree(pool->events);
return -ENOMEM;
}
for (i = 0; i < pool->size; ++i) {
struct ibmvfc_event *evt = &pool->events[i];
atomic_set(&evt->free, 1);
evt->crq.valid = 0x80;
evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i);
evt->xfer_iu = pool->iu_storage + i;
evt->vhost = vhost;
evt->ext_list = NULL;
list_add_tail(&evt->queue, &vhost->free);
}
LEAVE;
return 0;
}
/**
* ibmvfc_free_event_pool - Frees memory of the event pool of a host
* @vhost: ibmvfc host who owns the event pool
*
**/
static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
{
int i;
struct ibmvfc_event_pool *pool = &vhost->pool;
ENTER;
for (i = 0; i < pool->size; ++i) {
list_del(&pool->events[i].queue);
BUG_ON(atomic_read(&pool->events[i].free) != 1);
if (pool->events[i].ext_list)
dma_pool_free(vhost->sg_pool,
pool->events[i].ext_list,
pool->events[i].ext_list_token);
}
kfree(pool->events);
dma_free_coherent(vhost->dev,
pool->size * sizeof(*pool->iu_storage),
pool->iu_storage, pool->iu_token);
LEAVE;
}
/**
* ibmvfc_get_event - Gets the next free event in pool
* @vhost: ibmvfc host struct
*
* Returns a free event from the pool.
**/
static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
{
struct ibmvfc_event *evt;
BUG_ON(list_empty(&vhost->free));
evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
atomic_set(&evt->free, 0);
list_del(&evt->queue);
return evt;
}
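/*
 * Editor's note: the pool is a plain free list protected by the host lock;
 * the atomic 'free' flag is a double-get/double-free tripwire, not a
 * synchronization primitive. An inert sketch of the invariant the driver
 * asserts in ibmvfc_free_event():
 */
#if 0
	/* get: flag goes 1 -> 0 */
	atomic_set(&evt->free, 0);
	/* free: 0 -> 1 must hold; any other transition is a double free */
	BUG_ON(atomic_inc_return(&evt->free) != 1);
#endif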
/**
* ibmvfc_init_event - Initialize fields in an event struct that are always
* required.
* @evt: The event
* @done: Routine to call when the event is responded to
* @format: SRP or MAD format
**/
static void ibmvfc_init_event(struct ibmvfc_event *evt,
void (*done) (struct ibmvfc_event *), u8 format)
{
evt->cmnd = NULL;
evt->sync_iu = NULL;
evt->crq.format = format;
evt->done = done;
evt->eh_comp = NULL;
}
/**
* ibmvfc_map_sg_list - Initialize scatterlist
* @scmd: scsi command struct
* @nseg: number of scatterlist segments
* @md: memory descriptor list to initialize
**/
static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
struct srp_direct_buf *md)
{
int i;
struct scatterlist *sg;
scsi_for_each_sg(scmd, sg, nseg, i) {
md[i].va = sg_dma_address(sg);
md[i].len = sg_dma_len(sg);
md[i].key = 0;
}
}
/**
* ibmvfc_map_sg_data - Maps DMA for a scatterlist and initializes descriptor fields
* @scmd: Scsi_Cmnd with the scatterlist
* @evt: ibmvfc event struct
* @vfc_cmd: vfc_cmd that contains the memory descriptor
* @dev: device for which to map dma memory
*
* Returns:
* 0 on success / non-zero on failure
**/
static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
struct ibmvfc_event *evt,
struct ibmvfc_cmd *vfc_cmd, struct device *dev)
{
int sg_mapped;
struct srp_direct_buf *data = &vfc_cmd->ioba;
struct ibmvfc_host *vhost = dev_get_drvdata(dev);
sg_mapped = scsi_dma_map(scmd);
if (!sg_mapped) {
vfc_cmd->flags |= IBMVFC_NO_MEM_DESC;
return 0;
} else if (unlikely(sg_mapped < 0)) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
return sg_mapped;
}
if (scmd->sc_data_direction == DMA_TO_DEVICE) {
vfc_cmd->flags |= IBMVFC_WRITE;
vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
} else {
vfc_cmd->flags |= IBMVFC_READ;
vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
}
if (sg_mapped == 1) {
ibmvfc_map_sg_list(scmd, sg_mapped, data);
return 0;
}
vfc_cmd->flags |= IBMVFC_SCATTERLIST;
if (!evt->ext_list) {
evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
&evt->ext_list_token);
if (!evt->ext_list) {
scsi_dma_unmap(scmd);
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
return -ENOMEM;
}
}
ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
data->va = evt->ext_list_token;
data->len = sg_mapped * sizeof(struct srp_direct_buf);
data->key = 0;
return 0;
}
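/*
 * Editor's note: the mapping above picks one of three layouts -- no memory
 * descriptor for zero segments, a single inline srp_direct_buf for one
 * segment, or an external descriptor list for several. An inert sketch of
 * that decision plus the external list sizing used above:
 */
#if 0
	if (sg_mapped == 0)
		vfc_cmd->flags |= IBMVFC_NO_MEM_DESC;
	else if (sg_mapped == 1)
		ibmvfc_map_sg_list(scmd, sg_mapped, data);	/* inline */
	else {
		vfc_cmd->flags |= IBMVFC_SCATTERLIST;		/* indirect */
		data->len = sg_mapped * sizeof(struct srp_direct_buf);
	}
#endif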
/**
* ibmvfc_timeout - Internal command timeout handler
* @evt: struct ibmvfc_event that timed out
*
* Called when an internally generated command times out
**/
static void ibmvfc_timeout(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
ibmvfc_reset_host(vhost);
}
/**
* ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
* @evt: event to be sent
* @vhost: ibmvfc host struct
* @timeout: timeout in seconds - 0 means do not time command
*
* Returns the value returned from ibmvfc_send_crq(). (Zero for success)
**/
static int ibmvfc_send_event(struct ibmvfc_event *evt,
struct ibmvfc_host *vhost, unsigned long timeout)
{
u64 *crq_as_u64 = (u64 *) &evt->crq;
int rc;
/* Copy the IU into the transfer area */
*evt->xfer_iu = evt->iu;
if (evt->crq.format == IBMVFC_CMD_FORMAT)
evt->xfer_iu->cmd.tag = (u64)evt;
else if (evt->crq.format == IBMVFC_MAD_FORMAT)
evt->xfer_iu->mad_common.tag = (u64)evt;
else
BUG();
list_add_tail(&evt->queue, &vhost->sent);
init_timer(&evt->timer);
if (timeout) {
evt->timer.data = (unsigned long) evt;
evt->timer.expires = jiffies + (timeout * HZ);
evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
add_timer(&evt->timer);
}
mb();
if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
list_del(&evt->queue);
del_timer(&evt->timer);
/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
* Firmware will send a CRQ with a transport event (0xFF) to
* tell this client what has happened to the transport. This
* will be handled in ibmvfc_handle_crq()
*/
if (rc == H_CLOSED) {
if (printk_ratelimit())
dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
if (evt->cmnd)
scsi_dma_unmap(evt->cmnd);
ibmvfc_free_event(evt);
return SCSI_MLQUEUE_HOST_BUSY;
}
dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
if (evt->cmnd) {
evt->cmnd->result = DID_ERROR << 16;
evt->done = ibmvfc_scsi_eh_done;
} else
evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR;
evt->done(evt);
} else
ibmvfc_trc_start(evt);
return 0;
}
/**
* ibmvfc_log_error - Log an error for the failed command if appropriate
* @evt: ibmvfc event to log
*
**/
static void ibmvfc_log_error(struct ibmvfc_event *evt)
{
struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
struct scsi_cmnd *cmnd = evt->cmnd;
const char *err = unknown_error;
int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
int logerr = 0;
int rsp_code = 0;
if (index >= 0) {
logerr = cmd_status[index].log;
err = cmd_status[index].name;
}
if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
return;
if (rsp->flags & FCP_RSP_LEN_VALID)
rsp_code = rsp->data.info.rsp_code;
scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
}
/**
* ibmvfc_relogin - Log back into the specified device
* @sdev: scsi device struct
*
**/
static void ibmvfc_relogin(struct scsi_device *sdev)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct ibmvfc_target *tgt;
list_for_each_entry(tgt, &vhost->targets, queue) {
if (rport == tgt->rport) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
break;
}
}
ibmvfc_reinit_host(vhost);
}
/**
* ibmvfc_scsi_done - Handle responses from commands
* @evt: ibmvfc event to be handled
*
* Used as a callback when sending scsi cmds.
**/
static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
{
struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
struct scsi_cmnd *cmnd = evt->cmnd;
u32 rsp_len = 0;
u32 sense_len = rsp->fcp_sense_len;
if (cmnd) {
if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID)
scsi_set_resid(cmnd, vfc_cmd->adapter_resid);
else if (rsp->flags & FCP_RESID_UNDER)
scsi_set_resid(cmnd, rsp->fcp_resid);
else
scsi_set_resid(cmnd, 0);
if (vfc_cmd->status) {
cmnd->result = ibmvfc_get_err_result(vfc_cmd);
if (rsp->flags & FCP_RSP_LEN_VALID)
rsp_len = rsp->fcp_rsp_len;
if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
ibmvfc_relogin(cmnd->device);
if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
cmnd->result = (DID_ERROR << 16);
ibmvfc_log_error(evt);
}
if (!cmnd->result &&
(scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
cmnd->result = (DID_ERROR << 16);
scsi_dma_unmap(cmnd);
cmnd->scsi_done(cmnd);
}
if (evt->eh_comp)
complete(evt->eh_comp);
ibmvfc_free_event(evt);
}
/**
* ibmvfc_host_chkready - Check if the host can accept commands
* @vhost: struct ibmvfc host
*
* Returns:
* 0 if the host can accept commands / a SCSI result (DID_* << 16) if not
**/
static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
{
int result = 0;
switch (vhost->state) {
case IBMVFC_LINK_DEAD:
case IBMVFC_HOST_OFFLINE:
result = DID_NO_CONNECT << 16;
break;
case IBMVFC_NO_CRQ:
case IBMVFC_INITIALIZING:
case IBMVFC_HALTED:
case IBMVFC_LINK_DOWN:
result = DID_REQUEUE << 16;
break;
case IBMVFC_ACTIVE:
result = 0;
break;
}
return result;
}
/**
* ibmvfc_queuecommand - The queuecommand function of the scsi template
* @cmnd: struct scsi_cmnd to be executed
* @done: Callback function to be called when cmnd is completed
*
* Returns:
* 0 on success / other on failure
**/
static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
void (*done) (struct scsi_cmnd *))
{
struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct ibmvfc_cmd *vfc_cmd;
struct ibmvfc_event *evt;
u8 tag[2];
int rc;
if (unlikely((rc = fc_remote_port_chkready(rport))) ||
unlikely((rc = ibmvfc_host_chkready(vhost)))) {
cmnd->result = rc;
done(cmnd);
return 0;
}
cmnd->result = (DID_OK << 16);
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
evt->cmnd = cmnd;
cmnd->scsi_done = done;
vfc_cmd = &evt->iu.cmd;
memset(vfc_cmd, 0, sizeof(*vfc_cmd));
vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
vfc_cmd->resp.len = sizeof(vfc_cmd->rsp);
vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE;
vfc_cmd->payload_len = sizeof(vfc_cmd->iu);
vfc_cmd->resp_len = sizeof(vfc_cmd->rsp);
vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata;
vfc_cmd->tgt_scsi_id = rport->port_id;
vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
if (scsi_populate_tag_msg(cmnd, tag)) {
vfc_cmd->task_tag = tag[1];
switch (tag[0]) {
case MSG_SIMPLE_TAG:
vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
break;
case MSG_HEAD_TAG:
vfc_cmd->iu.pri_task_attr = IBMVFC_HEAD_OF_QUEUE;
break;
case MSG_ORDERED_TAG:
vfc_cmd->iu.pri_task_attr = IBMVFC_ORDERED_TASK;
break;
}
}
if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
return ibmvfc_send_event(evt, vhost, 0);
ibmvfc_free_event(evt);
if (rc == -ENOMEM)
return SCSI_MLQUEUE_HOST_BUSY;
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
scmd_printk(KERN_ERR, cmnd,
"Failed to map DMA buffer for command. rc=%d\n", rc);
cmnd->result = DID_ERROR << 16;
done(cmnd);
return 0;
}
static DEF_SCSI_QCMD(ibmvfc_queuecommand)
/**
* ibmvfc_sync_completion - Signal that a synchronous command has completed
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
{
/* copy the response back */
if (evt->sync_iu)
*evt->sync_iu = *evt->xfer_iu;
complete(&evt->comp);
}
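/*
 * Editor's note: internal MADs are made synchronous by pointing evt->sync_iu
 * at an on-stack response buffer and sleeping on evt->comp, which the
 * routine above completes when the response arrives. An inert outline of
 * the flow (see ibmvfc_bsg_plogi() below for a complete user):
 */
#if 0
	evt->sync_iu = &rsp_iu;			/* response lands here */
	init_completion(&evt->comp);
	rc = ibmvfc_send_event(evt, vhost, default_timeout);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	if (!rc)
		wait_for_completion(&evt->comp); /* woken by the handler above */
#endif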
/**
* ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
* @evt: struct ibmvfc_event
*
**/
static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
ibmvfc_free_event(evt);
vhost->aborting_passthru = 0;
dev_info(vhost->dev, "Passthru command cancelled\n");
}
/**
* ibmvfc_bsg_timeout - Handle a BSG timeout
* @job: struct fc_bsg_job that timed out
*
* Returns:
* 0 on success / other on failure
**/
static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
{
struct ibmvfc_host *vhost = shost_priv(job->shost);
unsigned long port_id = (unsigned long)job->dd_data;
struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
unsigned long flags;
int rc;
ENTER;
spin_lock_irqsave(vhost->host->host_lock, flags);
if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
__ibmvfc_reset_host(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return 0;
}
vhost->aborting_passthru = 1;
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
tmf = &evt->iu.tmf;
memset(tmf, 0, sizeof(*tmf));
tmf->common.version = 1;
tmf->common.opcode = IBMVFC_TMF_MAD;
tmf->common.length = sizeof(*tmf);
tmf->scsi_id = port_id;
tmf->cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;
tmf->my_cancel_key = IBMVFC_INTERNAL_CANCEL_KEY;
rc = ibmvfc_send_event(evt, vhost, default_timeout);
if (rc != 0) {
vhost->aborting_passthru = 0;
dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
rc = -EIO;
} else
dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
port_id);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
LEAVE;
return rc;
}
/**
* ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
* @vhost: struct ibmvfc_host to send command
* @port_id: port ID to send command
*
* Returns:
* 0 on success / other on failure
**/
static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
{
struct ibmvfc_port_login *plogi;
struct ibmvfc_target *tgt;
struct ibmvfc_event *evt;
union ibmvfc_iu rsp_iu;
unsigned long flags;
int rc = 0, issue_login = 1;
ENTER;
spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->scsi_id == port_id) {
issue_login = 0;
break;
}
}
if (!issue_login)
goto unlock_out;
if (unlikely((rc = ibmvfc_host_chkready(vhost))))
goto unlock_out;
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
plogi = &evt->iu.plogi;
memset(plogi, 0, sizeof(*plogi));
plogi->common.version = 1;
plogi->common.opcode = IBMVFC_PORT_LOGIN;
plogi->common.length = sizeof(*plogi);
plogi->scsi_id = port_id;
evt->sync_iu = &rsp_iu;
init_completion(&evt->comp);
rc = ibmvfc_send_event(evt, vhost, default_timeout);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rc)
return -EIO;
wait_for_completion(&evt->comp);
if (rsp_iu.plogi.common.status)
rc = -EIO;
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
unlock_out:
spin_unlock_irqrestore(vhost->host->host_lock, flags);
LEAVE;
return rc;
}
/**
* ibmvfc_bsg_request - Handle a BSG request
* @job: struct fc_bsg_job to be executed
*
* Returns:
* 0 on success / other on failure
**/
static int ibmvfc_bsg_request(struct fc_bsg_job *job)
{
struct ibmvfc_host *vhost = shost_priv(job->shost);
struct fc_rport *rport = job->rport;
struct ibmvfc_passthru_mad *mad;
struct ibmvfc_event *evt;
union ibmvfc_iu rsp_iu;
unsigned long flags, port_id = -1;
unsigned int code = job->request->msgcode;
int rc = 0, req_seg, rsp_seg, issue_login = 0;
u32 fc_flags, rsp_len;
ENTER;
job->reply->reply_payload_rcv_len = 0;
if (rport)
port_id = rport->port_id;
switch (code) {
case FC_BSG_HST_ELS_NOLOGIN:
port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
(job->request->rqst_data.h_els.port_id[1] << 8) |
job->request->rqst_data.h_els.port_id[2];
/* fall through */
case FC_BSG_RPT_ELS:
fc_flags = IBMVFC_FC_ELS;
break;
case FC_BSG_HST_CT:
issue_login = 1;
port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
(job->request->rqst_data.h_ct.port_id[1] << 8) |
job->request->rqst_data.h_ct.port_id[2];
/* fall through */
case FC_BSG_RPT_CT:
fc_flags = IBMVFC_FC_CT_IU;
break;
default:
return -ENOTSUPP;
}
if (port_id == -1)
return -EINVAL;
if (!mutex_trylock(&vhost->passthru_mutex))
return -EBUSY;
job->dd_data = (void *)port_id;
req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!req_seg) {
mutex_unlock(&vhost->passthru_mutex);
return -ENOMEM;
}
rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!rsp_seg) {
dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
job->request_payload.sg_cnt, DMA_TO_DEVICE);
mutex_unlock(&vhost->passthru_mutex);
return -ENOMEM;
}
if (req_seg > 1 || rsp_seg > 1) {
rc = -EINVAL;
goto out;
}
if (issue_login)
rc = ibmvfc_bsg_plogi(vhost, port_id);
spin_lock_irqsave(vhost->host->host_lock, flags);
if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
unlikely((rc = ibmvfc_host_chkready(vhost)))) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
goto out;
}
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
mad = &evt->iu.passthru;
memset(mad, 0, sizeof(*mad));
mad->common.version = 1;
mad->common.opcode = IBMVFC_PASSTHRU;
mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
mad->cmd_ioba.va = (u64)evt->crq.ioba +
offsetof(struct ibmvfc_passthru_mad, iu);
mad->cmd_ioba.len = sizeof(mad->iu);
mad->iu.cmd_len = job->request_payload.payload_len;
mad->iu.rsp_len = job->reply_payload.payload_len;
mad->iu.flags = fc_flags;
mad->iu.cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;
mad->iu.cmd.va = sg_dma_address(job->request_payload.sg_list);
mad->iu.cmd.len = sg_dma_len(job->request_payload.sg_list);
mad->iu.rsp.va = sg_dma_address(job->reply_payload.sg_list);
mad->iu.rsp.len = sg_dma_len(job->reply_payload.sg_list);
mad->iu.scsi_id = port_id;
mad->iu.tag = (u64)evt;
rsp_len = mad->iu.rsp.len;
evt->sync_iu = &rsp_iu;
init_completion(&evt->comp);
rc = ibmvfc_send_event(evt, vhost, 0);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rc) {
rc = -EIO;
goto out;
}
wait_for_completion(&evt->comp);
if (rsp_iu.passthru.common.status)
rc = -EIO;
else
job->reply->reply_payload_rcv_len = rsp_len;
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
job->reply->result = rc;
job->job_done(job);
rc = 0;
out:
dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
job->request_payload.sg_cnt, DMA_TO_DEVICE);
dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
mutex_unlock(&vhost->passthru_mutex);
LEAVE;
return rc;
}
/**
* ibmvfc_reset_device - Reset the device with the specified reset type
* @sdev: scsi device to reset
* @type: reset type
* @desc: reset type description for log messages
*
* Returns:
* 0 on success / other on failure
**/
static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct ibmvfc_cmd *tmf;
struct ibmvfc_event *evt = NULL;
union ibmvfc_iu rsp_iu;
struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
int rsp_rc = -EBUSY;
unsigned long flags;
int rsp_code = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
if (vhost->state == IBMVFC_ACTIVE) {
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
tmf = &evt->iu.cmd;
memset(tmf, 0, sizeof(*tmf));
tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
tmf->resp.len = sizeof(tmf->rsp);
tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
tmf->payload_len = sizeof(tmf->iu);
tmf->resp_len = sizeof(tmf->rsp);
tmf->cancel_key = (unsigned long)sdev->hostdata;
tmf->tgt_scsi_id = rport->port_id;
int_to_scsilun(sdev->lun, &tmf->iu.lun);
tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
tmf->iu.tmf_flags = type;
evt->sync_iu = &rsp_iu;
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rsp_rc != 0) {
sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
desc, rsp_rc);
return -EIO;
}
sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
wait_for_completion(&evt->comp);
if (rsp_iu.cmd.status)
rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
if (rsp_code) {
if (fc_rsp->flags & FCP_RSP_LEN_VALID)
rsp_code = fc_rsp->data.info.rsp_code;
sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, scsi_status: %x\n",
desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
} else
sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return rsp_rc;
}
/**
* ibmvfc_match_rport - Match function for specified remote port
* @evt: ibmvfc event struct
* @rport: rport to match
*
* Returns:
* 1 if event matches rport / 0 if event does not match rport
**/
static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
{
struct fc_rport *cmd_rport;
if (evt->cmnd) {
cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
if (cmd_rport == rport)
return 1;
}
return 0;
}
/**
* ibmvfc_match_target - Match function for specified target
* @evt: ibmvfc event struct
* @device: device to match (starget)
*
* Returns:
* 1 if event matches starget / 0 if event does not match starget
**/
static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
{
if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
return 1;
return 0;
}
/**
* ibmvfc_match_lun - Match function for specified LUN
* @evt: ibmvfc event struct
* @device: device to match (sdev)
*
* Returns:
* 1 if event matches sdev / 0 if event does not match sdev
**/
static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
{
if (evt->cmnd && evt->cmnd->device == device)
return 1;
return 0;
}
/**
* ibmvfc_wait_for_ops - Wait for ops to complete
* @vhost: ibmvfc host struct
* @device: device to match (starget or sdev)
* @match: match function
*
* Returns:
* SUCCESS / FAILED
**/
static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
int (*match) (struct ibmvfc_event *, void *))
{
struct ibmvfc_event *evt;
DECLARE_COMPLETION_ONSTACK(comp);
int wait;
unsigned long flags;
signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
ENTER;
do {
wait = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(evt, &vhost->sent, queue) {
if (match(evt, device)) {
evt->eh_comp = &comp;
wait++;
}
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (wait) {
timeout = wait_for_completion_timeout(&comp, timeout);
if (!timeout) {
wait = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(evt, &vhost->sent, queue) {
if (match(evt, device)) {
evt->eh_comp = NULL;
wait++;
}
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (wait)
dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
LEAVE;
return wait ? FAILED : SUCCESS;
}
}
} while (wait);
LEAVE;
return SUCCESS;
}
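/*
 * Editor's note: the waiter above uses a two-pass arm/disarm scheme -- arm
 * every matching in-flight event with one shared on-stack completion, wait
 * with a timeout, and on timeout re-walk the list to detach the completion
 * before it goes out of scope. An inert sketch of the arming pass:
 */
#if 0
	spin_lock_irqsave(vhost->host->host_lock, flags);
	list_for_each_entry(evt, &vhost->sent, queue)
		if (match(evt, device)) {
			evt->eh_comp = &comp;	/* armed */
			wait++;
		}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	/* on timeout: same walk, setting evt->eh_comp = NULL to disarm */
#endif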
/**
* ibmvfc_cancel_all - Cancel all outstanding commands to the device
* @sdev: scsi device to cancel commands
* @type: type of error recovery being performed
*
* This sends a cancel to the VIOS for the specified device. This does
* NOT send any abort to the actual device. That must be done separately.
*
* Returns:
* 0 on success / other on failure
**/
static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct scsi_target *starget = scsi_target(sdev);
struct fc_rport *rport = starget_to_rport(starget);
struct ibmvfc_tmf *tmf;
struct ibmvfc_event *evt, *found_evt;
union ibmvfc_iu rsp;
int rsp_rc = -EBUSY;
unsigned long flags;
u16 status;
ENTER;
spin_lock_irqsave(vhost->host->host_lock, flags);
found_evt = NULL;
list_for_each_entry(evt, &vhost->sent, queue) {
if (evt->cmnd && evt->cmnd->device == sdev) {
found_evt = evt;
break;
}
}
if (!found_evt) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return 0;
}
if (vhost->logged_in) {
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
tmf = &evt->iu.tmf;
memset(tmf, 0, sizeof(*tmf));
tmf->common.version = 1;
tmf->common.opcode = IBMVFC_TMF_MAD;
tmf->common.length = sizeof(*tmf);
tmf->scsi_id = rport->port_id;
int_to_scsilun(sdev->lun, &tmf->lun);
if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS))
type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
if (vhost->state == IBMVFC_ACTIVE)
tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
else
tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID);
tmf->cancel_key = (unsigned long)sdev->hostdata;
tmf->my_cancel_key = (unsigned long)starget->hostdata;
evt->sync_iu = &rsp;
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rsp_rc != 0) {
sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
/* If the send fails, the host adapter is most likely going through
a reset; return success so the caller waits for the command being
cancelled to come back */
return 0;
}
sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
wait_for_completion(&evt->comp);
status = rsp.mad_common.status;
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (status != IBMVFC_MAD_SUCCESS) {
sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
switch (status) {
case IBMVFC_MAD_DRIVER_FAILED:
case IBMVFC_MAD_CRQ_ERROR:
/* Host adapter most likely going through reset; return success so
the caller will wait for the command being cancelled to get returned */
return 0;
default:
return -EIO;
}
}
sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
return 0;
}
/**
* ibmvfc_match_key - Match function for specified cancel key
* @evt: ibmvfc event struct
* @key: cancel key to match
*
* Returns:
* 1 if event matches key / 0 if event does not match key
**/
static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
{
unsigned long cancel_key = (unsigned long)key;
if (evt->crq.format == IBMVFC_CMD_FORMAT &&
evt->iu.cmd.cancel_key == cancel_key)
return 1;
return 0;
}
/**
* ibmvfc_match_evt - Match function for specified event
* @evt: ibmvfc event struct
* @match: event to match
*
* Returns:
* 1 if event matches the match parameter / 0 if not
**/
static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
{
if (evt == match)
return 1;
return 0;
}
/**
* ibmvfc_abort_task_set - Abort outstanding commands to the device
* @sdev: scsi device to abort commands for
*
* This sends an Abort Task Set to the VIOS for the specified device. This does
* NOT send any cancel to the VIOS. That must be done separately.
*
* Returns:
* 0 on success / other on failure
**/
static int ibmvfc_abort_task_set(struct scsi_device *sdev)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct ibmvfc_cmd *tmf;
struct ibmvfc_event *evt, *found_evt;
union ibmvfc_iu rsp_iu;
struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
int rc, rsp_rc = -EBUSY;
unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
int rsp_code = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
found_evt = NULL;
list_for_each_entry(evt, &vhost->sent, queue) {
if (evt->cmnd && evt->cmnd->device == sdev) {
found_evt = evt;
break;
}
}
if (!found_evt) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return 0;
}
if (vhost->state == IBMVFC_ACTIVE) {
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
tmf = &evt->iu.cmd;
memset(tmf, 0, sizeof(*tmf));
tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
tmf->resp.len = sizeof(tmf->rsp);
tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
tmf->payload_len = sizeof(tmf->iu);
tmf->resp_len = sizeof(tmf->rsp);
tmf->cancel_key = (unsigned long)sdev->hostdata;
tmf->tgt_scsi_id = rport->port_id;
int_to_scsilun(sdev->lun, &tmf->iu.lun);
tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
evt->sync_iu = &rsp_iu;
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rsp_rc != 0) {
sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
return -EIO;
}
sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
timeout = wait_for_completion_timeout(&evt->comp, timeout);
if (!timeout) {
rc = ibmvfc_cancel_all(sdev, 0);
if (!rc) {
rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
if (rc == SUCCESS)
rc = 0;
}
if (rc) {
sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
ibmvfc_reset_host(vhost);
rsp_rc = -EIO;
rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
if (rc == SUCCESS)
rsp_rc = 0;
rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
if (rc != SUCCESS) {
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_hard_reset_host(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
rsp_rc = 0;
}
goto out;
}
}
if (rsp_iu.cmd.status)
rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
if (rsp_code) {
if (fc_rsp->flags & FCP_RSP_LEN_VALID)
rsp_code = fc_rsp->data.info.rsp_code;
sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, scsi_status: %x\n",
ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
} else
sdev_printk(KERN_INFO, sdev, "Abort successful\n");
out:
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return rsp_rc;
}
/**
* ibmvfc_eh_abort_handler - Abort a command
* @cmd: scsi command to abort
*
* Returns:
* SUCCESS / FAST_IO_FAIL / FAILED
**/
static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct ibmvfc_host *vhost = shost_priv(sdev->host);
int cancel_rc, block_rc;
int rc = FAILED;
ENTER;
block_rc = fc_block_scsi_eh(cmd);
ibmvfc_wait_while_resetting(vhost);
if (block_rc != FAST_IO_FAIL) {
cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
ibmvfc_abort_task_set(sdev);
} else
cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
if (!cancel_rc)
rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
if (block_rc == FAST_IO_FAIL && rc != FAILED)
rc = FAST_IO_FAIL;
LEAVE;
return rc;
}
/**
* ibmvfc_eh_device_reset_handler - Reset a single LUN
* @cmd: scsi command struct
*
* Returns:
* SUCCESS / FAST_IO_FAIL / FAILED
**/
static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct ibmvfc_host *vhost = shost_priv(sdev->host);
int cancel_rc, block_rc, reset_rc = 0;
int rc = FAILED;
ENTER;
block_rc = fc_block_scsi_eh(cmd);
ibmvfc_wait_while_resetting(vhost);
if (block_rc != FAST_IO_FAIL) {
cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
} else
cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
if (!cancel_rc && !reset_rc)
rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
if (block_rc == FAST_IO_FAIL && rc != FAILED)
rc = FAST_IO_FAIL;
LEAVE;
return rc;
}
/**
* ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
* @sdev: scsi device struct
* @data: return code
*
**/
static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
{
unsigned long *rc = data;
*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
}
/**
* ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
* @sdev: scsi device struct
* @data: return code
*
**/
static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
{
unsigned long *rc = data;
*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
}
/**
* ibmvfc_eh_target_reset_handler - Reset the target
* @cmd: scsi command struct
*
* Returns:
* SUCCESS / FAST_IO_FAIL / FAILED
**/
static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct scsi_target *starget = scsi_target(sdev);
int block_rc;
int reset_rc = 0;
int rc = FAILED;
unsigned long cancel_rc = 0;
ENTER;
block_rc = fc_block_scsi_eh(cmd);
ibmvfc_wait_while_resetting(vhost);
if (block_rc != FAST_IO_FAIL) {
starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
} else
starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
if (!cancel_rc && !reset_rc)
rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
if (block_rc == FAST_IO_FAIL && rc != FAILED)
rc = FAST_IO_FAIL;
LEAVE;
return rc;
}
/**
* ibmvfc_eh_host_reset_handler - Reset the connection to the server
* @cmd: struct scsi_cmnd having problems
*
* Returns:
* SUCCESS / FAST_IO_FAIL / FAILED
**/
static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
int rc, block_rc;
struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
block_rc = fc_block_scsi_eh(cmd);
dev_err(vhost->dev, "Resetting connection due to error recovery\n");
rc = ibmvfc_issue_fc_host_lip(vhost->host);
if (block_rc == FAST_IO_FAIL)
return FAST_IO_FAIL;
return rc ? FAILED : SUCCESS;
}
/**
* ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
* @rport: rport struct
*
* Return value:
* none
**/
static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
{
struct Scsi_Host *shost = rport_to_shost(rport);
struct ibmvfc_host *vhost = shost_priv(shost);
struct fc_rport *dev_rport;
struct scsi_device *sdev;
unsigned long rc;
ENTER;
shost_for_each_device(sdev, shost) {
dev_rport = starget_to_rport(scsi_target(sdev));
if (dev_rport != rport)
continue;
ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
}
rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
if (rc == FAILED)
ibmvfc_issue_fc_host_lip(shost);
LEAVE;
}
static const struct ibmvfc_async_desc ae_desc[] = {
{ "PLOGI", IBMVFC_AE_ELS_PLOGI, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
{ "LOGO", IBMVFC_AE_ELS_LOGO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
{ "PRLO", IBMVFC_AE_ELS_PRLO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
{ "N-Port SCN", IBMVFC_AE_SCN_NPORT, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
{ "Group SCN", IBMVFC_AE_SCN_GROUP, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
{ "Domain SCN", IBMVFC_AE_SCN_DOMAIN, IBMVFC_DEFAULT_LOG_LEVEL },
{ "Fabric SCN", IBMVFC_AE_SCN_FABRIC, IBMVFC_DEFAULT_LOG_LEVEL },
{ "Link Up", IBMVFC_AE_LINK_UP, IBMVFC_DEFAULT_LOG_LEVEL },
{ "Link Down", IBMVFC_AE_LINK_DOWN, IBMVFC_DEFAULT_LOG_LEVEL },
{ "Link Dead", IBMVFC_AE_LINK_DEAD, IBMVFC_DEFAULT_LOG_LEVEL },
{ "Halt", IBMVFC_AE_HALT, IBMVFC_DEFAULT_LOG_LEVEL },
{ "Resume", IBMVFC_AE_RESUME, IBMVFC_DEFAULT_LOG_LEVEL },
{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
};
static const struct ibmvfc_async_desc unknown_ae = {
"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
};
/**
* ibmvfc_get_ae_desc - Get text description for async event
* @ae: async event
*
**/
static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
{
int i;
for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
if (ae_desc[i].ae == ae)
return &ae_desc[i];
return &unknown_ae;
}
static const struct {
enum ibmvfc_ae_link_state state;
const char *desc;
} link_desc[] = {
{ IBMVFC_AE_LS_LINK_UP, " link up" },
{ IBMVFC_AE_LS_LINK_BOUNCED, " link bounced" },
{ IBMVFC_AE_LS_LINK_DOWN, " link down" },
{ IBMVFC_AE_LS_LINK_DEAD, " link dead" },
};
/**
* ibmvfc_get_link_state - Get text description for link state
* @state: link state
*
**/
static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
{
int i;
for (i = 0; i < ARRAY_SIZE(link_desc); i++)
if (link_desc[i].state == state)
return link_desc[i].desc;
return "";
}
/**
* ibmvfc_handle_async - Handle an async event from the adapter
* @crq: crq to process
* @vhost: ibmvfc host struct
*
**/
static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
struct ibmvfc_host *vhost)
{
const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(crq->event);
struct ibmvfc_target *tgt;
ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
" node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name,
ibmvfc_get_link_state(crq->link_state));
switch (crq->event) {
case IBMVFC_AE_RESUME:
switch (crq->link_state) {
case IBMVFC_AE_LS_LINK_DOWN:
ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
break;
case IBMVFC_AE_LS_LINK_DEAD:
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
break;
case IBMVFC_AE_LS_LINK_UP:
case IBMVFC_AE_LS_LINK_BOUNCED:
default:
vhost->events_to_log |= IBMVFC_AE_LINKUP;
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
break;
}
break;
case IBMVFC_AE_LINK_UP:
vhost->events_to_log |= IBMVFC_AE_LINKUP;
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
break;
case IBMVFC_AE_SCN_FABRIC:
case IBMVFC_AE_SCN_DOMAIN:
vhost->events_to_log |= IBMVFC_AE_RSCN;
if (vhost->state < IBMVFC_HALTED) {
vhost->delay_init = 1;
__ibmvfc_reset_host(vhost);
}
break;
case IBMVFC_AE_SCN_NPORT:
case IBMVFC_AE_SCN_GROUP:
vhost->events_to_log |= IBMVFC_AE_RSCN;
ibmvfc_reinit_host(vhost);
break;
case IBMVFC_AE_ELS_LOGO:
case IBMVFC_AE_ELS_PRLO:
case IBMVFC_AE_ELS_PLOGI:
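/*
 * A zero scsi_id/wwpn/node_name in the async event acts as a
 * wildcard: an all-zero event matches no target (the loop breaks
 * immediately), while an individual zero field simply skips that
 * comparison.
 */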
list_for_each_entry(tgt, &vhost->targets, queue) {
if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
break;
if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
continue;
if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
continue;
if (crq->node_name && tgt->ids.node_name != crq->node_name)
continue;
if (tgt->need_login && crq->event == IBMVFC_AE_ELS_LOGO)
tgt->logo_rcvd = 1;
if (!tgt->need_login || crq->event == IBMVFC_AE_ELS_PLOGI) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
ibmvfc_reinit_host(vhost);
}
}
break;
case IBMVFC_AE_LINK_DOWN:
case IBMVFC_AE_ADAPTER_FAILED:
ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
break;
case IBMVFC_AE_LINK_DEAD:
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
break;
case IBMVFC_AE_HALT:
ibmvfc_link_down(vhost, IBMVFC_HALTED);
break;
default:
dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
break;
}
}
/**
* ibmvfc_handle_crq - Handles and frees received events in the CRQ
* @crq: Command/Response queue
* @vhost: ibmvfc host struct
*
**/
static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
{
long rc;
struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba;
switch (crq->valid) {
case IBMVFC_CRQ_INIT_RSP:
switch (crq->format) {
case IBMVFC_CRQ_INIT:
dev_info(vhost->dev, "Partner initialized\n");
/* Send back a response */
rc = ibmvfc_send_crq_init_complete(vhost);
if (rc == 0)
ibmvfc_init_host(vhost);
else
dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
break;
case IBMVFC_CRQ_INIT_COMPLETE:
dev_info(vhost->dev, "Partner initialization complete\n");
ibmvfc_init_host(vhost);
break;
default:
dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
}
return;
case IBMVFC_CRQ_XPORT_EVENT:
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
if (crq->format == IBMVFC_PARTITION_MIGRATED) {
/* We need to re-setup the interpartition connection */
dev_info(vhost->dev, "Re-enabling adapter\n");
vhost->client_migrated = 1;
ibmvfc_purge_requests(vhost, DID_REQUEUE);
ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
} else {
dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
ibmvfc_purge_requests(vhost, DID_ERROR);
ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
}
return;
case IBMVFC_CRQ_CMD_RSP:
break;
default:
dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
return;
}
if (crq->format == IBMVFC_ASYNC_EVENT)
return;
/*
 * The only kind of payload CRQs we should get are responses to
 * things we send.  Make sure this response is to something we
 * actually sent.
 */
if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
crq->ioba);
return;
}
if (unlikely(atomic_read(&evt->free))) {
dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
crq->ioba);
return;
}
del_timer(&evt->timer);
list_del(&evt->queue);
ibmvfc_trc_end(evt);
evt->done(evt);
}
/**
* ibmvfc_scan_finished - Check if the device scan is done.
* @shost: scsi host struct
* @time: current elapsed time
*
* Returns:
* 0 if scan is not done / 1 if scan is done
**/
static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
unsigned long flags;
struct ibmvfc_host *vhost = shost_priv(shost);
int done = 0;
spin_lock_irqsave(shost->host_lock, flags);
if (time >= (init_timeout * HZ)) {
dev_info(vhost->dev, "Scan taking longer than %d seconds, "
"continuing initialization\n", init_timeout);
done = 1;
}
if (vhost->scan_complete)
done = 1;
spin_unlock_irqrestore(shost->host_lock, flags);
return done;
}
/**
* ibmvfc_slave_alloc - Setup the device's task set value
* @sdev: struct scsi_device device to configure
*
* Set the device's task set value so that error handling works as
* expected.
*
* Returns:
* 0 on success / -ENXIO if device does not exist
**/
static int ibmvfc_slave_alloc(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct ibmvfc_host *vhost = shost_priv(shost);
unsigned long flags = 0;
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
spin_lock_irqsave(shost->host_lock, flags);
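/* hand out a unique task_set value; it is used as this LUN's cancel key */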
sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
/**
* ibmvfc_target_alloc - Setup the target's task set value
* @starget: struct scsi_target
*
* Set the target's task set value so that error handling works as
* expected.
*
* Returns:
* 0 on success / -ENXIO if device does not exist
**/
static int ibmvfc_target_alloc(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ibmvfc_host *vhost = shost_priv(shost);
unsigned long flags = 0;
spin_lock_irqsave(shost->host_lock, flags);
starget->hostdata = (void *)(unsigned long)vhost->task_set++;
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
/**
* ibmvfc_slave_configure - Configure the device
* @sdev: struct scsi_device device to configure
*
* Enable allow_restart for a device if it is a disk. Adjust the
* queue_depth here also.
*
* Returns:
* 0
**/
static int ibmvfc_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
unsigned long flags = 0;
spin_lock_irqsave(shost->host_lock, flags);
if (sdev->type == TYPE_DISK)
sdev->allow_restart = 1;
if (sdev->tagged_supported) {
scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
scsi_activate_tcq(sdev, sdev->queue_depth);
} else
scsi_deactivate_tcq(sdev, sdev->queue_depth);
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
/**
* ibmvfc_change_queue_depth - Change the device's queue depth
* @sdev: scsi device struct
* @qdepth: depth to set
* @reason: calling context
*
* Return value:
* actual depth set
**/
static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth,
int reason)
{
if (reason != SCSI_QDEPTH_DEFAULT)
return -EOPNOTSUPP;
if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
qdepth = IBMVFC_MAX_CMDS_PER_LUN;
scsi_adjust_queue_depth(sdev, 0, qdepth);
return sdev->queue_depth;
}
/**
* ibmvfc_change_queue_type - Change the device's queue type
* @sdev: scsi device struct
* @tag_type: type of tags to use
*
* Return value:
* actual queue type set
**/
static int ibmvfc_change_queue_type(struct scsi_device *sdev, int tag_type)
{
if (sdev->tagged_supported) {
scsi_set_tag_type(sdev, tag_type);
if (tag_type)
scsi_activate_tcq(sdev, sdev->queue_depth);
else
scsi_deactivate_tcq(sdev, sdev->queue_depth);
} else
tag_type = 0;
return tag_type;
}
static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
return snprintf(buf, PAGE_SIZE, "%s\n",
vhost->login_buf->resp.partition_name);
}
static ssize_t ibmvfc_show_host_device_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
return snprintf(buf, PAGE_SIZE, "%s\n",
vhost->login_buf->resp.device_name);
}
static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
return snprintf(buf, PAGE_SIZE, "%s\n",
vhost->login_buf->resp.port_loc_code);
}
static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
return snprintf(buf, PAGE_SIZE, "%s\n",
vhost->login_buf->resp.drc_name);
}
static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
}
static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
}
/**
* ibmvfc_show_log_level - Show the adapter's error logging level
* @dev: class device struct
* @buf: buffer
*
* Return value:
* number of bytes printed to buffer
**/
static ssize_t ibmvfc_show_log_level(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
unsigned long flags = 0;
int len;
spin_lock_irqsave(shost->host_lock, flags);
len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
spin_unlock_irqrestore(shost->host_lock, flags);
return len;
}
/**
* ibmvfc_store_log_level - Change the adapter's error logging level
* @dev: class device struct
* @buf: buffer
* @count: buffer size
*
* Return value:
* number of bytes consumed from buffer
**/
static ssize_t ibmvfc_store_log_level(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
unsigned long flags = 0;
spin_lock_irqsave(shost->host_lock, flags);
vhost->log_level = simple_strtoul(buf, NULL, 10);
spin_unlock_irqrestore(shost->host_lock, flags);
return strlen(buf);
}
static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
ibmvfc_show_log_level, ibmvfc_store_log_level);
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
* ibmvfc_read_trace - Dump the adapter trace
* @filp: open sysfs file
* @kobj: kobject struct
* @bin_attr: bin_attribute struct
* @buf: buffer
* @off: offset
* @count: buffer size
*
* Return value:
* number of bytes printed to buffer
**/
static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct Scsi_Host *shost = class_to_shost(dev);
struct ibmvfc_host *vhost = shost_priv(shost);
unsigned long flags = 0;
int size = IBMVFC_TRACE_SIZE;
char *src = (char *)vhost->trace;
if (off > size)
return 0;
if (off + count > size) {
size -= off;
count = size;
}
spin_lock_irqsave(shost->host_lock, flags);
memcpy(buf, &src[off], count);
spin_unlock_irqrestore(shost->host_lock, flags);
return count;
}
static struct bin_attribute ibmvfc_trace_attr = {
.attr = {
.name = "trace",
.mode = S_IRUGO,
},
.size = 0,
.read = ibmvfc_read_trace,
};
#endif
static struct device_attribute *ibmvfc_attrs[] = {
&dev_attr_partition_name,
&dev_attr_device_name,
&dev_attr_port_loc_code,
&dev_attr_drc_name,
&dev_attr_npiv_version,
&dev_attr_capabilities,
&dev_attr_log_level,
NULL
};
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "IBM POWER Virtual FC Adapter",
.proc_name = IBMVFC_NAME,
.queuecommand = ibmvfc_queuecommand,
.eh_abort_handler = ibmvfc_eh_abort_handler,
.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
.slave_alloc = ibmvfc_slave_alloc,
.slave_configure = ibmvfc_slave_configure,
.target_alloc = ibmvfc_target_alloc,
.scan_finished = ibmvfc_scan_finished,
.change_queue_depth = ibmvfc_change_queue_depth,
.change_queue_type = ibmvfc_change_queue_type,
.cmd_per_lun = 16,
.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = IBMVFC_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = ibmvfc_attrs,
};
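/*
 * Both the async event queue and the main CRQ are simple ring buffers:
 * the firmware sets bit 0x80 in the valid byte when it deposits an
 * entry, and the tasklet clears valid back to 0 once the entry has
 * been handled, making the slot reusable.  The rmb() ensures the entry
 * contents are read only after the valid flag has been observed.
 */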
/**
* ibmvfc_next_async_crq - Returns the next entry in async queue
* @vhost: ibmvfc host struct
*
* Returns:
* Pointer to next entry in queue / NULL if empty
**/
static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
{
struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
struct ibmvfc_async_crq *crq;
crq = &async_crq->msgs[async_crq->cur];
if (crq->valid & 0x80) {
if (++async_crq->cur == async_crq->size)
async_crq->cur = 0;
rmb();
} else
crq = NULL;
return crq;
}
/**
* ibmvfc_next_crq - Returns the next entry in message queue
* @vhost: ibmvfc host struct
*
* Returns:
* Pointer to next entry in queue / NULL if empty
**/
static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
{
struct ibmvfc_crq_queue *queue = &vhost->crq;
struct ibmvfc_crq *crq;
crq = &queue->msgs[queue->cur];
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
rmb();
} else
crq = NULL;
return crq;
}
/**
* ibmvfc_interrupt - Interrupt handler
* @irq: number of irq to handle, not used
* @dev_instance: ibmvfc_host that received interrupt
*
* Returns:
* IRQ_HANDLED
**/
static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
{
struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
unsigned long flags;
spin_lock_irqsave(vhost->host->host_lock, flags);
vio_disable_interrupts(to_vio_dev(vhost->dev));
tasklet_schedule(&vhost->tasklet);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return IRQ_HANDLED;
}
/**
* ibmvfc_tasklet - Interrupt handler tasklet
* @data: ibmvfc host struct
*
* Returns:
* Nothing
**/
static void ibmvfc_tasklet(void *data)
{
struct ibmvfc_host *vhost = data;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
struct ibmvfc_crq *crq;
struct ibmvfc_async_crq *async;
unsigned long flags;
int done = 0;
spin_lock_irqsave(vhost->host->host_lock, flags);
while (!done) {
/* Pull all the valid messages off the async CRQ */
while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
ibmvfc_handle_async(async, vhost);
async->valid = 0;
wmb();
}
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
wmb();
}
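/*
 * Re-enable interrupts and then poll both queues one more time.  An
 * entry that arrived after the loops above drained the queues but
 * before interrupts were re-enabled would not raise a new interrupt,
 * so it must be picked up here (with interrupts disabled again)
 * before the tasklet can exit.
 */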
vio_enable_interrupts(vdev);
if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
ibmvfc_handle_async(async, vhost);
async->valid = 0;
wmb();
} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
ibmvfc_handle_crq(crq, vhost);
crq->valid = 0;
wmb();
} else
done = 1;
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
/**
* ibmvfc_init_tgt - Set the next init job step for the target
* @tgt: ibmvfc target struct
* @job_step: job step to perform
*
**/
static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT);
tgt->job_step = job_step;
wake_up(&tgt->vhost->work_wait_q);
}
/**
* ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
* @tgt: ibmvfc target struct
* @job_step: initialization job step
*
* Returns: 1 if step will be retried / 0 if not
*
**/
static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
void (*job_step) (struct ibmvfc_target *))
{
if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
wake_up(&tgt->vhost->work_wait_q);
return 0;
} else
ibmvfc_init_tgt(tgt, job_step);
return 1;
}
/* Defined in FC-LS */
static const struct {
int code;
int retry;
int logged_in;
} prli_rsp[] = {
{ 0, 1, 0 },
{ 1, 0, 1 },
{ 2, 1, 0 },
{ 3, 1, 0 },
{ 4, 0, 0 },
{ 5, 0, 0 },
{ 6, 0, 1 },
{ 7, 0, 0 },
{ 8, 1, 0 },
};
/**
* ibmvfc_get_prli_rsp - Find PRLI response index
* @flags: PRLI response flags
*
**/
static int ibmvfc_get_prli_rsp(u16 flags)
{
int i;
int code = (flags & 0x0f00) >> 8;
for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
if (prli_rsp[i].code == code)
return i;
return 0;
}
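/*
 * Example: response flags of 0x0100 decode to code (0x0100 & 0x0f00)
 * >> 8 == 1, matching prli_rsp[1] = { 1, 0, 1 }: no retry, logged in.
 * An unrecognized code falls back to index 0, which requests a retry.
 */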
/**
* ibmvfc_tgt_prli_done - Completion handler for Process Login
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
{
struct ibmvfc_target *tgt = evt->tgt;
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
u32 status = rsp->common.status;
int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
switch (status) {
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
parms->type, parms->flags, parms->service_parms);
if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
index = ibmvfc_get_prli_rsp(parms->flags);
if (prli_rsp[index].logged_in) {
if (parms->flags & IBMVFC_PRLI_EST_IMG_PAIR) {
tgt->need_login = 0;
tgt->ids.roles = 0;
if (parms->service_parms & IBMVFC_PRLI_TARGET_FUNC)
tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
tgt->add_rport = 1;
} else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
} else if (prli_rsp[index].retry)
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
} else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
case IBMVFC_MAD_CRQ_ERROR:
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
break;
case IBMVFC_MAD_FAILED:
default:
if ((rsp->status & IBMVFC_VIOS_FAILURE) && rsp->error == IBMVFC_PLOGI_REQUIRED)
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else if (tgt->logo_rcvd)
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error),
rsp->status, rsp->error, status);
break;
}
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
wake_up(&vhost->work_wait_q);
}
/**
* ibmvfc_tgt_send_prli - Send a process login
* @tgt: ibmvfc target struct
*
**/
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
{
struct ibmvfc_process_login *prli;
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
if (vhost->discovery_threads >= disc_threads)
return;
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
vhost->discovery_threads++;
ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
prli = &evt->iu.prli;
memset(prli, 0, sizeof(*prli));
prli->common.version = 1;
prli->common.opcode = IBMVFC_PROCESS_LOGIN;
prli->common.length = sizeof(*prli);
prli->scsi_id = tgt->scsi_id;
prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR;
prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
} else
tgt_dbg(tgt, "Sent process login\n");
}
/**
* ibmvfc_tgt_plogi_done - Completion handler for Port Login
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
{
struct ibmvfc_target *tgt = evt->tgt;
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
u32 status = rsp->common.status;
int level = IBMVFC_DEFAULT_LOG_LEVEL;
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
switch (status) {
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "Port Login succeeded\n");
if (tgt->ids.port_name &&
tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
vhost->reinit = 1;
tgt_dbg(tgt, "Port re-init required\n");
break;
}
tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
tgt->ids.port_id = tgt->scsi_id;
memcpy(&tgt->service_parms, &rsp->service_parms,
sizeof(tgt->service_parms));
memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
sizeof(tgt->service_parms_change));
ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
case IBMVFC_MAD_CRQ_ERROR:
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
break;
case IBMVFC_MAD_FAILED:
default:
if (ibmvfc_retry_cmd(rsp->status, rsp->error))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
break;
}
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
wake_up(&vhost->work_wait_q);
}
/**
* ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
* @tgt: ibmvfc target struct
*
**/
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
{
struct ibmvfc_port_login *plogi;
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
if (vhost->discovery_threads >= disc_threads)
return;
kref_get(&tgt->kref);
tgt->logo_rcvd = 0;
evt = ibmvfc_get_event(vhost);
vhost->discovery_threads++;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
plogi = &evt->iu.plogi;
memset(plogi, 0, sizeof(*plogi));
plogi->common.version = 1;
plogi->common.opcode = IBMVFC_PORT_LOGIN;
plogi->common.length = sizeof(*plogi);
plogi->scsi_id = tgt->scsi_id;
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
} else
tgt_dbg(tgt, "Sent port login\n");
}
/**
* ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
{
struct ibmvfc_target *tgt = evt->tgt;
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
u32 status = rsp->common.status;
vhost->discovery_threads--;
ibmvfc_free_event(evt);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
switch (status) {
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "Implicit Logout succeeded\n");
break;
case IBMVFC_MAD_DRIVER_FAILED:
kref_put(&tgt->kref, ibmvfc_release_tgt);
wake_up(&vhost->work_wait_q);
return;
case IBMVFC_MAD_FAILED:
default:
tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
break;
}
if (vhost->action == IBMVFC_HOST_ACTION_TGT_INIT)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
else if (vhost->action == IBMVFC_HOST_ACTION_QUERY_TGTS &&
tgt->scsi_id != tgt->new_scsi_id)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
kref_put(&tgt->kref, ibmvfc_release_tgt);
wake_up(&vhost->work_wait_q);
}
/**
* ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
* @tgt: ibmvfc target struct
*
**/
static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
{
struct ibmvfc_implicit_logout *mad;
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
if (vhost->discovery_threads >= disc_threads)
return;
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
vhost->discovery_threads++;
ibmvfc_init_event(evt, ibmvfc_tgt_implicit_logout_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
mad = &evt->iu.implicit_logout;
memset(mad, 0, sizeof(*mad));
mad->common.version = 1;
mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT;
mad->common.length = sizeof(*mad);
mad->old_scsi_id = tgt->scsi_id;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
} else
tgt_dbg(tgt, "Sent Implicit Logout\n");
}
/**
* ibmvfc_adisc_needs_plogi - Does device need PLOGI?
* @mad: ibmvfc passthru mad struct
* @tgt: ibmvfc target struct
*
* Returns:
* 1 if PLOGI needed / 0 if PLOGI not needed
**/
static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
struct ibmvfc_target *tgt)
{
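/*
 * The words checked here mirror the ADISC payload built in
 * ibmvfc_tgt_adisc(): words 2-3 hold the port name, words 4-5 the
 * node name, and word 6 the port ID.  Any mismatch means the target's
 * identity changed and a fresh PLOGI is required.
 */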
if (memcmp(&mad->fc_iu.response[2], &tgt->ids.port_name,
sizeof(tgt->ids.port_name)))
return 1;
if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
sizeof(tgt->ids.node_name)))
return 1;
if (mad->fc_iu.response[6] != tgt->scsi_id)
return 1;
return 0;
}
/**
* ibmvfc_tgt_adisc_done - Completion handler for ADISC
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
{
struct ibmvfc_target *tgt = evt->tgt;
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
u32 status = mad->common.status;
u8 fc_reason, fc_explain;
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
del_timer(&tgt->timer);
switch (status) {
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "ADISC succeeded\n");
if (ibmvfc_adisc_needs_plogi(mad, tgt))
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
case IBMVFC_MAD_FAILED:
default:
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error),
mad->iu.status, mad->iu.error,
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
break;
}
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
wake_up(&vhost->work_wait_q);
}
/**
* ibmvfc_init_passthru - Initialize an event struct for FC passthru
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
{
struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
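/*
 * Every descriptor is an offset from this event's own I/O bus
 * address: cmd_ioba covers the embedded iu, while iu.cmd and iu.rsp
 * cover the FC IU payload and response buffers, so no extra DMA
 * buffer is needed for a passthru request.
 */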
memset(mad, 0, sizeof(*mad));
mad->common.version = 1;
mad->common.opcode = IBMVFC_PASSTHRU;
mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
mad->cmd_ioba.va = (u64)evt->crq.ioba +
offsetof(struct ibmvfc_passthru_mad, iu);
mad->cmd_ioba.len = sizeof(mad->iu);
mad->iu.cmd_len = sizeof(mad->fc_iu.payload);
mad->iu.rsp_len = sizeof(mad->fc_iu.response);
mad->iu.cmd.va = (u64)evt->crq.ioba +
offsetof(struct ibmvfc_passthru_mad, fc_iu) +
offsetof(struct ibmvfc_passthru_fc_iu, payload);
mad->iu.cmd.len = sizeof(mad->fc_iu.payload);
mad->iu.rsp.va = (u64)evt->crq.ioba +
offsetof(struct ibmvfc_passthru_mad, fc_iu) +
offsetof(struct ibmvfc_passthru_fc_iu, response);
mad->iu.rsp.len = sizeof(mad->fc_iu.response);
}
/**
* ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
* @evt: ibmvfc event struct
*
* Just cleanup this event struct. Everything else is handled by
* the ADISC completion handler. If the ADISC never actually comes
* back, we still have the timer running on the ADISC event struct
* which will fire and cause the CRQ to get reset.
*
**/
static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_target *tgt = evt->tgt;
tgt_dbg(tgt, "ADISC cancel complete\n");
vhost->abort_threads--;
ibmvfc_free_event(evt);
kref_put(&tgt->kref, ibmvfc_release_tgt);
wake_up(&vhost->work_wait_q);
}
/**
* ibmvfc_adisc_timeout - Handle an ADISC timeout
* @tgt: ibmvfc target struct
*
* If an ADISC times out, send a cancel. If the cancel times
* out, reset the CRQ. When the ADISC comes back as cancelled,
* log back into the target.
**/
static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt)
{
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
unsigned long flags;
int rc;
tgt_dbg(tgt, "ADISC timeout\n");
spin_lock_irqsave(vhost->host->host_lock, flags);
if (vhost->abort_threads >= disc_threads ||
tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
vhost->state != IBMVFC_INITIALIZING ||
vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return;
}
vhost->abort_threads++;
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
tmf = &evt->iu.tmf;
memset(tmf, 0, sizeof(*tmf));
tmf->common.version = 1;
tmf->common.opcode = IBMVFC_TMF_MAD;
tmf->common.length = sizeof(*tmf);
tmf->scsi_id = tgt->scsi_id;
tmf->cancel_key = tgt->cancel_key;
rc = ibmvfc_send_event(evt, vhost, default_timeout);
if (rc) {
tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
vhost->abort_threads--;
kref_put(&tgt->kref, ibmvfc_release_tgt);
__ibmvfc_reset_host(vhost);
} else
tgt_dbg(tgt, "Attempting to cancel ADISC\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
/**
* ibmvfc_tgt_adisc - Initiate an ADISC for specified target
* @tgt: ibmvfc target struct
*
* When sending an ADISC we end up with two timers running. The
* first timer is the timer in the ibmvfc target struct. If this
* fires, we send a cancel to the target. The second timer is the
* timer on the ibmvfc event for the ADISC, which is longer. If that
* fires, it means the ADISC timed out and our attempt to cancel it
* also failed, so we need to reset the CRQ.
**/
static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
{
struct ibmvfc_passthru_mad *mad;
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
if (vhost->discovery_threads >= disc_threads)
return;
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
vhost->discovery_threads++;
ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
ibmvfc_init_passthru(evt);
mad = &evt->iu.passthru;
mad->iu.flags = IBMVFC_FC_ELS;
mad->iu.scsi_id = tgt->scsi_id;
mad->iu.cancel_key = tgt->cancel_key;
mad->fc_iu.payload[0] = IBMVFC_ADISC;
memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
sizeof(vhost->login_buf->resp.port_name));
memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
sizeof(vhost->login_buf->resp.node_name));
mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff;
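/*
 * Arm (or extend) the cancel timer before sending the ADISC; on a
 * send failure it is deleted again below.
 */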
if (timer_pending(&tgt->timer))
mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
else {
tgt->timer.data = (unsigned long) tgt;
tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
tgt->timer.function = (void (*)(unsigned long))ibmvfc_adisc_timeout;
add_timer(&tgt->timer);
}
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
vhost->discovery_threads--;
del_timer(&tgt->timer);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
} else
tgt_dbg(tgt, "Sent ADISC\n");
}
/**
* ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
{
struct ibmvfc_target *tgt = evt->tgt;
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
u32 status = rsp->common.status;
int level = IBMVFC_DEFAULT_LOG_LEVEL;
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
switch (status) {
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "Query Target succeeded\n");
tgt->new_scsi_id = rsp->scsi_id;
if (rsp->scsi_id != tgt->scsi_id)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
else
ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
case IBMVFC_MAD_CRQ_ERROR:
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
break;
case IBMVFC_MAD_FAILED:
default:
if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
break;
}
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
wake_up(&vhost->work_wait_q);
}
/**
* ibmvfc_tgt_query_target - Initiate a Query Target for specified target
* @tgt: ibmvfc target struct
*
**/
static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
{
struct ibmvfc_query_tgt *query_tgt;
struct ibmvfc_host *vhost = tgt->vhost;
struct ibmvfc_event *evt;
if (vhost->discovery_threads >= disc_threads)
return;
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
vhost->discovery_threads++;
evt->tgt = tgt;
ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
query_tgt = &evt->iu.query_tgt;
memset(query_tgt, 0, sizeof(*query_tgt));
query_tgt->common.version = 1;
query_tgt->common.opcode = IBMVFC_QUERY_TARGET;
query_tgt->common.length = sizeof(*query_tgt);
query_tgt->wwpn = tgt->ids.port_name;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
vhost->discovery_threads--;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
kref_put(&tgt->kref, ibmvfc_release_tgt);
} else
tgt_dbg(tgt, "Sent Query Target\n");
}
/**
* ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
* @vhost: ibmvfc host struct
* @scsi_id: SCSI ID to allocate target for
*
* Returns:
* 0 on success / other on failure
**/
static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
{
struct ibmvfc_target *tgt;
unsigned long flags;
spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->scsi_id == scsi_id) {
if (tgt->need_login)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
goto unlock_out;
}
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
if (!tgt) {
dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
scsi_id);
return -ENOMEM;
}
memset(tgt, 0, sizeof(*tgt));
tgt->scsi_id = scsi_id;
tgt->new_scsi_id = scsi_id;
tgt->vhost = vhost;
tgt->need_login = 1;
tgt->cancel_key = vhost->task_set++;
init_timer(&tgt->timer);
kref_init(&tgt->kref);
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
spin_lock_irqsave(vhost->host->host_lock, flags);
list_add_tail(&tgt->queue, &vhost->targets);
unlock_out:
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return 0;
}
/**
* ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
* @vhost: ibmvfc host struct
*
* Returns:
* 0 on success / other on failure
**/
static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
{
int i, rc;
for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
rc = ibmvfc_alloc_target(vhost,
vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK);
return rc;
}
/**
* ibmvfc_discover_targets_done - Completion handler for discover targets MAD
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
u32 mad_status = rsp->common.status;
int level = IBMVFC_DEFAULT_LOG_LEVEL;
switch (mad_status) {
case IBMVFC_MAD_SUCCESS:
ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
vhost->num_targets = rsp->num_written;
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
break;
case IBMVFC_MAD_FAILED:
level += ibmvfc_retry_host_init(vhost);
ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
default:
dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
break;
}
ibmvfc_free_event(evt);
wake_up(&vhost->work_wait_q);
}
/**
* ibmvfc_discover_targets - Send Discover Targets MAD
* @vhost: ibmvfc host struct
*
**/
static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
{
struct ibmvfc_discover_targets *mad;
struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
mad = &evt->iu.discover_targets;
memset(mad, 0, sizeof(*mad));
mad->common.version = 1;
mad->common.opcode = IBMVFC_DISC_TARGETS;
mad->common.length = sizeof(*mad);
mad->bufflen = vhost->disc_buf_sz;
mad->buffer.va = vhost->disc_buf_dma;
mad->buffer.len = vhost->disc_buf_sz;
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
if (!ibmvfc_send_event(evt, vhost, default_timeout))
ibmvfc_dbg(vhost, "Sent discover targets\n");
else
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
}
/**
* ibmvfc_npiv_login_done - Completion handler for NPIV Login
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
u32 mad_status = evt->xfer_iu->npiv_login.common.status;
struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
unsigned int npiv_max_sectors;
int level = IBMVFC_DEFAULT_LOG_LEVEL;
switch (mad_status) {
case IBMVFC_MAD_SUCCESS:
ibmvfc_free_event(evt);
break;
case IBMVFC_MAD_FAILED:
if (ibmvfc_retry_cmd(rsp->status, rsp->error))
level += ibmvfc_retry_host_init(vhost);
else
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
ibmvfc_free_event(evt);
return;
case IBMVFC_MAD_CRQ_ERROR:
ibmvfc_retry_host_init(vhost);
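/* fall through - the event is freed just like DRIVER_FAILED */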
case IBMVFC_MAD_DRIVER_FAILED:
ibmvfc_free_event(evt);
return;
default:
dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
ibmvfc_free_event(evt);
return;
}
vhost->client_migrated = 0;
if (!(rsp->flags & IBMVFC_NATIVE_FC)) {
dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
rsp->flags);
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
wake_up(&vhost->work_wait_q);
return;
}
if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) {
dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
rsp->max_cmds);
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
wake_up(&vhost->work_wait_q);
return;
}
vhost->logged_in = 1;
npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
rsp->partition_name, rsp->device_name, rsp->port_loc_code,
rsp->drc_name, npiv_max_sectors);
fc_host_fabric_name(vhost->host) = rsp->node_name;
fc_host_node_name(vhost->host) = rsp->node_name;
fc_host_port_name(vhost->host) = rsp->port_name;
fc_host_port_id(vhost->host) = rsp->scsi_id;
fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
fc_host_supported_classes(vhost->host) = 0;
if (rsp->service_parms.class1_parms[0] & 0x80000000)
fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
if (rsp->service_parms.class2_parms[0] & 0x80000000)
fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
if (rsp->service_parms.class3_parms[0] & 0x80000000)
fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
fc_host_maxframe_size(vhost->host) =
rsp->service_parms.common.bb_rcv_sz & 0x0fff;
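/*
 * Reserve IBMVFC_NUM_INTERNAL_REQ of the negotiated command slots for
 * driver-internal MADs; only the remainder backs SCSI commands.
 */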
vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ;
vhost->host->max_sectors = npiv_max_sectors;
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
wake_up(&vhost->work_wait_q);
}
/**
* ibmvfc_npiv_login - Sends NPIV login
* @vhost: ibmvfc host struct
*
**/
static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
{
struct ibmvfc_npiv_login_mad *mad;
struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
ibmvfc_gather_partition_info(vhost);
ibmvfc_set_login_info(vhost);
ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
mad = &evt->iu.npiv_login;
memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
mad->common.version = 1;
mad->common.opcode = IBMVFC_NPIV_LOGIN;
mad->common.length = sizeof(struct ibmvfc_npiv_login_mad);
mad->buffer.va = vhost->login_buf_dma;
mad->buffer.len = sizeof(*vhost->login_buf);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
if (!ibmvfc_send_event(evt, vhost, default_timeout))
ibmvfc_dbg(vhost, "Sent NPIV login\n");
else
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
}
/**
* ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
* @evt: ibmvfc event struct
*
**/
static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
u32 mad_status = evt->xfer_iu->npiv_logout.common.status;
ibmvfc_free_event(evt);
switch (mad_status) {
case IBMVFC_MAD_SUCCESS:
if (list_empty(&vhost->sent) &&
vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
ibmvfc_init_host(vhost);
return;
}
break;
case IBMVFC_MAD_FAILED:
case IBMVFC_MAD_NOT_SUPPORTED:
case IBMVFC_MAD_CRQ_ERROR:
case IBMVFC_MAD_DRIVER_FAILED:
default:
ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
break;
}
ibmvfc_hard_reset_host(vhost);
}
/**
* ibmvfc_npiv_logout - Issue an NPIV Logout
* @vhost: ibmvfc host struct
*
**/
static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
{
struct ibmvfc_npiv_logout_mad *mad;
struct ibmvfc_event *evt;
evt = ibmvfc_get_event(vhost);
ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
mad = &evt->iu.npiv_logout;
memset(mad, 0, sizeof(*mad));
mad->common.version = 1;
mad->common.opcode = IBMVFC_NPIV_LOGOUT;
mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
if (!ibmvfc_send_event(evt, vhost, default_timeout))
ibmvfc_dbg(vhost, "Sent NPIV logout\n");
else
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
}
/**
* ibmvfc_dev_init_to_do - Is there target initialization work to do?
* @vhost: ibmvfc host struct
*
* Returns:
* 1 if work to do / 0 if not
**/
static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
{
struct ibmvfc_target *tgt;
list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
return 1;
}
return 0;
}
/**
* __ibmvfc_work_to_do - Is there task level work to do? (no locking)
* @vhost: ibmvfc host struct
*
* Returns:
* 1 if work to do / 0 if not
**/
static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
{
struct ibmvfc_target *tgt;
if (kthread_should_stop())
return 1;
switch (vhost->action) {
case IBMVFC_HOST_ACTION_NONE:
case IBMVFC_HOST_ACTION_INIT_WAIT:
case IBMVFC_HOST_ACTION_LOGO_WAIT:
return 0;
case IBMVFC_HOST_ACTION_TGT_INIT:
case IBMVFC_HOST_ACTION_QUERY_TGTS:
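/*
 * Discovery work exists only while a free discovery thread slot and a
 * target in INIT remain.  Targets in INIT_WAIT mean we sleep until a
 * response arrives; with neither state present the host state machine
 * itself needs to advance.
 */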
if (vhost->discovery_threads == disc_threads)
return 0;
list_for_each_entry(tgt, &vhost->targets, queue)
if (tgt->action == IBMVFC_TGT_ACTION_INIT)
return 1;
list_for_each_entry(tgt, &vhost->targets, queue)
if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
return 0;
return 1;
case IBMVFC_HOST_ACTION_LOGO:
case IBMVFC_HOST_ACTION_INIT:
case IBMVFC_HOST_ACTION_ALLOC_TGTS:
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
case IBMVFC_HOST_ACTION_QUERY:
case IBMVFC_HOST_ACTION_RESET:
case IBMVFC_HOST_ACTION_REENABLE:
default:
break;
}
return 1;
}
/**
* ibmvfc_work_to_do - Is there task level work to do?
* @vhost: ibmvfc host struct
*
* Returns:
* 1 if work to do / 0 if not
**/
static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
{
unsigned long flags;
int rc;
spin_lock_irqsave(vhost->host->host_lock, flags);
rc = __ibmvfc_work_to_do(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return rc;
}
/**
* ibmvfc_log_ae - Log async events if necessary
* @vhost: ibmvfc host struct
* @events: events to log
*
**/
static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
{
if (events & IBMVFC_AE_RSCN)
fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
if ((events & IBMVFC_AE_LINKDOWN) &&
vhost->state >= IBMVFC_HALTED)
fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
if ((events & IBMVFC_AE_LINKUP) &&
vhost->state == IBMVFC_INITIALIZING)
fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
}
/**
* ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
* @tgt: ibmvfc target struct
*
**/
static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
{
struct ibmvfc_host *vhost = tgt->vhost;
struct fc_rport *rport;
unsigned long flags;
tgt_dbg(tgt, "Adding rport\n");
rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
spin_lock_irqsave(vhost->host->host_lock, flags);
if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
list_del(&tgt->queue);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
fc_remote_port_delete(rport);
del_timer_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return;
}
if (rport) {
tgt_dbg(tgt, "rport add succeeded\n");
tgt->rport = rport;
rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
rport->supported_classes = 0;
tgt->target_id = rport->scsi_target_id;
if (tgt->service_parms.class1_parms[0] & 0x80000000)
rport->supported_classes |= FC_COS_CLASS1;
if (tgt->service_parms.class2_parms[0] & 0x80000000)
rport->supported_classes |= FC_COS_CLASS2;
if (tgt->service_parms.class3_parms[0] & 0x80000000)
rport->supported_classes |= FC_COS_CLASS3;
if (rport->rqst_q)
blk_queue_max_segments(rport->rqst_q, 1);
} else
tgt_dbg(tgt, "rport add failed\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
/**
* ibmvfc_do_work - Do task level work
* @vhost: ibmvfc host struct
*
**/
static void ibmvfc_do_work(struct ibmvfc_host *vhost)
{
struct ibmvfc_target *tgt;
unsigned long flags;
struct fc_rport *rport;
int rc;
ibmvfc_log_ae(vhost, vhost->events_to_log);
spin_lock_irqsave(vhost->host->host_lock, flags);
vhost->events_to_log = 0;
switch (vhost->action) {
case IBMVFC_HOST_ACTION_NONE:
case IBMVFC_HOST_ACTION_LOGO_WAIT:
case IBMVFC_HOST_ACTION_INIT_WAIT:
break;
case IBMVFC_HOST_ACTION_RESET:
vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
rc = ibmvfc_reset_crq(vhost);
spin_lock_irqsave(vhost->host->host_lock, flags);
if (rc == H_CLOSED)
vio_enable_interrupts(to_vio_dev(vhost->dev));
if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
(rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
}
break;
case IBMVFC_HOST_ACTION_REENABLE:
vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
rc = ibmvfc_reenable_crq_queue(vhost);
spin_lock_irqsave(vhost->host->host_lock, flags);
if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
}
break;
case IBMVFC_HOST_ACTION_LOGO:
vhost->job_step(vhost);
break;
case IBMVFC_HOST_ACTION_INIT:
BUG_ON(vhost->state != IBMVFC_INITIALIZING);
if (vhost->delay_init) {
vhost->delay_init = 0;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ssleep(15);
return;
} else
vhost->job_step(vhost);
break;
case IBMVFC_HOST_ACTION_QUERY:
list_for_each_entry(tgt, &vhost->targets, queue)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
break;
case IBMVFC_HOST_ACTION_QUERY_TGTS:
list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
tgt->job_step(tgt);
break;
}
}
if (!ibmvfc_dev_init_to_do(vhost))
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
break;
case IBMVFC_HOST_ACTION_TGT_DEL:
case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
rport = tgt->rport;
tgt->rport = NULL;
list_del(&tgt->queue);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rport)
fc_remote_port_delete(rport);
del_timer_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
}
}
if (vhost->state == IBMVFC_INITIALIZING) {
if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
if (vhost->reinit) {
vhost->reinit = 0;
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
} else {
ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
wake_up(&vhost->init_wait_q);
schedule_work(&vhost->rport_add_work_q);
vhost->init_retries = 0;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
scsi_unblock_requests(vhost->host);
}
return;
} else {
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
vhost->job_step = ibmvfc_discover_targets;
}
} else {
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
scsi_unblock_requests(vhost->host);
wake_up(&vhost->init_wait_q);
return;
}
break;
case IBMVFC_HOST_ACTION_ALLOC_TGTS:
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_alloc_targets(vhost);
spin_lock_irqsave(vhost->host->host_lock, flags);
break;
case IBMVFC_HOST_ACTION_TGT_INIT:
list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
tgt->job_step(tgt);
break;
}
}
if (!ibmvfc_dev_init_to_do(vhost))
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
break;
default:
break;
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
/**
* ibmvfc_work - Do task level work
* @data: ibmvfc host struct
*
* Returns:
* zero
**/
static int ibmvfc_work(void *data)
{
struct ibmvfc_host *vhost = data;
int rc;
set_user_nice(current, -20);
while (1) {
rc = wait_event_interruptible(vhost->work_wait_q,
ibmvfc_work_to_do(vhost));
BUG_ON(rc);
if (kthread_should_stop())
break;
ibmvfc_do_work(vhost);
}
ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
return 0;
}
/**
* ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
* @vhost: ibmvfc host struct
*
* Allocates a page for messages, maps it for dma, and registers
* the crq with the hypervisor.
*
* Return value:
* zero on success / other on failure
**/
static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
{
int rc, retrc = -ENOMEM;
struct device *dev = vhost->dev;
struct vio_dev *vdev = to_vio_dev(dev);
struct ibmvfc_crq_queue *crq = &vhost->crq;
ENTER;
crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
if (!crq->msgs)
return -ENOMEM;
crq->size = PAGE_SIZE / sizeof(*crq->msgs);
crq->msg_token = dma_map_single(dev, crq->msgs,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, crq->msg_token))
goto map_failed;
retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
crq->msg_token, PAGE_SIZE);
if (rc == H_RESOURCE)
/* maybe kexecing and resource is busy. try a reset */
retrc = rc = ibmvfc_reset_crq(vhost);
if (rc == H_CLOSED)
dev_warn(dev, "Partner adapter not ready\n");
else if (rc) {
dev_warn(dev, "Error %d opening adapter\n", rc);
goto reg_crq_failed;
}
retrc = 0;
tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
retrc = rc;
goto req_irq_failed;
}
if ((rc = vio_enable_interrupts(vdev))) {
dev_err(dev, "Error %d enabling interrupts\n", rc);
retrc = rc;
goto req_irq_failed;
}
crq->cur = 0;
LEAVE;
return retrc;
req_irq_failed:
tasklet_kill(&vhost->tasklet);
do {
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
free_page((unsigned long)crq->msgs);
return retrc;
}
/**
* ibmvfc_free_mem - Free memory for vhost
* @vhost: ibmvfc host struct
*
* Return value:
* none
**/
static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
{
struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
ENTER;
mempool_destroy(vhost->tgt_pool);
kfree(vhost->trace);
dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
vhost->disc_buf_dma);
dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
vhost->login_buf, vhost->login_buf_dma);
dma_pool_destroy(vhost->sg_pool);
dma_unmap_single(vhost->dev, async_q->msg_token,
async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
free_page((unsigned long)async_q->msgs);
LEAVE;
}
/**
* ibmvfc_alloc_mem - Allocate memory for vhost
* @vhost: ibmvfc host struct
*
* Return value:
* 0 on success / non-zero on failure
**/
static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
{
struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
struct device *dev = vhost->dev;
ENTER;
async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
if (!async_q->msgs) {
dev_err(dev, "Couldn't allocate async queue.\n");
goto nomem;
}
async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
async_q->msg_token = dma_map_single(dev, async_q->msgs,
async_q->size * sizeof(*async_q->msgs),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, async_q->msg_token)) {
dev_err(dev, "Failed to map async queue\n");
goto free_async_crq;
}
vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
SG_ALL * sizeof(struct srp_direct_buf),
sizeof(struct srp_direct_buf), 0);
if (!vhost->sg_pool) {
dev_err(dev, "Failed to allocate sg pool\n");
goto unmap_async_crq;
}
vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
&vhost->login_buf_dma, GFP_KERNEL);
if (!vhost->login_buf) {
dev_err(dev, "Couldn't allocate NPIV login buffer\n");
goto free_sg_pool;
}
vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
&vhost->disc_buf_dma, GFP_KERNEL);
if (!vhost->disc_buf) {
dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
goto free_login_buffer;
}
vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
if (!vhost->trace)
goto free_disc_buffer;
vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
sizeof(struct ibmvfc_target));
if (!vhost->tgt_pool) {
dev_err(dev, "Couldn't allocate target memory pool\n");
goto free_trace;
}
LEAVE;
return 0;
free_trace:
kfree(vhost->trace);
free_disc_buffer:
dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
vhost->disc_buf_dma);
free_login_buffer:
dma_free_coherent(dev, sizeof(*vhost->login_buf),
vhost->login_buf, vhost->login_buf_dma);
free_sg_pool:
dma_pool_destroy(vhost->sg_pool);
unmap_async_crq:
dma_unmap_single(dev, async_q->msg_token,
async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
free_async_crq:
free_page((unsigned long)async_q->msgs);
nomem:
LEAVE;
return -ENOMEM;
}
/**
* ibmvfc_rport_add_thread - Worker thread for rport adds
* @work: work struct
*
**/
static void ibmvfc_rport_add_thread(struct work_struct *work)
{
struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
rport_add_work_q);
struct ibmvfc_target *tgt;
struct fc_rport *rport;
unsigned long flags;
int did_work;
ENTER;
spin_lock_irqsave(vhost->host->host_lock, flags);
do {
did_work = 0;
if (vhost->state != IBMVFC_ACTIVE)
break;
list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->add_rport) {
did_work = 1;
tgt->add_rport = 0;
kref_get(&tgt->kref);
rport = tgt->rport;
if (!rport) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_tgt_add_rport(tgt);
} else if (get_device(&rport->dev)) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
tgt_dbg(tgt, "Setting rport roles\n");
fc_remote_port_rolechg(rport, tgt->ids.roles);
put_device(&rport->dev);
}
kref_put(&tgt->kref, ibmvfc_release_tgt);
spin_lock_irqsave(vhost->host->host_lock, flags);
break;
}
}
} while(did_work);
if (vhost->state == IBMVFC_ACTIVE)
vhost->scan_complete = 1;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
LEAVE;
}
/**
* ibmvfc_probe - Adapter hot plug add entry point
* @vdev: vio device struct
* @id: vio device id struct
*
* Return value:
* 0 on success / non-zero on failure
**/
static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct ibmvfc_host *vhost;
struct Scsi_Host *shost;
struct device *dev = &vdev->dev;
int rc = -ENOMEM;
ENTER;
shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
if (!shost) {
dev_err(dev, "Couldn't allocate host data\n");
goto out;
}
shost->transportt = ibmvfc_transport_template;
shost->can_queue = max_requests;
shost->max_lun = max_lun;
shost->max_id = max_targets;
shost->max_sectors = IBMVFC_MAX_SECTORS;
shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
shost->unique_id = shost->host_no;
vhost = shost_priv(shost);
INIT_LIST_HEAD(&vhost->sent);
INIT_LIST_HEAD(&vhost->free);
INIT_LIST_HEAD(&vhost->targets);
sprintf(vhost->name, IBMVFC_NAME);
vhost->host = shost;
vhost->dev = dev;
vhost->partition_number = -1;
vhost->log_level = log_level;
vhost->task_set = 1;
strcpy(vhost->partition_name, "UNKNOWN");
init_waitqueue_head(&vhost->work_wait_q);
init_waitqueue_head(&vhost->init_wait_q);
INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
mutex_init(&vhost->passthru_mutex);
if ((rc = ibmvfc_alloc_mem(vhost)))
goto free_scsi_host;
vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
shost->host_no);
if (IS_ERR(vhost->work_thread)) {
dev_err(dev, "Couldn't create kernel thread: %ld\n",
PTR_ERR(vhost->work_thread));
goto free_host_mem;
}
if ((rc = ibmvfc_init_crq(vhost))) {
dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
goto kill_kthread;
}
if ((rc = ibmvfc_init_event_pool(vhost))) {
dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
goto release_crq;
}
if ((rc = scsi_add_host(shost, dev)))
goto release_event_pool;
fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
&ibmvfc_trace_attr))) {
dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
goto remove_shost;
}
if (shost_to_fc_host(shost)->rqst_q)
blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
dev_set_drvdata(dev, vhost);
spin_lock(&ibmvfc_driver_lock);
list_add_tail(&vhost->queue, &ibmvfc_head);
spin_unlock(&ibmvfc_driver_lock);
ibmvfc_send_crq_init(vhost);
scsi_scan_host(shost);
return 0;
remove_shost:
scsi_remove_host(shost);
release_event_pool:
ibmvfc_free_event_pool(vhost);
release_crq:
ibmvfc_release_crq_queue(vhost);
kill_kthread:
kthread_stop(vhost->work_thread);
free_host_mem:
ibmvfc_free_mem(vhost);
free_scsi_host:
scsi_host_put(shost);
out:
LEAVE;
return rc;
}
/**
* ibmvfc_remove - Adapter hot plug remove entry point
* @vdev: vio device struct
*
* Return value:
* 0
**/
static int ibmvfc_remove(struct vio_dev *vdev)
{
struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
unsigned long flags;
ENTER;
ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_wait_while_resetting(vhost);
ibmvfc_release_crq_queue(vhost);
kthread_stop(vhost->work_thread);
fc_remove_host(vhost->host);
scsi_remove_host(vhost->host);
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_purge_requests(vhost, DID_ERROR);
ibmvfc_free_event_pool(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_free_mem(vhost);
spin_lock(&ibmvfc_driver_lock);
list_del(&vhost->queue);
spin_unlock(&ibmvfc_driver_lock);
scsi_host_put(vhost->host);
LEAVE;
return 0;
}
/**
* ibmvfc_resume - Resume from suspend
* @dev: device struct
*
* We may have lost an interrupt across suspend/resume, so kick the
* interrupt handler
*
*/
static int ibmvfc_resume(struct device *dev)
{
unsigned long flags;
struct ibmvfc_host *vhost = dev_get_drvdata(dev);
struct vio_dev *vdev = to_vio_dev(dev);
spin_lock_irqsave(vhost->host->host_lock, flags);
vio_disable_interrupts(vdev);
tasklet_schedule(&vhost->tasklet);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return 0;
}
/**
* ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
* @vdev: vio device struct
*
* Return value:
* Number of bytes the driver will need to DMA map at the same time in
* order to perform well.
*/
static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
{
unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
}
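/*
* Worked example (illustrative only; actual numbers depend on the
* module parameters): with max_requests = 100 the event pool needs
* 100 * sizeof(union ibmvfc_iu) bytes, and each of the
* driver_template.cmd_per_lun commands is budgeted 512KB of mapped
* data, so for cmd_per_lun = 16 the estimate comes to roughly
* 100 * sizeof(union ibmvfc_iu) + 16 * 512 * 1024 bytes (about 8MB).
*/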
static struct vio_device_id ibmvfc_device_table[] = {
{"fcp", "IBM,vfc-client"},
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
static struct dev_pm_ops ibmvfc_pm_ops = {
.resume = ibmvfc_resume
};
static struct vio_driver ibmvfc_driver = {
.id_table = ibmvfc_device_table,
.probe = ibmvfc_probe,
.remove = ibmvfc_remove,
.get_desired_dma = ibmvfc_get_desired_dma,
.name = IBMVFC_NAME,
.pm = &ibmvfc_pm_ops,
};
static struct fc_function_template ibmvfc_transport_functions = {
.show_host_fabric_name = 1,
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_port_type = 1,
.show_host_port_id = 1,
.show_host_maxframe_size = 1,
.get_host_port_state = ibmvfc_get_host_port_state,
.show_host_port_state = 1,
.get_host_speed = ibmvfc_get_host_speed,
.show_host_speed = 1,
.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
.terminate_rport_io = ibmvfc_terminate_rport_io,
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.get_starget_node_name = ibmvfc_get_starget_node_name,
.show_starget_node_name = 1,
.get_starget_port_name = ibmvfc_get_starget_port_name,
.show_starget_port_name = 1,
.get_starget_port_id = ibmvfc_get_starget_port_id,
.show_starget_port_id = 1,
.bsg_request = ibmvfc_bsg_request,
.bsg_timeout = ibmvfc_bsg_timeout,
};
/**
* ibmvfc_module_init - Initialize the ibmvfc module
*
* Return value:
* 0 on success / other on failure
**/
static int __init ibmvfc_module_init(void)
{
int rc;
if (!firmware_has_feature(FW_FEATURE_VIO))
return -ENODEV;
printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
if (!ibmvfc_transport_template)
return -ENOMEM;
rc = vio_register_driver(&ibmvfc_driver);
if (rc)
fc_release_transport(ibmvfc_transport_template);
return rc;
}
/**
* ibmvfc_module_exit - Teardown the ibmvfc module
*
* Return value:
* nothing
**/
static void __exit ibmvfc_module_exit(void)
{
vio_unregister_driver(&ibmvfc_driver);
fc_release_transport(ibmvfc_transport_template);
}
module_init(ibmvfc_module_init);
module_exit(ibmvfc_module_exit);
| gpl-2.0 |
SaberMod/android_kernel_moto_shamu | arch/arm/kernel/irq.c | 963 | 5182 | /*
* linux/arch/arm/kernel/irq.c
*
* Copyright (C) 1992 Linus Torvalds
* Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
*
* Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
* Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
* Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains the code used by various IRQ handling routines:
* asking for different IRQ's should be done through these routines
* instead of just grabbing them. Thus setups with different IRQ numbers
* shouldn't result in any weird surprises, and installing new handlers
* should be easier.
*
* IRQ's are in fact implemented a bit like signal handlers for the kernel.
* Naturally it's not a 1:1 relation, but there are similarities.
*/
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
unsigned long irq_err_count;
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_FIQ
show_fiq_list(p, prec);
#endif
#ifdef CONFIG_SMP
show_ipi_list(p, prec);
#endif
seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
return 0;
}
/*
* handle_IRQ handles all hardware IRQ's. Decoded IRQs should
* not come via this function. Instead, they should provide their
* own 'handler'. Used by platform code implementing C-based 1st
* level decoding.
*/
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
/*
* Some hardware gives randomly wrong interrupts. Rather
* than crashing, do something sensible.
*/
if (unlikely(irq >= nr_irqs)) {
if (printk_ratelimit())
printk(KERN_WARNING "Bad IRQ%u\n", irq);
ack_bad_irq(irq);
} else {
generic_handle_irq(irq);
}
irq_exit();
set_irq_regs(old_regs);
}
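/*
* A minimal sketch of such a C-based first level decoder (hypothetical
* platform code; example_intc_base, EXAMPLE_INTC_PENDING and
* example_hwirq_to_irq() are made-up names):
*
*	static void __exception_irq_entry example_handle_irq(struct pt_regs *regs)
*	{
*		u32 hwirq = readl_relaxed(example_intc_base + EXAMPLE_INTC_PENDING);
*
*		handle_IRQ(example_hwirq_to_irq(hwirq), regs);
*	}
*
* Such a handler would be installed via set_handle_irq() below when
* CONFIG_MULTI_IRQ_HANDLER is enabled.
*/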
/*
* asm_do_IRQ is the interface to be used from assembly code.
*/
asmlinkage void __exception_irq_entry
asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
handle_IRQ(irq, regs);
}
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
if (irq >= nr_irqs) {
printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
return;
}
if (iflags & IRQF_VALID)
clr |= IRQ_NOREQUEST;
if (iflags & IRQF_PROBE)
clr |= IRQ_NOPROBE;
if (!(iflags & IRQF_NOAUTOEN))
clr |= IRQ_NOAUTOEN;
/* Order is clear bits in "clr" then set bits in "set" */
irq_modify_status(irq, clr, set & ~clr);
}
EXPORT_SYMBOL_GPL(set_irq_flags);
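/*
* Typical usage (a hedged sketch, not taken from this file): an irqchip
* or platform driver marks a newly mapped hardware interrupt usable with
*
*	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
*
* which clears IRQ_NOREQUEST and IRQ_NOPROBE, and, since IRQF_NOAUTOEN
* is not passed, also clears IRQ_NOAUTOEN so the line is enabled
* automatically by request_irq().
*/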
void __init init_IRQ(void)
{
if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq)
irqchip_init();
else
machine_desc->init_irq();
}
#ifdef CONFIG_MULTI_IRQ_HANDLER
void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
if (handle_arch_irq)
return;
handle_arch_irq = handle_irq;
}
#endif
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
return nr_irqs;
}
#endif
#ifdef CONFIG_HOTPLUG_CPU
static bool migrate_one_irq(struct irq_desc *desc)
{
struct irq_data *d = irq_desc_get_irq_data(desc);
const struct cpumask *affinity = d->affinity;
struct irq_chip *c;
bool ret = false;
/*
* If this is a per-CPU interrupt, or the affinity does not
* include this CPU, then we have nothing to do.
*/
if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
return false;
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
affinity = cpu_online_mask;
ret = true;
}
c = irq_data_get_irq_chip(d);
if (!c->irq_set_affinity)
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
cpumask_copy(d->affinity, affinity);
return ret;
}
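/*
* Worked example: if an IRQ was affine only to the CPU now going
* offline, cpumask_any_and(affinity, cpu_online_mask) finds no usable
* CPU, the affinity is forced to cpu_online_mask, and migrate_irqs()
* below then warns that the IRQ is no longer affine to this CPU.
*/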
/*
* The current CPU has been marked offline. Migrate IRQs off this CPU.
* If the affinity settings do not allow other CPUs, force them onto any
* available CPU.
*
* Note: we must iterate over all IRQs, whether they have an attached
* action structure or not, as we need to get chained interrupts too.
*/
void migrate_irqs(void)
{
unsigned int i;
struct irq_desc *desc;
unsigned long flags;
local_irq_save(flags);
for_each_irq_desc(i, desc) {
bool affinity_broken;
raw_spin_lock(&desc->lock);
affinity_broken = migrate_one_irq(desc);
raw_spin_unlock(&desc->lock);
if (affinity_broken && printk_ratelimit())
pr_warning("IRQ%u no longer affine to CPU%u\n", i,
smp_processor_id());
}
local_irq_restore(flags);
}
#endif /* CONFIG_HOTPLUG_CPU */
| gpl-2.0 |
lategoodbye/linux-mxs-power | fs/sysv/dir.c | 1219 | 8750 | /*
* linux/fs/sysv/dir.c
*
* minix/dir.c
* Copyright (C) 1991, 1992 Linus Torvalds
*
* coh/dir.c
* Copyright (C) 1993 Pascal Haible, Bruno Haible
*
* sysv/dir.c
* Copyright (C) 1993 Bruno Haible
*
* SystemV/Coherent directory handling functions
*/
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include "sysv.h"
static int sysv_readdir(struct file *, struct dir_context *);
const struct file_operations sysv_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.iterate = sysv_readdir,
.fsync = generic_file_fsync,
};
static inline void dir_put_page(struct page *page)
{
kunmap(page);
page_cache_release(page);
}
static inline unsigned long dir_pages(struct inode *inode)
{
return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}
static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
struct address_space *mapping = page->mapping;
struct inode *dir = mapping->host;
int err = 0;
block_write_end(NULL, mapping, pos, len, len, page, NULL);
if (pos+len > dir->i_size) {
i_size_write(dir, pos+len);
mark_inode_dirty(dir);
}
if (IS_DIRSYNC(dir))
err = write_one_page(page, 1);
else
unlock_page(page);
return err;
}
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
struct address_space *mapping = dir->i_mapping;
struct page *page = read_mapping_page(mapping, n, NULL);
if (!IS_ERR(page))
kmap(page);
return page;
}
static int sysv_readdir(struct file *file, struct dir_context *ctx)
{
unsigned long pos = ctx->pos;
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
unsigned long npages = dir_pages(inode);
unsigned offset;
unsigned long n;
ctx->pos = pos = (pos + SYSV_DIRSIZE-1) & ~(SYSV_DIRSIZE-1);
if (pos >= inode->i_size)
return 0;
offset = pos & ~PAGE_CACHE_MASK;
n = pos >> PAGE_CACHE_SHIFT;
for ( ; n < npages; n++, offset = 0) {
char *kaddr, *limit;
struct sysv_dir_entry *de;
struct page *page = dir_get_page(inode, n);
if (IS_ERR(page))
continue;
kaddr = (char *)page_address(page);
de = (struct sysv_dir_entry *)(kaddr+offset);
limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE;
for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) {
char *name = de->name;
if (!de->inode)
continue;
if (!dir_emit(ctx, name, strnlen(name,SYSV_NAMELEN),
fs16_to_cpu(SYSV_SB(sb), de->inode),
DT_UNKNOWN)) {
dir_put_page(page);
return 0;
}
}
dir_put_page(page);
}
return 0;
}
/* compare strings: name[0..len-1] (not zero-terminated) and
* buffer[0..] (filled with zeroes up to buffer[0..maxlen-1])
*/
static inline int namecompare(int len, int maxlen,
const char * name, const char * buffer)
{
if (len < maxlen && buffer[len])
return 0;
return !memcmp(name, buffer, len);
}
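/*
* Example (illustrative, assuming SYSV_NAMELEN == 14): comparing the
* name "foo" (len 3) against an on-disk buffer "foo\0\0..." succeeds,
* while comparing "foo" against "foobar" fails the buffer[len] test up
* front, without ever reaching the memcmp().
*/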
/*
* sysv_find_entry()
*
* finds an entry in the specified directory with the wanted name. It
* returns the cache buffer in which the entry was found, and the entry
* itself (as a parameter - res_dir). It does NOT read the inode of the
* entry - you'll have to do that yourself if you want to.
*/
struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_page)
{
const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct inode * dir = dentry->d_parent->d_inode;
unsigned long start, n;
unsigned long npages = dir_pages(dir);
struct page *page = NULL;
struct sysv_dir_entry *de;
*res_page = NULL;
start = SYSV_I(dir)->i_dir_start_lookup;
if (start >= npages)
start = 0;
n = start;
do {
char *kaddr;
page = dir_get_page(dir, n);
if (!IS_ERR(page)) {
kaddr = (char*)page_address(page);
de = (struct sysv_dir_entry *) kaddr;
kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;
for ( ; (char *) de <= kaddr ; de++) {
if (!de->inode)
continue;
if (namecompare(namelen, SYSV_NAMELEN,
name, de->name))
goto found;
}
dir_put_page(page);
}
if (++n >= npages)
n = 0;
} while (n != start);
return NULL;
found:
SYSV_I(dir)->i_dir_start_lookup = n;
*res_page = page;
return de;
}
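/*
* Caller pattern (a sketch; sysv_inode_by_name() near the end of this
* file is a real instance): the returned entry points into a kmap()ed
* page, so the caller must release it when done, e.g.
*
*	struct page *page;
*	struct sysv_dir_entry *de = sysv_find_entry(dentry, &page);
*	if (de) {
*		... use de->inode / de->name ...
*		dir_put_page(page);
*	}
*/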
int sysv_add_link(struct dentry *dentry, struct inode *inode)
{
struct inode *dir = dentry->d_parent->d_inode;
const char * name = dentry->d_name.name;
int namelen = dentry->d_name.len;
struct page *page = NULL;
struct sysv_dir_entry * de;
unsigned long npages = dir_pages(dir);
unsigned long n;
char *kaddr;
loff_t pos;
int err;
/* We take care of directory expansion in the same loop */
for (n = 0; n <= npages; n++) {
page = dir_get_page(dir, n);
err = PTR_ERR(page);
if (IS_ERR(page))
goto out;
kaddr = (char*)page_address(page);
de = (struct sysv_dir_entry *)kaddr;
kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE;
while ((char *)de <= kaddr) {
if (!de->inode)
goto got_it;
err = -EEXIST;
if (namecompare(namelen, SYSV_NAMELEN, name, de->name))
goto out_page;
de++;
}
dir_put_page(page);
}
BUG();
return -EINVAL;
got_it:
pos = page_offset(page) +
(char*)de - (char*)page_address(page);
lock_page(page);
err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
if (err)
goto out_unlock;
memcpy (de->name, name, namelen);
memset (de->name + namelen, 0, SYSV_DIRSIZE - namelen - 2);
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(dir);
out_page:
dir_put_page(page);
out:
return err;
out_unlock:
unlock_page(page);
goto out_page;
}
int sysv_delete_entry(struct sysv_dir_entry *de, struct page *page)
{
struct inode *inode = page->mapping->host;
char *kaddr = (char*)page_address(page);
loff_t pos = page_offset(page) + (char *)de - kaddr;
int err;
lock_page(page);
err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
BUG_ON(err);
de->inode = 0;
err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
dir_put_page(page);
inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
return err;
}
int sysv_make_empty(struct inode *inode, struct inode *dir)
{
struct page *page = grab_cache_page(inode->i_mapping, 0);
struct sysv_dir_entry * de;
char *base;
int err;
if (!page)
return -ENOMEM;
err = sysv_prepare_chunk(page, 0, 2 * SYSV_DIRSIZE);
if (err) {
unlock_page(page);
goto fail;
}
kmap(page);
base = (char*)page_address(page);
memset(base, 0, PAGE_CACHE_SIZE);
de = (struct sysv_dir_entry *) base;
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
strcpy(de->name,".");
de++;
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), dir->i_ino);
strcpy(de->name,"..");
kunmap(page);
err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE);
fail:
page_cache_release(page);
return err;
}
/*
* routine to check that the specified directory is empty (for rmdir)
*/
int sysv_empty_dir(struct inode * inode)
{
struct super_block *sb = inode->i_sb;
struct page *page = NULL;
unsigned long i, npages = dir_pages(inode);
for (i = 0; i < npages; i++) {
char *kaddr;
struct sysv_dir_entry * de;
page = dir_get_page(inode, i);
if (IS_ERR(page))
continue;
kaddr = (char *)page_address(page);
de = (struct sysv_dir_entry *)kaddr;
kaddr += PAGE_CACHE_SIZE-SYSV_DIRSIZE;
for ( ;(char *)de <= kaddr; de++) {
if (!de->inode)
continue;
/* check for . and .. */
if (de->name[0] != '.')
goto not_empty;
if (!de->name[1]) {
if (de->inode == cpu_to_fs16(SYSV_SB(sb),
inode->i_ino))
continue;
goto not_empty;
}
if (de->name[1] != '.' || de->name[2])
goto not_empty;
}
dir_put_page(page);
}
return 1;
not_empty:
dir_put_page(page);
return 0;
}
/* Releases the page */
void sysv_set_link(struct sysv_dir_entry *de, struct page *page,
struct inode *inode)
{
struct inode *dir = page->mapping->host;
loff_t pos = page_offset(page) +
(char *)de-(char*)page_address(page);
int err;
lock_page(page);
err = sysv_prepare_chunk(page, pos, SYSV_DIRSIZE);
BUG_ON(err);
de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino);
err = dir_commit_chunk(page, pos, SYSV_DIRSIZE);
dir_put_page(page);
dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(dir);
}
struct sysv_dir_entry * sysv_dotdot (struct inode *dir, struct page **p)
{
struct page *page = dir_get_page(dir, 0);
struct sysv_dir_entry *de = NULL;
if (!IS_ERR(page)) {
de = (struct sysv_dir_entry*) page_address(page) + 1;
*p = page;
}
return de;
}
ino_t sysv_inode_by_name(struct dentry *dentry)
{
struct page *page;
struct sysv_dir_entry *de = sysv_find_entry (dentry, &page);
ino_t res = 0;
if (de) {
res = fs16_to_cpu(SYSV_SB(dentry->d_sb), de->inode);
dir_put_page(page);
}
return res;
}
| gpl-2.0 |
AospPlus/android_kernel_msm | drivers/staging/bcm/PHSModule.c | 2243 | 50023 | #include "headers.h"
static UINT CreateSFToClassifierRuleMapping(B_UINT16 uiVcid, B_UINT16 uiClsId, struct bcm_phs_table *psServiceFlowTable, struct bcm_phs_rule *psPhsRule, B_UINT8 u8AssociatedPHSI);
static UINT CreateClassiferToPHSRuleMapping(B_UINT16 uiVcid, B_UINT16 uiClsId, struct bcm_phs_entry *pstServiceFlowEntry, struct bcm_phs_rule *psPhsRule, B_UINT8 u8AssociatedPHSI);
static UINT CreateClassifierPHSRule(B_UINT16 uiClsId, struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *psPhsRule, enum bcm_phs_classifier_context eClsContext, B_UINT8 u8AssociatedPHSI);
static UINT UpdateClassifierPHSRule(B_UINT16 uiClsId, struct bcm_phs_classifier_entry *pstClassifierEntry, struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *psPhsRule, B_UINT8 u8AssociatedPHSI);
static BOOLEAN ValidatePHSRuleComplete(struct bcm_phs_rule *psPhsRule);
static BOOLEAN DerefPhsRule(B_UINT16 uiClsId, struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *pstPhsRule);
static UINT GetClassifierEntry(struct bcm_phs_classifier_table *pstClassifierTable, B_UINT32 uiClsid, enum bcm_phs_classifier_context eClsContext, struct bcm_phs_classifier_entry **ppstClassifierEntry);
static UINT GetPhsRuleEntry(struct bcm_phs_classifier_table *pstClassifierTable, B_UINT32 uiPHSI, enum bcm_phs_classifier_context eClsContext, struct bcm_phs_rule **ppstPhsRule);
static void free_phs_serviceflow_rules(struct bcm_phs_table *psServiceFlowRulesTable);
static int phs_compress(struct bcm_phs_rule *phs_members, unsigned char *in_buf,
unsigned char *out_buf, unsigned int *header_size, UINT *new_header_size);
static int verify_suppress_phsf(unsigned char *in_buffer, unsigned char *out_buffer,
unsigned char *phsf, unsigned char *phsm, unsigned int phss, unsigned int phsv, UINT *new_header_size);
static int phs_decompress(unsigned char *in_buf, unsigned char *out_buf,
struct bcm_phs_rule *phs_rules, UINT *header_size);
static ULONG PhsCompress(void *pvContext,
B_UINT16 uiVcid,
B_UINT16 uiClsId,
void *pvInputBuffer,
void *pvOutputBuffer,
UINT *pOldHeaderSize,
UINT *pNewHeaderSize);
static ULONG PhsDeCompress(void *pvContext,
B_UINT16 uiVcid,
void *pvInputBuffer,
void *pvOutputBuffer,
UINT *pInHeaderSize,
UINT *pOutHeaderSize);
#define IN
#define OUT
/*
* Function: PHSTransmit
* Description: This routine handles PHS (Payload Header Suppression) for the Tx path.
* It extracts a fragment of the NDIS_PACKET containing the header
* to be suppressed, then suppresses the header by invoking the exported PHS compress routine.
* The header data after suppression is copied back to the NDIS_PACKET.
*
* Input parameters: IN struct bcm_mini_adapter *Adapter - Miniport Adapter Context
* IN Packet - NDIS packet containing data to be transmitted
* IN USHORT Vcid - vcid pertaining to connection on which the packet is being sent.Used to
* identify PHS rule to be applied.
* B_UINT16 uiClassifierRuleID - Classifier Rule ID
* BOOLEAN bHeaderSuppressionEnabled - indicates if header suppression is enabled for SF.
*
* Return: STATUS_SUCCESS - If the send was successful.
* Other - If an error occurred.
*/
int PHSTransmit(struct bcm_mini_adapter *Adapter,
struct sk_buff **pPacket,
USHORT Vcid,
B_UINT16 uiClassifierRuleID,
BOOLEAN bHeaderSuppressionEnabled,
UINT *PacketLen,
UCHAR bEthCSSupport)
{
/* PHS Specific */
UINT unPHSPktHdrBytesCopied = 0;
UINT unPhsOldHdrSize = 0;
UINT unPHSNewPktHeaderLen = 0;
/* Pointer to PHS IN Hdr Buffer */
PUCHAR pucPHSPktHdrInBuf = Adapter->stPhsTxContextInfo.ucaHdrSuppressionInBuf;
/* Pointer to PHS OUT Hdr Buffer */
PUCHAR pucPHSPktHdrOutBuf = Adapter->stPhsTxContextInfo.ucaHdrSuppressionOutBuf;
UINT usPacketType;
UINT BytesToRemove = 0;
BOOLEAN bPHSI = 0;
LONG ulPhsStatus = 0;
UINT numBytesCompressed = 0;
struct sk_buff *newPacket = NULL;
struct sk_buff *Packet = *pPacket;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "In PHSTransmit");
if (!bEthCSSupport)
BytesToRemove = ETH_HLEN;
/*
* Accumulate the header from the NDIS packet, up to the maximum
* size we support for suppression
*/
usPacketType = ((struct ethhdr *)(Packet->data))->h_proto;
pucPHSPktHdrInBuf = Packet->data + BytesToRemove;
/* considering data after ethernet header */
if ((*PacketLen - BytesToRemove) < MAX_PHS_LENGTHS)
unPHSPktHdrBytesCopied = (*PacketLen - BytesToRemove);
else
unPHSPktHdrBytesCopied = MAX_PHS_LENGTHS;
if ((unPHSPktHdrBytesCopied > 0) &&
(unPHSPktHdrBytesCopied <= MAX_PHS_LENGTHS)) {
/*
* Step 2 Suppress Header using PHS and fill into intermediate ucaPHSPktHdrOutBuf.
* Suppress only if IP Header and PHS Enabled For the Service Flow
*/
if (((usPacketType == ETHERNET_FRAMETYPE_IPV4) ||
(usPacketType == ETHERNET_FRAMETYPE_IPV6)) &&
(bHeaderSuppressionEnabled)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "\nTrying to PHS Compress Using Classifier rule 0x%X", uiClassifierRuleID);
unPHSNewPktHeaderLen = unPHSPktHdrBytesCopied;
ulPhsStatus = PhsCompress(&Adapter->stBCMPhsContext,
Vcid,
uiClassifierRuleID,
pucPHSPktHdrInBuf,
pucPHSPktHdrOutBuf,
&unPhsOldHdrSize,
&unPHSNewPktHeaderLen);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "\nPHS Old header Size : %d New Header Size %d\n", unPhsOldHdrSize, unPHSNewPktHeaderLen);
if (unPHSNewPktHeaderLen == unPhsOldHdrSize) {
if (ulPhsStatus == STATUS_PHS_COMPRESSED)
bPHSI = *pucPHSPktHdrOutBuf;
ulPhsStatus = STATUS_PHS_NOCOMPRESSION;
}
if (ulPhsStatus == STATUS_PHS_COMPRESSED) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "PHS Sending packet Compressed");
if (skb_cloned(Packet)) {
newPacket = skb_copy(Packet, GFP_ATOMIC);
if (newPacket == NULL)
return STATUS_FAILURE;
dev_kfree_skb(Packet);
*pPacket = Packet = newPacket;
pucPHSPktHdrInBuf = Packet->data + BytesToRemove;
}
numBytesCompressed = unPhsOldHdrSize - (unPHSNewPktHeaderLen + PHSI_LEN);
memcpy(pucPHSPktHdrInBuf + numBytesCompressed, pucPHSPktHdrOutBuf, unPHSNewPktHeaderLen + PHSI_LEN);
memcpy(Packet->data + numBytesCompressed, Packet->data, BytesToRemove);
skb_pull(Packet, numBytesCompressed);
return STATUS_SUCCESS;
} else {
/* if one byte headroom is not available, increase it through skb_cow */
if (!(skb_headroom(Packet) > 0)) {
if (skb_cow(Packet, 1)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "SKB Cow Failed\n");
return STATUS_FAILURE;
}
}
skb_push(Packet, 1);
/*
* CAUTION: For IP CS the MAC header is corrupted here.
* It could be preserved by copying the 14-byte Ethernet
* header, but that is not needed, so we overwrite it.
*/
*(Packet->data + BytesToRemove) = bPHSI;
return STATUS_SUCCESS;
}
} else {
if (!bHeaderSuppressionEnabled)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "\nHeader Suppression Disabled For SF: No PHS\n");
return STATUS_SUCCESS;
}
}
/* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL,"PHSTransmit : Dumping data packet After PHS"); */
return STATUS_SUCCESS;
}
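/*
* Worked example of the in-place rewrite above (numbers hypothetical,
* assuming PHSI_LEN == 1): if unPhsOldHdrSize = 40 and PHS shrinks the
* header to unPHSNewPktHeaderLen = 12, the suppressed header plus the
* PHSI byte occupy 13 bytes, so numBytesCompressed = 40 - 13 = 27. The
* 13 compressed bytes are written over the tail of the old header, the
* Ethernet header is copied 27 bytes forward, and skb_pull(Packet, 27)
* drops the now unused front of the buffer.
*/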
int PHSReceive(struct bcm_mini_adapter *Adapter,
USHORT usVcid,
struct sk_buff *packet,
UINT *punPacketLen,
UCHAR *pucEthernetHdr,
UINT bHeaderSuppressionEnabled)
{
u32 nStandardPktHdrLen = 0;
u32 nTotalsuppressedPktHdrBytes = 0;
int ulPhsStatus = 0;
PUCHAR pucInBuff = NULL;
UINT TotalBytesAdded = 0;
if (!bHeaderSuppressionEnabled) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nPhs Disabled for incoming packet");
return ulPhsStatus;
}
pucInBuff = packet->data;
/* Restore PHS suppressed header */
nStandardPktHdrLen = packet->len;
ulPhsStatus = PhsDeCompress(&Adapter->stBCMPhsContext,
usVcid,
pucInBuff,
Adapter->ucaPHSPktRestoreBuf,
&nTotalsuppressedPktHdrBytes,
&nStandardPktHdrLen);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nSuppressed PktHdrLen : 0x%x Restored PktHdrLen : 0x%x",
nTotalsuppressedPktHdrBytes, nStandardPktHdrLen);
if (ulPhsStatus != STATUS_PHS_COMPRESSED) {
skb_pull(packet, 1);
return STATUS_SUCCESS;
} else {
TotalBytesAdded = nStandardPktHdrLen - nTotalsuppressedPktHdrBytes - PHSI_LEN;
if (TotalBytesAdded) {
if (skb_headroom(packet) >= (SKB_RESERVE_ETHERNET_HEADER + TotalBytesAdded))
skb_push(packet, TotalBytesAdded);
else {
if (skb_cow(packet, skb_headroom(packet) + TotalBytesAdded)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "cow failed in receive\n");
return STATUS_FAILURE;
}
skb_push(packet, TotalBytesAdded);
}
}
memcpy(packet->data, Adapter->ucaPHSPktRestoreBuf, nStandardPktHdrLen);
}
return STATUS_SUCCESS;
}
void DumpFullPacket(UCHAR *pBuf, UINT nPktLen)
{
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Dumping Data Packet");
BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, pBuf, nPktLen);
}
/*
* Procedure: phs_init
*
* Description: This routine is responsible for allocating memory for classifier and
* PHS rules.
*
* Arguments:
* pPhsdeviceExtension - ptr to Device extension containing PHS Classifier rules and PHS Rules, RX/TX buffers, etc.
*
* Returns:
* TRUE(1) -If allocation of memory was successful.
* FALSE -If allocation of memory fails.
*/
int phs_init(struct bcm_phs_extension *pPhsdeviceExtension, struct bcm_mini_adapter *Adapter)
{
int i;
struct bcm_phs_table *pstServiceFlowTable;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nPHS:phs_init function");
if (pPhsdeviceExtension->pstServiceFlowPhsRulesTable)
return -EINVAL;
pPhsdeviceExtension->pstServiceFlowPhsRulesTable = kzalloc(sizeof(struct bcm_phs_table), GFP_KERNEL);
if (!pPhsdeviceExtension->pstServiceFlowPhsRulesTable) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nAllocation ServiceFlowPhsRulesTable failed");
return -ENOMEM;
}
pstServiceFlowTable = pPhsdeviceExtension->pstServiceFlowPhsRulesTable;
for (i = 0; i < MAX_SERVICEFLOWS; i++) {
/* Work on the table entry itself; assigning into a local struct copy would leak the allocation. */
struct bcm_phs_entry *sServiceFlow = &pstServiceFlowTable->stSFList[i];
sServiceFlow->pstClassifierTable = kzalloc(sizeof(struct bcm_phs_classifier_table), GFP_KERNEL);
if (!sServiceFlow->pstClassifierTable) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nAllocation failed");
free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable);
pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL;
return -ENOMEM;
}
}
pPhsdeviceExtension->CompressedTxBuffer = kmalloc(PHS_BUFFER_SIZE, GFP_KERNEL);
if (pPhsdeviceExtension->CompressedTxBuffer == NULL) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nAllocation failed");
free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable);
pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL;
return -ENOMEM;
}
pPhsdeviceExtension->UnCompressedRxBuffer = kmalloc(PHS_BUFFER_SIZE, GFP_KERNEL);
if (pPhsdeviceExtension->UnCompressedRxBuffer == NULL) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nAllocation failed");
kfree(pPhsdeviceExtension->CompressedTxBuffer);
free_phs_serviceflow_rules(pPhsdeviceExtension->pstServiceFlowPhsRulesTable);
pPhsdeviceExtension->pstServiceFlowPhsRulesTable = NULL;
return -ENOMEM;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\n phs_init Successful");
return STATUS_SUCCESS;
}
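/*
* Rough lifecycle sketch (hedged; the real call sites live elsewhere in
* this driver):
*
*	phs_init(&Adapter->stBCMPhsContext, Adapter);	adapter bring-up
*	PhsUpdateClassifierRule(...);			as SF/classifier rules arrive
*	PHSTransmit()/PHSReceive();			per packet
*	PhsCleanup(&Adapter->stBCMPhsContext);		adapter teardown
*/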
int PhsCleanup(IN struct bcm_phs_extension *pPHSDeviceExt)
{
if (pPHSDeviceExt->pstServiceFlowPhsRulesTable) {
free_phs_serviceflow_rules(pPHSDeviceExt->pstServiceFlowPhsRulesTable);
pPHSDeviceExt->pstServiceFlowPhsRulesTable = NULL;
}
kfree(pPHSDeviceExt->CompressedTxBuffer);
pPHSDeviceExt->CompressedTxBuffer = NULL;
kfree(pPHSDeviceExt->UnCompressedRxBuffer);
pPHSDeviceExt->UnCompressedRxBuffer = NULL;
return 0;
}
/*
* PHS functions
* PhsUpdateClassifierRule
*
* Routine Description:
* Exported function to add or modify a PHS Rule.
*
* Arguments:
* IN void* pvContext - PHS Driver Specific Context
* IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rule applies
* IN B_UINT16 uiClsId - The Classifier ID within the Service Flow for which the PHS rule applies.
* IN struct bcm_phs_rule *psPhsRule - The PHS Rule structure to be added to the PHS Rule table.
*
* Return Value:
*
* 0 if successful,
* >0 Error.
*/
ULONG PhsUpdateClassifierRule(IN void *pvContext,
IN B_UINT16 uiVcid ,
IN B_UINT16 uiClsId ,
IN struct bcm_phs_rule *psPhsRule,
IN B_UINT8 u8AssociatedPHSI)
{
ULONG lStatus = 0;
UINT nSFIndex = 0;
struct bcm_phs_entry *pstServiceFlowEntry = NULL;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
struct bcm_phs_extension *pDeviceExtension = (struct bcm_phs_extension *)pvContext;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "PHS With Corr2 Changes\n");
if (pDeviceExtension == NULL) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "Invalid Device Extension\n");
return ERR_PHS_INVALID_DEVICE_EXETENSION;
}
if (u8AssociatedPHSI == 0)
return ERR_PHS_INVALID_PHS_RULE;
/* Retrieve the SFID Entry Index for requested Service Flow */
nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
uiVcid, &pstServiceFlowEntry);
if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
/* This is a new SF. Create a mapping entry for this */
lStatus = CreateSFToClassifierRuleMapping(uiVcid, uiClsId,
pDeviceExtension->pstServiceFlowPhsRulesTable, psPhsRule, u8AssociatedPHSI);
return lStatus;
}
/* SF already Exists Add PHS Rule to existing SF */
lStatus = CreateClassiferToPHSRuleMapping(uiVcid, uiClsId,
pstServiceFlowEntry, psPhsRule, u8AssociatedPHSI);
return lStatus;
}
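/*
* Illustrative call (a sketch with made-up values; only the fields this
* file consults are shown, a real struct bcm_phs_rule also carries the
* PHSF/PHSM byte arrays):
*
*	struct bcm_phs_rule sRule = {0};
*
*	sRule.u8PHSI = 1;		rule index carried in suppressed headers
*	sRule.u8PHSS = 40;		header bytes covered by the rule
*	sRule.u8PHSFLength = 40;	length of the suppression field
*	PhsUpdateClassifierRule(&Adapter->stBCMPhsContext, uiVcid,
*				uiClsId, &sRule, sRule.u8PHSI);
*/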
/*
* PhsDeletePHSRule
*
* Routine Description:
* Deletes the specified PHS Rule within the Vcid
*
* Arguments:
* IN void* pvContext - PHS Driver Specific Context
* IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rule applies
* IN B_UINT8 u8PHSI - the PHS Index identifying PHS rule to be deleted.
*
* Return Value:
*
* 0 if successful,
* >0 Error.
*/
ULONG PhsDeletePHSRule(IN void *pvContext, IN B_UINT16 uiVcid, IN B_UINT8 u8PHSI)
{
ULONG lStatus = 0;
UINT nSFIndex = 0, nClsidIndex = 0;
struct bcm_phs_entry *pstServiceFlowEntry = NULL;
struct bcm_phs_classifier_table *pstClassifierRulesTable = NULL;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
struct bcm_phs_extension *pDeviceExtension = (struct bcm_phs_extension *)pvContext;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "======>\n");
if (pDeviceExtension) {
/* Retrieve the SFID Entry Index for requested Service Flow */
nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable, uiVcid, &pstServiceFlowEntry);
if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "SFID Match Failed\n");
return ERR_SF_MATCH_FAIL;
}
pstClassifierRulesTable = pstServiceFlowEntry->pstClassifierTable;
if (pstClassifierRulesTable) {
for (nClsidIndex = 0; nClsidIndex < MAX_PHSRULE_PER_SF; nClsidIndex++) {
if (pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].bUsed && pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule) {
if (pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8PHSI == u8PHSI) {
if (pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt)
pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt--;
if (0 == pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt)
kfree(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule);
memset(&pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex], 0,
sizeof(struct bcm_phs_classifier_entry));
}
}
}
}
}
return lStatus;
}
/*
* PhsDeleteClassifierRule
*
* Routine Description:
* Exported function to Delete a PHS Rule for the SFID, CLSID pair.
*
* Arguments:
* IN void* pvContext - PHS Driver Specific Context
* IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rule applies
* IN B_UINT16 uiClsId - The Classifier ID within the Service Flow for which the PHS rule applies.
*
* Return Value:
*
* 0 if successful,
* >0 Error.
*/
ULONG PhsDeleteClassifierRule(IN void *pvContext, IN B_UINT16 uiVcid, IN B_UINT16 uiClsId)
{
ULONG lStatus = 0;
UINT nSFIndex = 0, nClsidIndex = 0;
struct bcm_phs_entry *pstServiceFlowEntry = NULL;
struct bcm_phs_classifier_entry *pstClassifierEntry = NULL;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
struct bcm_phs_extension *pDeviceExtension = (struct bcm_phs_extension *)pvContext;
if (pDeviceExtension) {
/* Retrieve the SFID Entry Index for requested Service Flow */
nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable, uiVcid, &pstServiceFlowEntry);
if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "SFID Match Failed\n");
return ERR_SF_MATCH_FAIL;
}
nClsidIndex = GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable,
uiClsId, eActiveClassifierRuleContext, &pstClassifierEntry);
if ((nClsidIndex != PHS_INVALID_TABLE_INDEX) && (!pstClassifierEntry->bUnclassifiedPHSRule)) {
if (pstClassifierEntry->pstPhsRule) {
if (pstClassifierEntry->pstPhsRule->u8RefCnt)
pstClassifierEntry->pstPhsRule->u8RefCnt--;
if (0 == pstClassifierEntry->pstPhsRule->u8RefCnt)
kfree(pstClassifierEntry->pstPhsRule);
}
memset(pstClassifierEntry, 0, sizeof(struct bcm_phs_classifier_entry));
}
nClsidIndex = GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable,
uiClsId, eOldClassifierRuleContext, &pstClassifierEntry);
if ((nClsidIndex != PHS_INVALID_TABLE_INDEX) && (!pstClassifierEntry->bUnclassifiedPHSRule)) {
kfree(pstClassifierEntry->pstPhsRule);
memset(pstClassifierEntry, 0, sizeof(struct bcm_phs_classifier_entry));
}
}
return lStatus;
}
/*
* PhsDeleteSFRules
*
* Routine Description:
* Exported function to Delete all PHS Rules for the SFID.
*
* Arguments:
* IN void* pvContext - PHS Driver Specific Context
* IN B_UINT16 uiVcid - The Service Flow ID for which the PHS rules need to be deleted
*
* Return Value:
*
* 0 if successful,
* >0 Error.
*/
ULONG PhsDeleteSFRules(IN void *pvContext, IN B_UINT16 uiVcid)
{
ULONG lStatus = 0;
UINT nSFIndex = 0, nClsidIndex = 0;
struct bcm_phs_entry *pstServiceFlowEntry = NULL;
struct bcm_phs_classifier_table *pstClassifierRulesTable = NULL;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
struct bcm_phs_extension *pDeviceExtension = (struct bcm_phs_extension *)pvContext;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "====>\n");
if (pDeviceExtension) {
/* Retrieve the SFID Entry Index for requested Service Flow */
nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
uiVcid, &pstServiceFlowEntry);
if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "SFID Match Failed\n");
return ERR_SF_MATCH_FAIL;
}
pstClassifierRulesTable = pstServiceFlowEntry->pstClassifierTable;
if (pstClassifierRulesTable) {
for (nClsidIndex = 0; nClsidIndex < MAX_PHSRULE_PER_SF; nClsidIndex++) {
if (pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule) {
if (pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt)
pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt--;
if (0 == pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt)
kfree(pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule);
pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex].pstPhsRule = NULL;
}
memset(&pstClassifierRulesTable->stActivePhsRulesList[nClsidIndex], 0, sizeof(struct bcm_phs_classifier_entry));
if (pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex].pstPhsRule) {
if (pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt)
pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt--;
if (0 == pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex].pstPhsRule->u8RefCnt)
kfree(pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex].pstPhsRule);
pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex].pstPhsRule = NULL;
}
memset(&pstClassifierRulesTable->stOldPhsRulesList[nClsidIndex], 0, sizeof(struct bcm_phs_classifier_entry));
}
}
pstServiceFlowEntry->bUsed = FALSE;
pstServiceFlowEntry->uiVcid = 0;
}
return lStatus;
}
/*
* PhsCompress
*
* Routine Description:
* Exported function to compress the data using PHS.
*
* Arguments:
* IN void* pvContext - PHS Driver Specific Context.
* IN B_UINT16 uiVcid - The Service Flow ID to which current packet header compression applies.
* IN B_UINT16 uiClsId - The Classifier ID to which current packet header compression applies.
* IN void *pvInputBuffer - The Input buffer containing packet header data
* IN void *pvOutputBuffer - The output buffer returned by this function after PHS
* IN UINT *pOldHeaderSize - The actual size of the header before PHS
* IN UINT *pNewHeaderSize - The new size of the header after applying PHS
*
* Return Value:
*
* 0 if successful,
* >0 Error.
*/
ULONG PhsCompress(IN void *pvContext,
IN B_UINT16 uiVcid,
IN B_UINT16 uiClsId,
IN void *pvInputBuffer,
OUT void *pvOutputBuffer,
OUT UINT *pOldHeaderSize,
OUT UINT *pNewHeaderSize)
{
UINT nSFIndex = 0, nClsidIndex = 0;
struct bcm_phs_entry *pstServiceFlowEntry = NULL;
struct bcm_phs_classifier_entry *pstClassifierEntry = NULL;
struct bcm_phs_rule *pstPhsRule = NULL;
ULONG lStatus = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
struct bcm_phs_extension *pDeviceExtension = (struct bcm_phs_extension *)pvContext;
if (pDeviceExtension == NULL) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "Invalid Device Extension\n");
lStatus = STATUS_PHS_NOCOMPRESSION;
return lStatus;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "Suppressing header\n");
/* Retrieve the SFID Entry Index for requested Service Flow */
nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
uiVcid, &pstServiceFlowEntry);
if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "SFID Match Failed\n");
lStatus = STATUS_PHS_NOCOMPRESSION;
return lStatus;
}
nClsidIndex = GetClassifierEntry(pstServiceFlowEntry->pstClassifierTable,
uiClsId, eActiveClassifierRuleContext, &pstClassifierEntry);
if (nClsidIndex == PHS_INVALID_TABLE_INDEX) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "No PHS Rule Defined For Classifier\n");
lStatus = STATUS_PHS_NOCOMPRESSION;
return lStatus;
}
/* get rule from SF id,Cls ID pair and proceed */
pstPhsRule = pstClassifierEntry->pstPhsRule;
if (!ValidatePHSRuleComplete(pstPhsRule)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "PHS Rule Defined For Classifier But Not Complete\n");
lStatus = STATUS_PHS_NOCOMPRESSION;
return lStatus;
}
/* Compress Packet */
lStatus = phs_compress(pstPhsRule, (PUCHAR)pvInputBuffer,
(PUCHAR)pvOutputBuffer, pOldHeaderSize, pNewHeaderSize);
if (lStatus == STATUS_PHS_COMPRESSED) {
pstPhsRule->PHSModifiedBytes += *pOldHeaderSize - *pNewHeaderSize - 1;
pstPhsRule->PHSModifiedNumPackets++;
} else
pstPhsRule->PHSErrorNumPackets++;
return lStatus;
}
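/*
* Worked example of the bookkeeping above (values hypothetical): if a
* 40 byte header compresses to 12 bytes, PHSModifiedBytes grows by
* 40 - 12 - 1 = 27, the extra 1 accounting for the PHSI byte that is
* prepended to every compressed header, and PHSModifiedNumPackets is
* bumped by one.
*/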
/*
* PhsDeCompress
*
* Routine Description:
* Exported function to restore the packet header in Rx path.
*
* Arguments:
* IN void* pvContext - PHS Driver Specific Context.
* IN B_UINT16 uiVcid - The Service Flow ID to which current packet header restoration applies.
* IN void *pvInputBuffer - The Input buffer containing suppressed packet header data
* OUT void *pvOutputBuffer - The output buffer returned by this function after restoration
* OUT UINT *pHeaderSize - The packet header size after restoration is returned in this parameter.
*
* Return Value:
*
* 0 if successful,
* >0 Error.
*/
ULONG PhsDeCompress(IN void *pvContext,
IN B_UINT16 uiVcid,
IN void *pvInputBuffer,
OUT void *pvOutputBuffer,
OUT UINT *pInHeaderSize,
OUT UINT *pOutHeaderSize)
{
UINT nSFIndex = 0, nPhsRuleIndex = 0;
struct bcm_phs_entry *pstServiceFlowEntry = NULL;
struct bcm_phs_rule *pstPhsRule = NULL;
UINT phsi;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
struct bcm_phs_extension *pDeviceExtension = (struct bcm_phs_extension *)pvContext;
*pInHeaderSize = 0;
if (pDeviceExtension == NULL) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "Invalid Device Extension\n");
return ERR_PHS_INVALID_DEVICE_EXETENSION;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "Restoring header\n");
phsi = *((unsigned char *)(pvInputBuffer));
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "PHSI To Be Used For restore : %x\n", phsi);
if (phsi == UNCOMPRESSED_PACKET)
return STATUS_PHS_NOCOMPRESSION;
/* Retrieve the SFID Entry Index for requested Service Flow */
nSFIndex = GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
uiVcid, &pstServiceFlowEntry);
if (nSFIndex == PHS_INVALID_TABLE_INDEX) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "SFID Match Failed During Lookup\n");
return ERR_SF_MATCH_FAIL;
}
nPhsRuleIndex = GetPhsRuleEntry(pstServiceFlowEntry->pstClassifierTable, phsi,
eActiveClassifierRuleContext, &pstPhsRule);
if (nPhsRuleIndex == PHS_INVALID_TABLE_INDEX) {
/* Phs Rule does not exist in active rules table. Lets try in the old rules table. */
nPhsRuleIndex = GetPhsRuleEntry(pstServiceFlowEntry->pstClassifierTable,
phsi, eOldClassifierRuleContext, &pstPhsRule);
if (nPhsRuleIndex == PHS_INVALID_TABLE_INDEX)
return ERR_PHSRULE_MATCH_FAIL;
}
*pInHeaderSize = phs_decompress((PUCHAR)pvInputBuffer,
(PUCHAR)pvOutputBuffer, pstPhsRule, pOutHeaderSize);
pstPhsRule->PHSModifiedBytes += *pOutHeaderSize - *pInHeaderSize - 1;
pstPhsRule->PHSModifiedNumPackets++;
return STATUS_PHS_COMPRESSED;
}
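/*
* Note on the lookup above (a restatement, not new behaviour): the
* first byte of the received header is the PHSI; a value of
* UNCOMPRESSED_PACKET short-circuits restoration, otherwise the PHSI is
* resolved first against the active classifier rules and, failing that,
* against the old rules kept across rule updates.
*/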
/*
* Procedure: free_phs_serviceflow_rules
*
* Description: This routine is responsible for freeing memory allocated for PHS rules.
*
* Arguments:
* psServiceFlowRulesTable - ptr to the bcm_phs_table structure holding the rules.
*
* Returns:
* Does not return any value.
*/
static void free_phs_serviceflow_rules(struct bcm_phs_table *psServiceFlowRulesTable)
{
int i, j;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "=======>\n");
if (psServiceFlowRulesTable) {
for (i = 0; i < MAX_SERVICEFLOWS; i++) {
struct bcm_phs_entry stServiceFlowEntry = psServiceFlowRulesTable->stSFList[i];
struct bcm_phs_classifier_table *pstClassifierRulesTable = stServiceFlowEntry.pstClassifierTable;
if (pstClassifierRulesTable) {
for (j = 0; j < MAX_PHSRULE_PER_SF; j++) {
if (pstClassifierRulesTable->stActivePhsRulesList[j].pstPhsRule) {
if (pstClassifierRulesTable->stActivePhsRulesList[j].pstPhsRule->u8RefCnt)
pstClassifierRulesTable->stActivePhsRulesList[j].pstPhsRule->u8RefCnt--;
if (0 == pstClassifierRulesTable->stActivePhsRulesList[j].pstPhsRule->u8RefCnt)
kfree(pstClassifierRulesTable->stActivePhsRulesList[j].pstPhsRule);
pstClassifierRulesTable->stActivePhsRulesList[j].pstPhsRule = NULL;
}
if (pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule) {
if (pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule->u8RefCnt)
pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule->u8RefCnt--;
if (0 == pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule->u8RefCnt)
kfree(pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule);
pstClassifierRulesTable->stOldPhsRulesList[j].pstPhsRule = NULL;
}
}
kfree(pstClassifierRulesTable);
/* Clear the pointer in the table entry itself, not just the local copy */
psServiceFlowRulesTable->stSFList[i].pstClassifierTable = NULL;
}
}
}
kfree(psServiceFlowRulesTable);
psServiceFlowRulesTable = NULL;
}
static BOOLEAN ValidatePHSRuleComplete(IN struct bcm_phs_rule *psPhsRule)
{
if (psPhsRule) {
if (!psPhsRule->u8PHSI) {
/* PHSI is not valid */
return FALSE;
}
if (!psPhsRule->u8PHSS) {
/* PHSS is undefined */
return FALSE;
}
/* Check if PHSF is defined for the PHS rule */
if (!psPhsRule->u8PHSFLength) /* If any part of PHSF is valid, the rule contains a valid PHSF */
return FALSE;
return TRUE;
} else
return FALSE;
}
UINT GetServiceFlowEntry(IN struct bcm_phs_table *psServiceFlowTable,
IN B_UINT16 uiVcid,
struct bcm_phs_entry **ppstServiceFlowEntry)
{
int i;
for (i = 0; i < MAX_SERVICEFLOWS; i++) {
if (psServiceFlowTable->stSFList[i].bUsed) {
if (psServiceFlowTable->stSFList[i].uiVcid == uiVcid) {
*ppstServiceFlowEntry = &psServiceFlowTable->stSFList[i];
return i;
}
}
}
*ppstServiceFlowEntry = NULL;
return PHS_INVALID_TABLE_INDEX;
}
UINT GetClassifierEntry(IN struct bcm_phs_classifier_table *pstClassifierTable,
IN B_UINT32 uiClsid, enum bcm_phs_classifier_context eClsContext,
OUT struct bcm_phs_classifier_entry **ppstClassifierEntry)
{
int i;
struct bcm_phs_classifier_entry *psClassifierRules = NULL;
for (i = 0; i < MAX_PHSRULE_PER_SF; i++) {
if (eClsContext == eActiveClassifierRuleContext)
psClassifierRules = &pstClassifierTable->stActivePhsRulesList[i];
else
psClassifierRules = &pstClassifierTable->stOldPhsRulesList[i];
if (psClassifierRules->bUsed) {
if (psClassifierRules->uiClassifierRuleId == uiClsid) {
*ppstClassifierEntry = psClassifierRules;
return i;
}
}
}
*ppstClassifierEntry = NULL;
return PHS_INVALID_TABLE_INDEX;
}
static UINT GetPhsRuleEntry(IN struct bcm_phs_classifier_table *pstClassifierTable,
IN B_UINT32 uiPHSI, enum bcm_phs_classifier_context eClsContext,
OUT struct bcm_phs_rule **ppstPhsRule)
{
int i;
struct bcm_phs_classifier_entry *pstClassifierRule = NULL;
for (i = 0; i < MAX_PHSRULE_PER_SF; i++) {
if (eClsContext == eActiveClassifierRuleContext)
pstClassifierRule = &pstClassifierTable->stActivePhsRulesList[i];
else
pstClassifierRule = &pstClassifierTable->stOldPhsRulesList[i];
if (pstClassifierRule->bUsed) {
if (pstClassifierRule->u8PHSI == uiPHSI) {
*ppstPhsRule = pstClassifierRule->pstPhsRule;
return i;
}
}
}
*ppstPhsRule = NULL;
return PHS_INVALID_TABLE_INDEX;
}
UINT CreateSFToClassifierRuleMapping(IN B_UINT16 uiVcid, IN B_UINT16 uiClsId,
IN struct bcm_phs_table *psServiceFlowTable,
struct bcm_phs_rule *psPhsRule,
B_UINT8 u8AssociatedPHSI)
{
struct bcm_phs_classifier_table *psaClassifiertable = NULL;
UINT uiStatus = 0;
int iSfIndex;
BOOLEAN bFreeEntryFound = FALSE;
/* Check for a free entry in SFID table */
for (iSfIndex = 0; iSfIndex < MAX_SERVICEFLOWS; iSfIndex++) {
if (!psServiceFlowTable->stSFList[iSfIndex].bUsed) {
bFreeEntryFound = TRUE;
break;
}
}
if (!bFreeEntryFound)
return ERR_SFTABLE_FULL;
psaClassifiertable = psServiceFlowTable->stSFList[iSfIndex].pstClassifierTable;
uiStatus = CreateClassifierPHSRule(uiClsId, psaClassifiertable, psPhsRule,
eActiveClassifierRuleContext, u8AssociatedPHSI);
if (uiStatus == PHS_SUCCESS) {
/* Add entry at free index to the SF */
psServiceFlowTable->stSFList[iSfIndex].bUsed = TRUE;
psServiceFlowTable->stSFList[iSfIndex].uiVcid = uiVcid;
}
return uiStatus;
}
UINT CreateClassiferToPHSRuleMapping(IN B_UINT16 uiVcid,
IN B_UINT16 uiClsId,
IN struct bcm_phs_entry *pstServiceFlowEntry,
struct bcm_phs_rule *psPhsRule,
B_UINT8 u8AssociatedPHSI)
{
struct bcm_phs_classifier_entry *pstClassifierEntry = NULL;
UINT uiStatus = PHS_SUCCESS;
UINT nClassifierIndex = 0;
struct bcm_phs_classifier_table *psaClassifiertable = NULL;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
psaClassifiertable = pstServiceFlowEntry->pstClassifierTable;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "==>");
/* Check if the supplied Classifier already exists */
nClassifierIndex = GetClassifierEntry(
pstServiceFlowEntry->pstClassifierTable,
uiClsId,
eActiveClassifierRuleContext,
&pstClassifierEntry);
if (nClassifierIndex == PHS_INVALID_TABLE_INDEX) {
/*
* The Classifier doesn't exist, so it's a new classifier being added.
* Add a new entry to associate the PHS Rule with the Classifier.
*/
uiStatus = CreateClassifierPHSRule(uiClsId, psaClassifiertable,
psPhsRule,
eActiveClassifierRuleContext,
u8AssociatedPHSI);
return uiStatus;
}
/*
* The Classifier exists. The PHS Rule for this classifier
* is being modified.
*/
if (pstClassifierEntry->u8PHSI == psPhsRule->u8PHSI) {
if (pstClassifierEntry->pstPhsRule == NULL)
return ERR_PHS_INVALID_PHS_RULE;
/*
* This rule already exists; if any fields have changed for this
* PHS rule, update them.
*/
/* If any part of PHSF is valid then we update PHSF */
if (psPhsRule->u8PHSFLength) {
/* update PHSF */
memcpy(pstClassifierEntry->pstPhsRule->u8PHSF,
psPhsRule->u8PHSF, MAX_PHS_LENGTHS);
}
if (psPhsRule->u8PHSFLength) {
/* update PHSFLen */
pstClassifierEntry->pstPhsRule->u8PHSFLength = psPhsRule->u8PHSFLength;
}
if (psPhsRule->u8PHSMLength) {
/* update PHSM */
memcpy(pstClassifierEntry->pstPhsRule->u8PHSM,
psPhsRule->u8PHSM, MAX_PHS_LENGTHS);
}
if (psPhsRule->u8PHSMLength) {
/* update PHSM Len */
pstClassifierEntry->pstPhsRule->u8PHSMLength =
psPhsRule->u8PHSMLength;
}
if (psPhsRule->u8PHSS) {
/* update PHSS */
pstClassifierEntry->pstPhsRule->u8PHSS = psPhsRule->u8PHSS;
}
/* update PHSV */
pstClassifierEntry->pstPhsRule->u8PHSV = psPhsRule->u8PHSV;
} else {
/* A new rule is being set for this classifier. */
uiStatus = UpdateClassifierPHSRule(uiClsId, pstClassifierEntry,
psaClassifiertable, psPhsRule, u8AssociatedPHSI);
}
return uiStatus;
}
static UINT CreateClassifierPHSRule(IN B_UINT16 uiClsId,
struct bcm_phs_classifier_table *psaClassifiertable,
struct bcm_phs_rule *psPhsRule,
enum bcm_phs_classifier_context eClsContext,
B_UINT8 u8AssociatedPHSI)
{
UINT iClassifierIndex = 0;
BOOLEAN bFreeEntryFound = FALSE;
struct bcm_phs_classifier_entry *psClassifierRules = NULL;
UINT nStatus = PHS_SUCCESS;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "Inside CreateClassifierPHSRule");
if (psaClassifiertable == NULL)
return ERR_INVALID_CLASSIFIERTABLE_FOR_SF;
if (eClsContext == eOldClassifierRuleContext) {
/*
* If an old entry for this classifier ID already exists in the
* old rules table, replace it.
*/
iClassifierIndex =
GetClassifierEntry(psaClassifiertable, uiClsId,
eClsContext, &psClassifierRules);
if (iClassifierIndex != PHS_INVALID_TABLE_INDEX) {
/*
* The Classifier already exists in the old rules table.
* Let's replace the old classifier with the new one.
*/
bFreeEntryFound = TRUE;
}
}
if (!bFreeEntryFound) {
/* Continue to search for a free location to add the rule */
for (iClassifierIndex = 0; iClassifierIndex <
MAX_PHSRULE_PER_SF; iClassifierIndex++) {
if (eClsContext == eActiveClassifierRuleContext)
psClassifierRules = &psaClassifiertable->stActivePhsRulesList[iClassifierIndex];
else
psClassifierRules = &psaClassifiertable->stOldPhsRulesList[iClassifierIndex];
if (!psClassifierRules->bUsed) {
bFreeEntryFound = TRUE;
break;
}
}
}
if (!bFreeEntryFound) {
if (eClsContext == eActiveClassifierRuleContext)
return ERR_CLSASSIFIER_TABLE_FULL;
else {
/* Let's replace the oldest rule if we are looking in the old rules table */
if (psaClassifiertable->uiOldestPhsRuleIndex >= MAX_PHSRULE_PER_SF)
psaClassifiertable->uiOldestPhsRuleIndex = 0;
iClassifierIndex = psaClassifiertable->uiOldestPhsRuleIndex;
psClassifierRules = &psaClassifiertable->stOldPhsRulesList[iClassifierIndex];
(psaClassifiertable->uiOldestPhsRuleIndex)++;
}
}
if (eClsContext == eOldClassifierRuleContext) {
if (psClassifierRules->pstPhsRule == NULL) {
psClassifierRules->pstPhsRule = kmalloc(sizeof(struct bcm_phs_rule), GFP_KERNEL);
if (NULL == psClassifierRules->pstPhsRule)
return ERR_PHSRULE_MEMALLOC_FAIL;
}
psClassifierRules->bUsed = TRUE;
psClassifierRules->uiClassifierRuleId = uiClsId;
psClassifierRules->u8PHSI = psPhsRule->u8PHSI;
psClassifierRules->bUnclassifiedPHSRule = psPhsRule->bUnclassifiedPHSRule;
/* Update The PHS rule */
memcpy(psClassifierRules->pstPhsRule, psPhsRule, sizeof(struct bcm_phs_rule));
} else
nStatus = UpdateClassifierPHSRule(uiClsId, psClassifierRules,
psaClassifiertable, psPhsRule, u8AssociatedPHSI);
return nStatus;
}
static UINT UpdateClassifierPHSRule(IN B_UINT16 uiClsId,
IN struct bcm_phs_classifier_entry *pstClassifierEntry,
struct bcm_phs_classifier_table *psaClassifiertable,
struct bcm_phs_rule *psPhsRule,
B_UINT8 u8AssociatedPHSI)
{
struct bcm_phs_rule *pstAddPhsRule = NULL;
UINT nPhsRuleIndex = 0;
BOOLEAN bPHSRuleOrphaned = FALSE;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
psPhsRule->u8RefCnt = 0;
/* Step 1: Deref any existing PHS rule in this classifier entry */
bPHSRuleOrphaned = DerefPhsRule(uiClsId, psaClassifiertable,
pstClassifierEntry->pstPhsRule);
/* Step 2: Search for a PHS rule with u8AssociatedPHSI in the classifier table for this SF */
nPhsRuleIndex = GetPhsRuleEntry(psaClassifiertable, u8AssociatedPHSI,
eActiveClassifierRuleContext, &pstAddPhsRule);
if (PHS_INVALID_TABLE_INDEX == nPhsRuleIndex) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nAdding New PHSRuleEntry For Classifier");
if (psPhsRule->u8PHSI == 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nError PHSI is Zero\n");
return ERR_PHS_INVALID_PHS_RULE;
}
/* Step 2.a: The PHS rule does not exist. Create a new PHS rule for uiClsId */
if (FALSE == bPHSRuleOrphaned) {
pstClassifierEntry->pstPhsRule = kmalloc(sizeof(struct bcm_phs_rule), GFP_KERNEL);
if (NULL == pstClassifierEntry->pstPhsRule)
return ERR_PHSRULE_MEMALLOC_FAIL;
}
memcpy(pstClassifierEntry->pstPhsRule, psPhsRule, sizeof(struct bcm_phs_rule));
} else {
/* Step 2.b: The PHS rule exists. Tie uiClsId to the existing PHS rule */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_DISPATCH, DBG_LVL_ALL, "\nTying Classifier to Existing PHS Rule");
if (bPHSRuleOrphaned) {
kfree(pstClassifierEntry->pstPhsRule);
pstClassifierEntry->pstPhsRule = NULL;
}
pstClassifierEntry->pstPhsRule = pstAddPhsRule;
}
pstClassifierEntry->bUsed = TRUE;
pstClassifierEntry->u8PHSI = pstClassifierEntry->pstPhsRule->u8PHSI;
pstClassifierEntry->uiClassifierRuleId = uiClsId;
pstClassifierEntry->pstPhsRule->u8RefCnt++;
pstClassifierEntry->bUnclassifiedPHSRule = pstClassifierEntry->pstPhsRule->bUnclassifiedPHSRule;
return PHS_SUCCESS;
}
static BOOLEAN DerefPhsRule(IN B_UINT16 uiClsId, struct bcm_phs_classifier_table *psaClassifiertable, struct bcm_phs_rule *pstPhsRule)
{
if (pstPhsRule == NULL)
return FALSE;
if (pstPhsRule->u8RefCnt)
pstPhsRule->u8RefCnt--;
if (0 == pstPhsRule->u8RefCnt) {
/*
* if(pstPhsRule->u8PHSI)
* Store the currently active rule into the old rules list
* CreateClassifierPHSRule(uiClsId,psaClassifiertable,pstPhsRule,eOldClassifierRuleContext,pstPhsRule->u8PHSI);
*/
return TRUE;
} else
return FALSE;
}
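/*
* Illustrative sketch (not compiled): the refcount contract implemented by
* DerefPhsRule(). A PHS rule is shared by every classifier that references
* its PHSI; when the last reference is dropped the rule becomes an orphan
* and the caller owns its memory, as UpdateClassifierPHSRule() shows. The
* function name below is hypothetical.
*/
#if 0
static void example_release_rule(struct bcm_phs_classifier_entry *cls,
struct bcm_phs_classifier_table *table)
{
BOOLEAN orphaned = DerefPhsRule(cls->uiClassifierRuleId, table,
cls->pstPhsRule);
if (orphaned) {
kfree(cls->pstPhsRule); /* no classifier references it anymore */
cls->pstPhsRule = NULL;
}
}
#endif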
void DumpPhsRules(struct bcm_phs_extension *pDeviceExtension)
{
int i, j, k, l;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\n Dumping PHS Rules :\n");
for (i = 0; i < MAX_SERVICEFLOWS; i++) {
struct bcm_phs_entry stServFlowEntry =
pDeviceExtension->pstServiceFlowPhsRulesTable->stSFList[i];
if (stServFlowEntry.bUsed) {
for (j = 0; j < MAX_PHSRULE_PER_SF; j++) {
for (l = 0; l < 2; l++) {
struct bcm_phs_classifier_entry stClsEntry;
if (l == 0) {
stClsEntry = stServFlowEntry.pstClassifierTable->stActivePhsRulesList[j];
if (stClsEntry.bUsed)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n Active PHS Rule :\n");
} else {
stClsEntry = stServFlowEntry.pstClassifierTable->stOldPhsRulesList[j];
if (stClsEntry.bUsed)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n Old PHS Rule :\n");
}
if (stClsEntry.bUsed) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\n VCID : %#X", stServFlowEntry.uiVcid);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n ClassifierID : %#X", stClsEntry.uiClassifierRuleId);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSRuleID : %#X", stClsEntry.u8PHSI);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n****************PHS Rule********************\n");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSI : %#X", stClsEntry.pstPhsRule->u8PHSI);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSFLength : %#X ", stClsEntry.pstPhsRule->u8PHSFLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSF : ");
for (k = 0 ; k < stClsEntry.pstPhsRule->u8PHSFLength; k++)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "%#X ", stClsEntry.pstPhsRule->u8PHSF[k]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSMLength : %#X", stClsEntry.pstPhsRule->u8PHSMLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSM :");
for (k = 0; k < stClsEntry.pstPhsRule->u8PHSMLength; k++)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "%#X ", stClsEntry.pstPhsRule->u8PHSM[k]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSS : %#X ", stClsEntry.pstPhsRule->u8PHSS);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, (DBG_LVL_ALL|DBG_NO_FUNC_PRINT), "\n PHSV : %#X", stClsEntry.pstPhsRule->u8PHSV);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "\n********************************************\n");
}
}
}
}
}
}
/*
* Procedure: phs_decompress
*
* Description: This routine restores the static fields within the packet.
*
* Arguments:
* in_buf - ptr to incoming packet buffer.
* out_buf - ptr to output buffer where the restored header is copied.
* decomp_phs_rules - ptr to PHS rule.
* header_size - in: the suppressed header length (including the PHSI byte);
* out: the restored header length.
*
* Returns:
* size - The number of bytes of dynamic fields present within the incoming
* packet header.
* 0 - If the PHS rule is NULL, or if PHSI is 0, indicating an uncompressed packet.
*/
int phs_decompress(unsigned char *in_buf,
unsigned char *out_buf,
struct bcm_phs_rule *decomp_phs_rules,
UINT *header_size)
{
int phss, size = 0;
struct bcm_phs_rule *tmp_memb;
int bit, i = 0;
unsigned char *phsf, *phsm;
int in_buf_len = *header_size - 1;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
in_buf++;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "====>\n");
*header_size = 0;
if (decomp_phs_rules == NULL)
return 0;
tmp_memb = decomp_phs_rules;
/*
* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,"\nDECOMP:In phs_decompress PHSI 1 %d",phsi));
* header_size = tmp_memb->u8PHSFLength;
*/
phss = tmp_memb->u8PHSS;
phsf = tmp_memb->u8PHSF;
phsm = tmp_memb->u8PHSM;
if (phss > MAX_PHS_LENGTHS)
phss = MAX_PHS_LENGTHS;
/*
* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, PHS_RECEIVE,DBG_LVL_ALL,"\nDECOMP:
* In phs_decompress PHSI %d phss %d index %d",phsi,phss,index));
*/
while ((phss > 0) && (size < in_buf_len)) {
bit = ((*phsm << i) & SUPPRESS);
if (bit == SUPPRESS) {
*out_buf = *phsf;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d phsf %d output %d",
phss, *phsf, *out_buf);
} else {
*out_buf = *in_buf;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_RECEIVE, DBG_LVL_ALL, "\nDECOMP:In phss %d input %d output %d",
phss, *in_buf, *out_buf);
in_buf++;
size++;
}
out_buf++;
phsf++;
phss--;
i++;
*header_size = *header_size + 1;
if (i > MAX_NO_BIT) {
i = 0;
phsm++;
}
}
return size;
}
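/*
* Illustrative sketch (not compiled): the PHSM bit walk used by both
* phs_decompress() above and verify_suppress_phsf() below. Each mask byte
* is scanned MSB-first via (*phsm << i) & SUPPRESS; a set bit means the
* header byte was suppressed on the wire (restore it from PHSF), a clear
* bit means it travels verbatim. The mask value here is hypothetical.
*/
#if 0
static void example_mask_walk(void)
{
unsigned char phsm = 0xC1; /* 1100 0001: header bytes 0, 1 and 7 suppressed */
int from_phsf = 0, from_input = 0;
int i;
for (i = 0; i <= MAX_NO_BIT; i++) {
if ((phsm << i) & SUPPRESS)
from_phsf++; /* byte restored from the cached PHSF */
else
from_input++; /* byte taken verbatim from the packet */
}
/* here: from_phsf == 3, from_input == 5 */
}
#endif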
/*
* Procedure: phs_compress
*
* Description: This routine suppresses the static fields within the packet.
* Before that it verifies the fields to be suppressed against the
* corresponding fields in the phsf. For verification it checks the phsv
* field of the PHS rule; if set and verification succeeds, it suppresses
* the field. If any static field is found to differ, none of the static
* fields are suppressed and the packet is sent uncompressed with phsi = 0.
*
* Arguments:
* phs_rule - ptr to PHS rule.
* in_buf - ptr to incoming packet buffer.
* out_buf - ptr to output buffer where the suppressed header is copied.
* header_size - ptr to field which holds the phss.
*
* Returns:
* size - The number of bytes copied into the output buffer, i.e. the dynamic fields.
* 0 - If the PHS rule is NULL, if the PHSV field is not set, or if the verification fails.
*/
static int phs_compress(struct bcm_phs_rule *phs_rule,
unsigned char *in_buf,
unsigned char *out_buf,
UINT *header_size,
UINT *new_header_size)
{
unsigned char *old_addr = out_buf;
int suppress = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
if (phs_rule == NULL) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "\nphs_compress(): phs_rule null!");
*out_buf = ZERO_PHSI;
return STATUS_PHS_NOCOMPRESSION;
}
if (phs_rule->u8PHSS <= *new_header_size)
*header_size = phs_rule->u8PHSS;
else
*header_size = *new_header_size;
/* To copy PHSI */
out_buf++;
suppress = verify_suppress_phsf(in_buf, out_buf, phs_rule->u8PHSF,
phs_rule->u8PHSM, phs_rule->u8PHSS,
phs_rule->u8PHSV, new_header_size);
if (suppress == STATUS_PHS_COMPRESSED) {
*old_addr = (unsigned char)phs_rule->u8PHSI;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "\nCOMP:In phs_compress phsi %d", phs_rule->u8PHSI);
} else {
*old_addr = ZERO_PHSI;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "\nCOMP:In phs_compress PHSV Verification failed");
}
return suppress;
}
/*
* Procedure: verify_suppress_phsf
*
* Description: This routine verifies the fields of the packet, and if all
* the static fields are equal it adds the phsi of that PHS rule. If any
* static field differs it won't suppress any field.
*
* Arguments:
* in_buffer - ptr to incoming packet buffer.
* out_buffer - ptr to output buffer where the suppressed header is copied.
* phsf - ptr to phsf.
* phsm - ptr to phsm.
* phss - variable holding phss.
* phsv - variable holding phsv.
* new_header_size - ptr to the resulting header size.
*
* Returns:
* size - The number of bytes copied into the output buffer, i.e. the dynamic fields.
* 0 - Packet has failed the verification.
*/
static int verify_suppress_phsf(unsigned char *in_buffer,
unsigned char *out_buffer,
unsigned char *phsf,
unsigned char *phsm,
unsigned int phss,
unsigned int phsv,
UINT *new_header_size)
{
unsigned int size = 0;
int bit, i = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "\nCOMP:In verify_phsf PHSM - 0x%X", *phsm);
if (phss > (*new_header_size))
phss = *new_header_size;
while (phss > 0) {
bit = ((*phsm << i) & SUPPRESS);
if (bit == SUPPRESS) {
if (*in_buffer != *phsf) {
if (phsv == VERIFY) {
BCM_DEBUG_PRINT(Adapter,
DBG_TYPE_OTHERS,
PHS_SEND,
DBG_LVL_ALL,
"\nCOMP:In verify_phsf failed for field %d buf %d phsf %d",
phss,
*in_buffer,
*phsf);
return STATUS_PHS_NOCOMPRESSION;
}
} else
BCM_DEBUG_PRINT(Adapter,
DBG_TYPE_OTHERS,
PHS_SEND,
DBG_LVL_ALL,
"\nCOMP:In verify_phsf success for field %d buf %d phsf %d",
phss,
*in_buffer,
*phsf);
} else {
*out_buffer = *in_buffer;
BCM_DEBUG_PRINT(Adapter,
DBG_TYPE_OTHERS,
PHS_SEND,
DBG_LVL_ALL,
"\nCOMP:In copying_header input %d out %d",
*in_buffer,
*out_buffer);
out_buffer++;
size++;
}
in_buffer++;
phsf++;
phss--;
i++;
if (i > MAX_NO_BIT) {
i = 0;
phsm++;
}
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, PHS_SEND, DBG_LVL_ALL, "\nCOMP:In verify_phsf success");
*new_header_size = size;
return STATUS_PHS_COMPRESSED;
}
| gpl-2.0 |
ffolkes/android_kernel_samsung_trlte | drivers/video/backlight/da903x_bl.c | 2243 | 4652 | /*
* Backlight driver for Dialog Semiconductor DA9030/DA9034
*
* Copyright (C) 2008 Compulab, Ltd.
* Mike Rapoport <mike@compulab.co.il>
*
* Copyright (C) 2006-2008 Marvell International Ltd.
* Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/fb.h>
#include <linux/backlight.h>
#include <linux/mfd/da903x.h>
#include <linux/slab.h>
#include <linux/module.h>
#define DA9030_WLED_CONTROL 0x25
#define DA9030_WLED_CP_EN (1 << 6)
#define DA9030_WLED_TRIM(x) ((x) & 0x7)
#define DA9034_WLED_CONTROL1 0x3C
#define DA9034_WLED_CONTROL2 0x3D
#define DA9034_WLED_ISET(x) ((x) & 0x1f)
#define DA9034_WLED_BOOST_EN (1 << 5)
#define DA9030_MAX_BRIGHTNESS 7
#define DA9034_MAX_BRIGHTNESS 0x7f
struct da903x_backlight_data {
struct device *da903x_dev;
int id;
int current_brightness;
};
static int da903x_backlight_set(struct backlight_device *bl, int brightness)
{
struct da903x_backlight_data *data = bl_get_data(bl);
struct device *dev = data->da903x_dev;
uint8_t val;
int ret = 0;
switch (data->id) {
case DA9034_ID_WLED:
ret = da903x_update(dev, DA9034_WLED_CONTROL1,
brightness, 0x7f);
if (ret)
return ret;
if (data->current_brightness && brightness == 0)
ret = da903x_clr_bits(dev,
DA9034_WLED_CONTROL2,
DA9034_WLED_BOOST_EN);
if (data->current_brightness == 0 && brightness)
ret = da903x_set_bits(dev,
DA9034_WLED_CONTROL2,
DA9034_WLED_BOOST_EN);
break;
case DA9030_ID_WLED:
val = DA9030_WLED_TRIM(brightness);
val |= brightness ? DA9030_WLED_CP_EN : 0;
ret = da903x_write(dev, DA9030_WLED_CONTROL, val);
break;
}
if (ret)
return ret;
data->current_brightness = brightness;
return 0;
}
static int da903x_backlight_update_status(struct backlight_device *bl)
{
int brightness = bl->props.brightness;
if (bl->props.power != FB_BLANK_UNBLANK)
brightness = 0;
if (bl->props.fb_blank != FB_BLANK_UNBLANK)
brightness = 0;
if (bl->props.state & BL_CORE_SUSPENDED)
brightness = 0;
return da903x_backlight_set(bl, brightness);
}
static int da903x_backlight_get_brightness(struct backlight_device *bl)
{
struct da903x_backlight_data *data = bl_get_data(bl);
return data->current_brightness;
}
static const struct backlight_ops da903x_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = da903x_backlight_update_status,
.get_brightness = da903x_backlight_get_brightness,
};
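/*
* Illustrative sketch (not compiled): how the backlight core invokes the
* ops above when userspace writes /sys/class/backlight/<dev>/brightness.
* backlight_update_status() is the standard helper; the wrapper name is
* hypothetical and heavily simplified.
*/
#if 0
static void example_set_brightness(struct backlight_device *bl, int value)
{
bl->props.brightness = value;
backlight_update_status(bl); /* ends in da903x_backlight_update_status() */
}
#endif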
static int da903x_backlight_probe(struct platform_device *pdev)
{
struct da9034_backlight_pdata *pdata = pdev->dev.platform_data;
struct da903x_backlight_data *data;
struct backlight_device *bl;
struct backlight_properties props;
int max_brightness;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
switch (pdev->id) {
case DA9030_ID_WLED:
max_brightness = DA9030_MAX_BRIGHTNESS;
break;
case DA9034_ID_WLED:
max_brightness = DA9034_MAX_BRIGHTNESS;
break;
default:
dev_err(&pdev->dev, "invalid backlight device ID(%d)\n",
pdev->id);
return -EINVAL;
}
data->id = pdev->id;
data->da903x_dev = pdev->dev.parent;
data->current_brightness = 0;
/* adjust the WLED output current */
if (pdata)
da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2,
DA9034_WLED_ISET(pdata->output_current));
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = max_brightness;
bl = backlight_device_register(pdev->name, data->da903x_dev, data,
&da903x_backlight_ops, &props);
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
return PTR_ERR(bl);
}
bl->props.brightness = max_brightness;
platform_set_drvdata(pdev, bl);
backlight_update_status(bl);
return 0;
}
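/*
* Illustrative sketch (not compiled): how board code might describe this
* device. In practice the DA903x MFD core instantiates the cell, so the
* standalone platform_device below is purely for illustration; all values
* are hypothetical.
*/
#if 0
static struct da9034_backlight_pdata example_bl_pdata = {
.output_current = 10, /* fed through DA9034_WLED_ISET() in probe */
};
static struct platform_device example_bl_device = {
.name = "da903x-backlight",
.id = DA9034_ID_WLED, /* selects the DA9034 register layout in probe() */
.dev = {
.platform_data = &example_bl_pdata,
},
};
#endif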
static int da903x_backlight_remove(struct platform_device *pdev)
{
struct backlight_device *bl = platform_get_drvdata(pdev);
backlight_device_unregister(bl);
return 0;
}
static struct platform_driver da903x_backlight_driver = {
.driver = {
.name = "da903x-backlight",
.owner = THIS_MODULE,
},
.probe = da903x_backlight_probe,
.remove = da903x_backlight_remove,
};
module_platform_driver(da903x_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for Dialog Semiconductor DA9030/DA9034");
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>");
MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da903x-backlight");
| gpl-2.0 |
madprogrammer/linux-e8-rt | drivers/watchdog/coh901327_wdt.c | 4035 | 14517 | /*
* coh901327_wdt.c
*
* Copyright (C) 2008-2009 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
* Watchdog driver for the ST-Ericsson AB COH 901 327 IP core
* Author: Linus Walleij <linus.walleij@stericsson.com>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/delay.h>
#define DRV_NAME "WDOG COH 901 327"
/*
* COH 901 327 register definitions
*/
/* WDOG_FEED Register 32bit (-/W) */
#define U300_WDOG_FR 0x00
#define U300_WDOG_FR_FEED_RESTART_TIMER 0xFEEDU
/* WDOG_TIMEOUT Register 32bit (R/W) */
#define U300_WDOG_TR 0x04
#define U300_WDOG_TR_TIMEOUT_MASK 0x7FFFU
/* WDOG_DISABLE1 Register 32bit (-/W) */
#define U300_WDOG_D1R 0x08
#define U300_WDOG_D1R_DISABLE1_DISABLE_TIMER 0x2BADU
/* WDOG_DISABLE2 Register 32bit (R/W) */
#define U300_WDOG_D2R 0x0C
#define U300_WDOG_D2R_DISABLE2_DISABLE_TIMER 0xCAFEU
#define U300_WDOG_D2R_DISABLE_STATUS_DISABLED 0xDABEU
#define U300_WDOG_D2R_DISABLE_STATUS_ENABLED 0x0000U
/* WDOG_STATUS Register 32bit (R/W) */
#define U300_WDOG_SR 0x10
#define U300_WDOG_SR_STATUS_TIMED_OUT 0xCFE8U
#define U300_WDOG_SR_STATUS_NORMAL 0x0000U
#define U300_WDOG_SR_RESET_STATUS_RESET 0xE8B4U
/* WDOG_COUNT Register 32bit (R/-) */
#define U300_WDOG_CR 0x14
#define U300_WDOG_CR_VALID_IND 0x8000U
#define U300_WDOG_CR_VALID_STABLE 0x0000U
#define U300_WDOG_CR_COUNT_VALUE_MASK 0x7FFFU
/* WDOG_JTAGOVR Register 32bit (R/W) */
#define U300_WDOG_JOR 0x18
#define U300_WDOG_JOR_JTAG_MODE_IND 0x0002U
#define U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE 0x0001U
/* WDOG_RESTART Register 32bit (-/W) */
#define U300_WDOG_RR 0x1C
#define U300_WDOG_RR_RESTART_VALUE_RESUME 0xACEDU
/* WDOG_IRQ_EVENT Register 32bit (R/W) */
#define U300_WDOG_IER 0x20
#define U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND 0x0001U
#define U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE 0x0001U
/* WDOG_IRQ_MASK Register 32bit (R/W) */
#define U300_WDOG_IMR 0x24
#define U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE 0x0001U
/* WDOG_IRQ_FORCE Register 32bit (R/W) */
#define U300_WDOG_IFR 0x28
#define U300_WDOG_IFR_WILL_BARK_IRQ_FORCE_ENABLE 0x0001U
/* Default timeout in seconds = 1 minute */
static int margin = 60;
static resource_size_t phybase;
static resource_size_t physize;
static int irq;
static void __iomem *virtbase;
static unsigned long coh901327_users;
static unsigned long boot_status;
static u16 wdogenablestore;
static u16 irqmaskstore;
static struct device *parent;
/*
* The watchdog block is of course always clocked, the
* clk_enable()/clk_disable() calls are mainly for performing reference
* counting higher up in the clock hierarchy.
*/
static struct clk *clk;
/*
* Enabling and disabling functions.
*/
static void coh901327_enable(u16 timeout)
{
u16 val;
unsigned long freq;
unsigned long delay_ns;
clk_enable(clk);
/* Restart timer if it is disabled */
val = readw(virtbase + U300_WDOG_D2R);
if (val == U300_WDOG_D2R_DISABLE_STATUS_DISABLED)
writew(U300_WDOG_RR_RESTART_VALUE_RESUME,
virtbase + U300_WDOG_RR);
/* Acknowledge any pending interrupt so it doesn't just fire off */
writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE,
virtbase + U300_WDOG_IER);
/*
* The interrupt is cleared in the 32 kHz clock domain.
* Wait 3 32 kHz cycles for it to take effect
*/
freq = clk_get_rate(clk);
delay_ns = DIV_ROUND_UP(1000000000, freq); /* Freq to ns and round up */
delay_ns = 3 * delay_ns; /* Wait 3 cycles */
ndelay(delay_ns);
/* Enable the watchdog interrupt */
writew(U300_WDOG_IMR_WILL_BARK_IRQ_ENABLE, virtbase + U300_WDOG_IMR);
/* Activate the watchdog timer */
writew(timeout, virtbase + U300_WDOG_TR);
/* Start the watchdog timer */
writew(U300_WDOG_FR_FEED_RESTART_TIMER, virtbase + U300_WDOG_FR);
/*
* Extra read so that this change propagates in the watchdog.
*/
(void) readw(virtbase + U300_WDOG_CR);
val = readw(virtbase + U300_WDOG_D2R);
clk_disable(clk);
if (val != U300_WDOG_D2R_DISABLE_STATUS_ENABLED)
dev_err(parent,
"%s(): watchdog not enabled! D2R value %04x\n",
__func__, val);
}
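/*
* Illustrative sketch (not compiled): the settle delay computed in
* coh901327_enable() above. At the nominal 32 kHz watchdog clock, one
* cycle is DIV_ROUND_UP(1e9, 32768) = 30518 ns, so three cycles are
* roughly 92 us. The helper name is hypothetical.
*/
#if 0
static unsigned long example_settle_ns(unsigned long freq_hz)
{
unsigned long cycle_ns = DIV_ROUND_UP(1000000000UL, freq_hz);
return 3 * cycle_ns; /* e.g. freq_hz = 32768 -> 91554 ns */
}
#endif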
static void coh901327_disable(void)
{
u16 val;
clk_enable(clk);
/* Disable the watchdog interrupt if it is active */
writew(0x0000U, virtbase + U300_WDOG_IMR);
/* If the watchdog is currently enabled, attempt to disable it */
val = readw(virtbase + U300_WDOG_D2R);
if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED) {
writew(U300_WDOG_D1R_DISABLE1_DISABLE_TIMER,
virtbase + U300_WDOG_D1R);
writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER,
virtbase + U300_WDOG_D2R);
/* Write this twice (else problems occur) */
writew(U300_WDOG_D2R_DISABLE2_DISABLE_TIMER,
virtbase + U300_WDOG_D2R);
}
val = readw(virtbase + U300_WDOG_D2R);
clk_disable(clk);
if (val != U300_WDOG_D2R_DISABLE_STATUS_DISABLED)
dev_err(parent,
"%s(): watchdog not disabled! D2R value %04x\n",
__func__, val);
}
static void coh901327_start(void)
{
coh901327_enable(margin * 100);
}
static void coh901327_keepalive(void)
{
clk_enable(clk);
/* Feed the watchdog */
writew(U300_WDOG_FR_FEED_RESTART_TIMER,
virtbase + U300_WDOG_FR);
clk_disable(clk);
}
static int coh901327_settimeout(int time)
{
/*
* Max margin is 327 since the 10ms
* timeout register is max
* 0x7FFF = 327670ms ~= 327s.
*/
if (time <= 0 || time > 327)
return -EINVAL;
margin = time;
clk_enable(clk);
/* Set new timeout value */
writew(margin * 100, virtbase + U300_WDOG_TR);
/* Feed the dog */
writew(U300_WDOG_FR_FEED_RESTART_TIMER,
virtbase + U300_WDOG_FR);
clk_disable(clk);
return 0;
}
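/*
* Illustrative sketch (not compiled): the unit conversion behind
* coh901327_settimeout() and coh901327_start(). The timeout register
* counts 10 ms ticks, so seconds map to ticks as seconds * 100, and the
* 15-bit register (0x7FFF = 32767 ticks = 327.67 s) explains the 327 s
* cap above. The helper name is hypothetical.
*/
#if 0
static u16 example_seconds_to_ticks(int seconds)
{
return (u16)(seconds * 100); /* e.g. 60 s -> 6000 ticks */
}
#endif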
/*
* This interrupt occurs 10 ms before the watchdog WILL bark.
*/
static irqreturn_t coh901327_interrupt(int irq, void *data)
{
u16 val;
/*
* Ack IRQ? If this occurs we're FUBAR anyway, so
* just acknowledge, disable the interrupt and await the imminent end.
* If you at some point need a host of callbacks to be called
* when the system is about to watchdog-reset, add them here!
*
* NOTE: on future versions of this IP-block, it will be possible
* to prevent a watchdog reset by feeding the watchdog at this
* point.
*/
clk_enable(clk);
val = readw(virtbase + U300_WDOG_IER);
if (val == U300_WDOG_IER_WILL_BARK_IRQ_EVENT_IND)
writew(U300_WDOG_IER_WILL_BARK_IRQ_ACK_ENABLE,
virtbase + U300_WDOG_IER);
writew(0x0000U, virtbase + U300_WDOG_IMR);
clk_disable(clk);
dev_crit(parent, "watchdog is barking!\n");
return IRQ_HANDLED;
}
/*
* Allow only one user (daemon) to open the watchdog
*/
static int coh901327_open(struct inode *inode, struct file *file)
{
if (test_and_set_bit(1, &coh901327_users))
return -EBUSY;
coh901327_start();
return nonseekable_open(inode, file);
}
static int coh901327_release(struct inode *inode, struct file *file)
{
clear_bit(1, &coh901327_users);
coh901327_disable();
return 0;
}
static ssize_t coh901327_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
if (len)
coh901327_keepalive();
return len;
}
static long coh901327_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int ret = -ENOTTY;
u16 val;
int time;
int new_options;
union {
struct watchdog_info __user *ident;
int __user *i;
} uarg;
static const struct watchdog_info ident = {
.options = WDIOF_CARDRESET |
WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING,
.identity = "COH 901 327 Watchdog",
.firmware_version = 1,
};
uarg.i = (int __user *)arg;
switch (cmd) {
case WDIOC_GETSUPPORT:
ret = copy_to_user(uarg.ident, &ident,
sizeof(ident)) ? -EFAULT : 0;
break;
case WDIOC_GETSTATUS:
ret = put_user(0, uarg.i);
break;
case WDIOC_GETBOOTSTATUS:
ret = put_user(boot_status, uarg.i);
break;
case WDIOC_SETOPTIONS:
ret = get_user(new_options, uarg.i);
if (ret)
break;
if (new_options & WDIOS_DISABLECARD)
coh901327_disable();
if (new_options & WDIOS_ENABLECARD)
coh901327_start();
ret = 0;
break;
case WDIOC_KEEPALIVE:
coh901327_keepalive();
ret = 0;
break;
case WDIOC_SETTIMEOUT:
ret = get_user(time, uarg.i);
if (ret)
break;
ret = coh901327_settimeout(time);
if (ret)
break;
/* Then fall through to return set value */
case WDIOC_GETTIMEOUT:
ret = put_user(margin, uarg.i);
break;
case WDIOC_GETTIMELEFT:
clk_enable(clk);
/* Read repeatedly until the value is stable! */
val = readw(virtbase + U300_WDOG_CR);
while (val & U300_WDOG_CR_VALID_IND)
val = readw(virtbase + U300_WDOG_CR);
val &= U300_WDOG_CR_COUNT_VALUE_MASK;
clk_disable(clk);
if (val != 0)
val /= 100;
ret = put_user(val, uarg.i);
break;
}
return ret;
}
static const struct file_operations coh901327_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = coh901327_write,
.unlocked_ioctl = coh901327_ioctl,
.open = coh901327_open,
.release = coh901327_release,
};
static struct miscdevice coh901327_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &coh901327_fops,
};
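/*
* Illustrative sketch (not compiled, userspace): how a watchdog daemon
* would drive the misc device above through the standard watchdog API
* (needs <fcntl.h>, <unistd.h>, <sys/ioctl.h> and <linux/watchdog.h>).
* The timeout value is hypothetical.
*/
#if 0
int example_daemon(void)
{
int fd = open("/dev/watchdog", O_WRONLY);
int timeout = 120;
ioctl(fd, WDIOC_SETTIMEOUT, &timeout); /* -> coh901327_settimeout() */
for (;;) {
write(fd, "\0", 1); /* any write feeds: coh901327_keepalive() */
sleep(timeout / 2);
}
}
#endif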
static int __exit coh901327_remove(struct platform_device *pdev)
{
misc_deregister(&coh901327_miscdev);
coh901327_disable();
free_irq(irq, pdev);
clk_put(clk);
iounmap(virtbase);
release_mem_region(phybase, physize);
return 0;
}
static int __init coh901327_probe(struct platform_device *pdev)
{
int ret;
u16 val;
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENOENT;
parent = &pdev->dev;
physize = resource_size(res);
phybase = res->start;
if (request_mem_region(phybase, physize, DRV_NAME) == NULL) {
ret = -EBUSY;
goto out;
}
virtbase = ioremap(phybase, physize);
if (!virtbase) {
ret = -ENOMEM;
goto out_no_remap;
}
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(&pdev->dev, "could not get clock\n");
goto out_no_clk;
}
ret = clk_enable(clk);
if (ret) {
dev_err(&pdev->dev, "could not enable clock\n");
goto out_no_clk_enable;
}
val = readw(virtbase + U300_WDOG_SR);
switch (val) {
case U300_WDOG_SR_STATUS_TIMED_OUT:
dev_info(&pdev->dev,
"watchdog timed out since last chip reset!\n");
boot_status = WDIOF_CARDRESET;
/* Status will be cleared below */
break;
case U300_WDOG_SR_STATUS_NORMAL:
dev_info(&pdev->dev,
"in normal status, no timeouts have occurred.\n");
break;
default:
dev_info(&pdev->dev,
"contains an illegal status code (%08x)\n", val);
break;
}
val = readw(virtbase + U300_WDOG_D2R);
switch (val) {
case U300_WDOG_D2R_DISABLE_STATUS_DISABLED:
dev_info(&pdev->dev, "currently disabled.\n");
break;
case U300_WDOG_D2R_DISABLE_STATUS_ENABLED:
dev_info(&pdev->dev,
"currently enabled! (disabling it now)\n");
coh901327_disable();
break;
default:
dev_err(&pdev->dev,
"contains an illegal enable/disable code (%08x)\n",
val);
break;
}
/* Reset the watchdog */
writew(U300_WDOG_SR_RESET_STATUS_RESET, virtbase + U300_WDOG_SR);
irq = platform_get_irq(pdev, 0);
if (request_irq(irq, coh901327_interrupt, IRQF_DISABLED,
DRV_NAME " Bark", pdev)) {
ret = -EIO;
goto out_no_irq;
}
clk_disable(clk);
ret = misc_register(&coh901327_miscdev);
if (ret == 0)
dev_info(&pdev->dev,
"initialized. timer margin=%d sec\n", margin);
else
goto out_no_wdog;
return 0;
out_no_wdog:
free_irq(irq, pdev);
out_no_irq:
clk_disable(clk);
out_no_clk_enable:
clk_put(clk);
out_no_clk:
iounmap(virtbase);
out_no_remap:
release_mem_region(phybase, SZ_4K);
out:
return ret;
}
#ifdef CONFIG_PM
static int coh901327_suspend(struct platform_device *pdev, pm_message_t state)
{
irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U;
wdogenablestore = readw(virtbase + U300_WDOG_D2R);
/* If watchdog is on, disable it here and now */
if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED)
coh901327_disable();
return 0;
}
static int coh901327_resume(struct platform_device *pdev)
{
/* Restore the watchdog interrupt */
writew(irqmaskstore, virtbase + U300_WDOG_IMR);
if (wdogenablestore == U300_WDOG_D2R_DISABLE_STATUS_ENABLED) {
/* Restart the watchdog timer */
writew(U300_WDOG_RR_RESTART_VALUE_RESUME,
virtbase + U300_WDOG_RR);
writew(U300_WDOG_FR_FEED_RESTART_TIMER,
virtbase + U300_WDOG_FR);
}
return 0;
}
#else
#define coh901327_suspend NULL
#define coh901327_resume NULL
#endif
/*
* Mistreating the watchdog is the only way to perform a software reset of the
* system on EMP platforms. So we implement this and export a symbol for it.
*/
void coh901327_watchdog_reset(void)
{
/* Enable even if on JTAG too */
writew(U300_WDOG_JOR_JTAG_WATCHDOG_ENABLE,
virtbase + U300_WDOG_JOR);
/*
* Timeout = 5s, we have to wait for the watchdog reset to
* actually take place: the watchdog will be reloaded with the
* default value immediately, so we HAVE to reboot and get back
* into the kernel in 30s, or the device will reboot again!
* The boot loader will typically deactivate the watchdog, so we
* need time enough for the boot loader to get to the point of
* deactivating the watchdog before it is shut down by it.
*
* NOTE: on future versions of the watchdog, this restriction is
* gone: the watchdog will be reloaded with a default value (1 min)
* instead of last value, and you can conveniently set the watchdog
* timeout to 10ms (value = 1) without any problems.
*/
coh901327_enable(500);
/* Return and await doom */
}
static struct platform_driver coh901327_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "coh901327_wdog",
},
.remove = __exit_p(coh901327_remove),
.suspend = coh901327_suspend,
.resume = coh901327_resume,
};
static int __init coh901327_init(void)
{
return platform_driver_probe(&coh901327_driver, coh901327_probe);
}
module_init(coh901327_init);
static void __exit coh901327_exit(void)
{
platform_driver_unregister(&coh901327_driver);
}
module_exit(coh901327_exit);
MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("COH 901 327 Watchdog");
module_param(margin, int, 0);
MODULE_PARM_DESC(margin, "Watchdog margin in seconds (default 60s)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
| gpl-2.0 |
hb72k/android_kernel_huawei_msm8916 | drivers/leds/trigger/ledtrig-camera.c | 4547 | 1375 | /*
* Camera Flash and Torch On/Off Trigger
*
* based on ledtrig-ide-disk.c
*
* Copyright 2013 Texas Instruments
*
* Author: Milo(Woogyom) Kim <milo.kim@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/leds.h>
DEFINE_LED_TRIGGER(ledtrig_flash);
DEFINE_LED_TRIGGER(ledtrig_torch);
void ledtrig_flash_ctrl(bool on)
{
enum led_brightness brt = on ? LED_FULL : LED_OFF;
led_trigger_event(ledtrig_flash, brt);
}
EXPORT_SYMBOL_GPL(ledtrig_flash_ctrl);
void ledtrig_torch_ctrl(bool on)
{
enum led_brightness brt = on ? LED_FULL : LED_OFF;
led_trigger_event(ledtrig_torch, brt);
}
EXPORT_SYMBOL_GPL(ledtrig_torch_ctrl);
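/*
* Illustrative sketch (not compiled): how a camera driver consumes the two
* triggers. An LED driver binds by naming "flash" or "torch" as its default
* trigger; the camera core then simply toggles the exported helpers. The
* function name is hypothetical.
*/
#if 0
static void example_take_picture(bool need_flash)
{
if (need_flash)
ledtrig_flash_ctrl(true); /* LED_FULL on every LED bound to "flash" */
/* ... expose the sensor ... */
ledtrig_flash_ctrl(false);
}
#endif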
static int __init ledtrig_camera_init(void)
{
led_trigger_register_simple("flash", &ledtrig_flash);
led_trigger_register_simple("torch", &ledtrig_torch);
return 0;
}
module_init(ledtrig_camera_init);
static void __exit ledtrig_camera_exit(void)
{
led_trigger_unregister_simple(ledtrig_torch);
led_trigger_unregister_simple(ledtrig_flash);
}
module_exit(ledtrig_camera_exit);
MODULE_DESCRIPTION("LED Trigger for Camera Flash/Torch Control");
MODULE_AUTHOR("Milo Kim");
MODULE_LICENSE("GPL");
| gpl-2.0 |
friedrich420/S4-AEL-LP-FOR-LINUX-TESTING | fs/btrfs/ordered-data.c | 4803 | 26643 | /*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
static u64 entry_end(struct btrfs_ordered_extent *entry)
{
if (entry->file_offset + entry->len < entry->file_offset)
return (u64)-1;
return entry->file_offset + entry->len;
}
/* returns NULL if the insertion worked, or it returns the node it did find
* in the tree
*/
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
struct rb_node *node)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct btrfs_ordered_extent *entry;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);
if (file_offset < entry->file_offset)
p = &(*p)->rb_left;
else if (file_offset >= entry_end(entry))
p = &(*p)->rb_right;
else
return parent;
}
rb_link_node(node, parent, p);
rb_insert_color(node, root);
return NULL;
}
static void ordered_data_tree_panic(struct inode *inode, int errno,
u64 offset)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
btrfs_panic(fs_info, errno, "Inconsistency in ordered tree at offset "
"%llu\n", (unsigned long long)offset);
}
/*
* look for a given offset in the tree, and if it can't be found return the
* first lesser offset
*/
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
struct rb_node **prev_ret)
{
struct rb_node *n = root->rb_node;
struct rb_node *prev = NULL;
struct rb_node *test;
struct btrfs_ordered_extent *entry;
struct btrfs_ordered_extent *prev_entry = NULL;
while (n) {
entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
prev = n;
prev_entry = entry;
if (file_offset < entry->file_offset)
n = n->rb_left;
else if (file_offset >= entry_end(entry))
n = n->rb_right;
else
return n;
}
if (!prev_ret)
return NULL;
while (prev && file_offset >= entry_end(prev_entry)) {
test = rb_next(prev);
if (!test)
break;
prev_entry = rb_entry(test, struct btrfs_ordered_extent,
rb_node);
if (file_offset < entry_end(prev_entry))
break;
prev = test;
}
if (prev)
prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
rb_node);
while (prev && file_offset < entry_end(prev_entry)) {
test = rb_prev(prev);
if (!test)
break;
prev_entry = rb_entry(test, struct btrfs_ordered_extent,
rb_node);
prev = test;
}
*prev_ret = prev;
return NULL;
}
/*
* helper to check if a given offset is inside a given entry
*/
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
if (file_offset < entry->file_offset ||
entry->file_offset + entry->len <= file_offset)
return 0;
return 1;
}
static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
u64 len)
{
if (file_offset + len <= entry->file_offset ||
entry->file_offset + entry->len <= file_offset)
return 0;
return 1;
}
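/*
* Illustrative sketch (not compiled): the half-open interval convention
* [file_offset, file_offset + len) shared by offset_in_entry() and
* range_overlaps() above. The sample values are hypothetical.
*/
#if 0
static void example_intervals(void)
{
struct btrfs_ordered_extent e = { .file_offset = 4096, .len = 4096 };
/* offset_in_entry(&e, 4096) == 1: first byte of the extent */
/* offset_in_entry(&e, 8192) == 0: one past the end */
/* range_overlaps(&e, 8192, 4096) == 0: ranges merely touch */
}
#endif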
/*
* look for the first ordered struct that has this offset, otherwise
* the first one less than this offset
*/
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
u64 file_offset)
{
struct rb_root *root = &tree->tree;
struct rb_node *prev = NULL;
struct rb_node *ret;
struct btrfs_ordered_extent *entry;
if (tree->last) {
entry = rb_entry(tree->last, struct btrfs_ordered_extent,
rb_node);
if (offset_in_entry(entry, file_offset))
return tree->last;
}
ret = __tree_search(root, file_offset, &prev);
if (!ret)
ret = prev;
if (ret)
tree->last = ret;
return ret;
}
/* allocate and add a new ordered_extent into the per-inode tree.
* file_offset is the logical offset in the file
*
* start is the disk block number of an extent already reserved in the
* extent allocation tree
*
* len is the length of the extent
*
* The tree is given a single reference on the ordered extent that was
* inserted.
*/
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len,
int type, int dio, int compress_type)
{
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry;
tree = &BTRFS_I(inode)->ordered_tree;
entry = kzalloc(sizeof(*entry), GFP_NOFS);
if (!entry)
return -ENOMEM;
entry->file_offset = file_offset;
entry->start = start;
entry->len = len;
entry->disk_len = disk_len;
entry->bytes_left = len;
entry->inode = inode;
entry->compress_type = compress_type;
if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
set_bit(type, &entry->flags);
if (dio)
set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
/* one ref for the tree */
atomic_set(&entry->refs, 1);
init_waitqueue_head(&entry->wait);
INIT_LIST_HEAD(&entry->list);
INIT_LIST_HEAD(&entry->root_extent_list);
trace_btrfs_ordered_extent_add(inode, entry);
spin_lock(&tree->lock);
node = tree_insert(&tree->tree, file_offset,
&entry->rb_node);
if (node)
ordered_data_tree_panic(inode, -EEXIST, file_offset);
spin_unlock(&tree->lock);
spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
list_add_tail(&entry->root_extent_list,
&BTRFS_I(inode)->root->fs_info->ordered_extents);
spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
return 0;
}
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len, int type)
{
return __btrfs_add_ordered_extent(inode, file_offset, start, len,
disk_len, type, 0,
BTRFS_COMPRESS_NONE);
}
int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len, int type)
{
return __btrfs_add_ordered_extent(inode, file_offset, start, len,
disk_len, type, 1,
BTRFS_COMPRESS_NONE);
}
int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len,
int type, int compress_type)
{
return __btrfs_add_ordered_extent(inode, file_offset, start, len,
disk_len, type, 0,
compress_type);
}
/*
* Add a struct btrfs_ordered_sum into the list of checksums to be inserted
* when an ordered extent is finished. If the list covers more than one
* ordered extent, it is split across multiples.
*/
void btrfs_add_ordered_sum(struct inode *inode,
struct btrfs_ordered_extent *entry,
struct btrfs_ordered_sum *sum)
{
struct btrfs_ordered_inode_tree *tree;
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock(&tree->lock);
list_add_tail(&sum->list, &entry->list);
spin_unlock(&tree->lock);
}
/*
* this is used to account for finished IO across a given range
* of the file. The IO may span ordered extents. If
* a given ordered_extent is completely done, 1 is returned, otherwise
* 0.
*
* test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
* to make sure this function only returns 1 once for a given ordered extent.
*
* file_offset is updated to one byte past the range that is recorded as
* complete. This allows you to walk forward in the file.
*/
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
struct btrfs_ordered_extent **cached,
u64 *file_offset, u64 io_size)
{
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
int ret;
u64 dec_end;
u64 dec_start;
u64 to_dec;
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock(&tree->lock);
node = tree_search(tree, *file_offset);
if (!node) {
ret = 1;
goto out;
}
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
if (!offset_in_entry(entry, *file_offset)) {
ret = 1;
goto out;
}
dec_start = max(*file_offset, entry->file_offset);
dec_end = min(*file_offset + io_size, entry->file_offset +
entry->len);
*file_offset = dec_end;
if (dec_start > dec_end) {
printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
(unsigned long long)dec_start,
(unsigned long long)dec_end);
}
to_dec = dec_end - dec_start;
if (to_dec > entry->bytes_left) {
printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
(unsigned long long)entry->bytes_left,
(unsigned long long)to_dec);
}
entry->bytes_left -= to_dec;
if (entry->bytes_left == 0)
ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
else
ret = 1;
out:
if (!ret && cached && entry) {
*cached = entry;
atomic_inc(&entry->refs);
}
spin_unlock(&tree->lock);
return ret == 0;
}
/*
* this is used to account for finished IO across a given range
* of the file. The IO should not span ordered extents. If
* a given ordered_extent is completely done, 1 is returned, otherwise
* 0.
*
* test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
* to make sure this function only returns 1 once for a given ordered extent.
*/
int btrfs_dec_test_ordered_pending(struct inode *inode,
struct btrfs_ordered_extent **cached,
u64 file_offset, u64 io_size)
{
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
int ret;
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock(&tree->lock);
node = tree_search(tree, file_offset);
if (!node) {
ret = 1;
goto out;
}
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
if (!offset_in_entry(entry, file_offset)) {
ret = 1;
goto out;
}
if (io_size > entry->bytes_left) {
printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
(unsigned long long)entry->bytes_left,
(unsigned long long)io_size);
}
entry->bytes_left -= io_size;
if (entry->bytes_left == 0)
ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
else
ret = 1;
out:
if (!ret && cached && entry) {
*cached = entry;
atomic_inc(&entry->refs);
}
spin_unlock(&tree->lock);
return ret == 0;
}
/*
* used to drop a reference on an ordered extent. This will free
* the extent if the last reference is dropped
*/
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
struct list_head *cur;
struct btrfs_ordered_sum *sum;
trace_btrfs_ordered_extent_put(entry->inode, entry);
if (atomic_dec_and_test(&entry->refs)) {
while (!list_empty(&entry->list)) {
cur = entry->list.next;
sum = list_entry(cur, struct btrfs_ordered_sum, list);
list_del(&sum->list);
kfree(sum);
}
kfree(entry);
}
}
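/*
* Illustrative sketch (not compiled): the reference pattern every lookup in
* this file follows. Lookups take a reference under tree->lock; the caller
* must drop it with btrfs_put_ordered_extent(), which frees the extent and
* its checksum list on the last put. The function name is hypothetical.
*/
#if 0
static void example_lookup_put(struct inode *inode, u64 offset)
{
struct btrfs_ordered_extent *ordered;
ordered = btrfs_lookup_ordered_extent(inode, offset);
if (!ordered)
return;
/* ... inspect ordered->start, ordered->len, ordered->flags ... */
btrfs_put_ordered_extent(ordered); /* may free on the last reference */
}
#endif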
/*
* remove an ordered extent from the tree. No references are dropped
* and you must wake_up entry->wait. You must hold the tree lock
* while you call this function.
*/
static void __btrfs_remove_ordered_extent(struct inode *inode,
struct btrfs_ordered_extent *entry)
{
struct btrfs_ordered_inode_tree *tree;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct rb_node *node;
tree = &BTRFS_I(inode)->ordered_tree;
node = &entry->rb_node;
rb_erase(node, &tree->tree);
tree->last = NULL;
set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
spin_lock(&root->fs_info->ordered_extent_lock);
list_del_init(&entry->root_extent_list);
trace_btrfs_ordered_extent_remove(inode, entry);
/*
* we have no more ordered extents for this inode and
* no dirty pages. We can safely remove it from the
* list of ordered extents
*/
if (RB_EMPTY_ROOT(&tree->tree) &&
!mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
list_del_init(&BTRFS_I(inode)->ordered_operations);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
}
/*
* remove an ordered extent from the tree. No references are dropped
* but any waiters are woken.
*/
void btrfs_remove_ordered_extent(struct inode *inode,
struct btrfs_ordered_extent *entry)
{
struct btrfs_ordered_inode_tree *tree;
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock(&tree->lock);
__btrfs_remove_ordered_extent(inode, entry);
spin_unlock(&tree->lock);
wake_up(&entry->wait);
}
/*
* wait for all the ordered extents in a root. This is done when balancing
* space between drives.
*/
void btrfs_wait_ordered_extents(struct btrfs_root *root,
int nocow_only, int delay_iput)
{
struct list_head splice;
struct list_head *cur;
struct btrfs_ordered_extent *ordered;
struct inode *inode;
INIT_LIST_HEAD(&splice);
spin_lock(&root->fs_info->ordered_extent_lock);
list_splice_init(&root->fs_info->ordered_extents, &splice);
while (!list_empty(&splice)) {
cur = splice.next;
ordered = list_entry(cur, struct btrfs_ordered_extent,
root_extent_list);
if (nocow_only &&
!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
list_move(&ordered->root_extent_list,
&root->fs_info->ordered_extents);
cond_resched_lock(&root->fs_info->ordered_extent_lock);
continue;
}
list_del_init(&ordered->root_extent_list);
atomic_inc(&ordered->refs);
/*
* the inode may be getting freed (in sys_unlink path).
*/
inode = igrab(ordered->inode);
spin_unlock(&root->fs_info->ordered_extent_lock);
if (inode) {
btrfs_start_ordered_extent(inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
if (delay_iput)
btrfs_add_delayed_iput(inode);
else
iput(inode);
} else {
btrfs_put_ordered_extent(ordered);
}
spin_lock(&root->fs_info->ordered_extent_lock);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
}
/*
* this is used during transaction commit to write all the inodes
* added to the ordered operation list. These files must be fully on
* disk before the transaction commits.
*
* we have two modes here, one is to just start the IO via filemap_flush
* and the other is to wait for all the io. When we wait, we have an
* extra check to make sure the ordered operation list really is empty
* before we return
*/
void btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
{
struct btrfs_inode *btrfs_inode;
struct inode *inode;
struct list_head splice;
INIT_LIST_HEAD(&splice);
mutex_lock(&root->fs_info->ordered_operations_mutex);
spin_lock(&root->fs_info->ordered_extent_lock);
again:
list_splice_init(&root->fs_info->ordered_operations, &splice);
while (!list_empty(&splice)) {
btrfs_inode = list_entry(splice.next, struct btrfs_inode,
ordered_operations);
inode = &btrfs_inode->vfs_inode;
list_del_init(&btrfs_inode->ordered_operations);
/*
* the inode may be getting freed (in sys_unlink path).
*/
inode = igrab(inode);
if (!wait && inode) {
list_add_tail(&BTRFS_I(inode)->ordered_operations,
&root->fs_info->ordered_operations);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
if (inode) {
if (wait)
btrfs_wait_ordered_range(inode, 0, (u64)-1);
else
filemap_flush(inode->i_mapping);
btrfs_add_delayed_iput(inode);
}
cond_resched();
spin_lock(&root->fs_info->ordered_extent_lock);
}
if (wait && !list_empty(&root->fs_info->ordered_operations))
goto again;
spin_unlock(&root->fs_info->ordered_extent_lock);
mutex_unlock(&root->fs_info->ordered_operations_mutex);
}
/*
* Used to start IO or wait for a given ordered extent to finish.
*
* If wait is one, this effectively waits on page writeback for all the pages
* in the extent, and it waits on the io completion code to insert
* metadata into the btree corresponding to the extent
*/
void btrfs_start_ordered_extent(struct inode *inode,
struct btrfs_ordered_extent *entry,
int wait)
{
u64 start = entry->file_offset;
u64 end = start + entry->len - 1;
trace_btrfs_ordered_extent_start(inode, entry);
/*
* pages in the range can be dirty, clean or writeback. We
* start IO on any dirty ones so the wait doesn't stall waiting
* for pdflush to find them
*/
if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
filemap_fdatawrite_range(inode->i_mapping, start, end);
if (wait) {
wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
&entry->flags));
}
}
/*
* Used to wait on ordered extents across a large range of bytes.
*/
void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
u64 end;
u64 orig_end;
struct btrfs_ordered_extent *ordered;
int found;
if (start + len < start) {
orig_end = INT_LIMIT(loff_t);
} else {
orig_end = start + len - 1;
if (orig_end > INT_LIMIT(loff_t))
orig_end = INT_LIMIT(loff_t);
}
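/*
* Note: a len of (u64)-1 makes start + len wrap, which the check above
* treats as "wait to the end of the file" by clamping orig_end to the
* largest loff_t.
*/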
again:
/* start IO across the range first to instantiate any delalloc
* extents
*/
filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
/* The compression code will leave pages locked but return from
* writepage without setting the page writeback. Starting again
* with WB_SYNC_ALL will end up waiting for the IO to actually start.
*/
filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
filemap_fdatawait_range(inode->i_mapping, start, orig_end);
end = orig_end;
found = 0;
while (1) {
ordered = btrfs_lookup_first_ordered_extent(inode, end);
if (!ordered)
break;
if (ordered->file_offset > orig_end) {
btrfs_put_ordered_extent(ordered);
break;
}
if (ordered->file_offset + ordered->len < start) {
btrfs_put_ordered_extent(ordered);
break;
}
found++;
btrfs_start_ordered_extent(inode, ordered, 1);
end = ordered->file_offset;
btrfs_put_ordered_extent(ordered);
if (end == 0 || end == start)
break;
end--;
}
if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
EXTENT_DELALLOC, 0, NULL)) {
schedule_timeout(1);
goto again;
}
}
/*
* find an ordered extent corresponding to file_offset. return NULL if
* nothing is found, otherwise take a reference on the extent and return it
*/
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
u64 file_offset)
{
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock(&tree->lock);
node = tree_search(tree, file_offset);
if (!node)
goto out;
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
if (!offset_in_entry(entry, file_offset))
entry = NULL;
if (entry)
atomic_inc(&entry->refs);
out:
spin_unlock(&tree->lock);
return entry;
}
/* Since the DIO code tries to lock a wide area we need to look for any ordered
* extents that exist in the range, rather than just the start of the range.
*/
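/*
* Illustrative only: for a lookup over [4096, 12288), an ordered extent
* spanning [8192, 12288) is returned even though nothing starts at 4096
* itself; only entries that begin at or beyond file_offset + len stop
* the scan.
*/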
struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
u64 file_offset,
u64 len)
{
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock(&tree->lock);
node = tree_search(tree, file_offset);
if (!node) {
node = tree_search(tree, file_offset + len);
if (!node)
goto out;
}
while (1) {
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
if (range_overlaps(entry, file_offset, len))
break;
if (entry->file_offset >= file_offset + len) {
entry = NULL;
break;
}
entry = NULL;
node = rb_next(node);
if (!node)
break;
}
out:
if (entry)
atomic_inc(&entry->refs);
spin_unlock(&tree->lock);
return entry;
}
/*
* lookup and return any extent before 'file_offset'. NULL is returned
* if none is found
*/
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
{
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock(&tree->lock);
node = tree_search(tree, file_offset);
if (!node)
goto out;
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
atomic_inc(&entry->refs);
out:
spin_unlock(&tree->lock);
return entry;
}
/*
* After an extent is done, call this to conditionally update the on disk
* i_size. i_size is updated to cover any fully written part of the file.
*/
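/*
* Illustrative sketch of the invariant enforced below: disk_i_size may
* only advance over a region once no ordered extents and no delalloc
* bytes remain in it, e.g.
*
*	disk_i_size         offset                i_size
*	|----all IO done----|----maybe pending----|
*
* Anything still outstanding between disk_i_size and offset makes us
* bail out and retry on a later completion.
*/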
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered)
{
struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
u64 disk_i_size;
u64 new_i_size;
u64 i_size_test;
u64 i_size = i_size_read(inode);
struct rb_node *node;
struct rb_node *prev = NULL;
struct btrfs_ordered_extent *test;
int ret = 1;
if (ordered)
offset = entry_end(ordered);
else
offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
spin_lock(&tree->lock);
disk_i_size = BTRFS_I(inode)->disk_i_size;
/* truncate file */
if (disk_i_size > i_size) {
BTRFS_I(inode)->disk_i_size = i_size;
ret = 0;
goto out;
}
/*
* if the disk i_size is already at the inode->i_size, or
* this ordered extent is inside the disk i_size, we're done
*/
if (disk_i_size == i_size || offset <= disk_i_size) {
goto out;
}
/*
* we can't update the disk_isize if there are delalloc bytes
* between disk_i_size and this ordered extent
*/
if (test_range_bit(io_tree, disk_i_size, offset - 1,
EXTENT_DELALLOC, 0, NULL)) {
goto out;
}
/*
* walk backward from this ordered extent to disk_i_size.
* if we find an ordered extent then we can't update disk i_size
* yet
*/
if (ordered) {
node = rb_prev(&ordered->rb_node);
} else {
prev = tree_search(tree, offset);
/*
* we insert file extents without involving the ordered struct,
* so there should be no ordered struct covering this offset
*/
if (prev) {
test = rb_entry(prev, struct btrfs_ordered_extent,
rb_node);
BUG_ON(offset_in_entry(test, offset));
}
node = prev;
}
while (node) {
test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
if (test->file_offset + test->len <= disk_i_size)
break;
if (test->file_offset >= i_size)
break;
if (test->file_offset >= disk_i_size)
goto out;
node = rb_prev(node);
}
new_i_size = min_t(u64, offset, i_size);
/*
* at this point, we know we can safely update i_size to at least
* the offset from this ordered extent. But, we need to
* walk forward and see if ios from higher up in the file have
* finished.
*/
if (ordered) {
node = rb_next(&ordered->rb_node);
} else {
if (prev)
node = rb_next(prev);
else
node = rb_first(&tree->tree);
}
i_size_test = 0;
if (node) {
/*
* check whether we have an area where IO might have finished
* between our ordered extent and the next one
*/
test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
if (test->file_offset > offset)
i_size_test = test->file_offset;
} else {
i_size_test = i_size;
}
/*
* i_size_test is the end of a region after this ordered
* extent where there are no ordered extents. As long as there
* are no delalloc bytes in this area, it is safe to update
* disk_i_size to the end of the region.
*/
if (i_size_test > offset &&
!test_range_bit(io_tree, offset, i_size_test - 1,
EXTENT_DELALLOC, 0, NULL)) {
new_i_size = min_t(u64, i_size_test, i_size);
}
BTRFS_I(inode)->disk_i_size = new_i_size;
ret = 0;
out:
/*
* we need to remove the ordered extent with the tree lock held
* so that other people calling this function don't find our fully
* processed ordered entry and skip updating the i_size
*/
if (ordered)
__btrfs_remove_ordered_extent(inode, ordered);
spin_unlock(&tree->lock);
if (ordered)
wake_up(&ordered->wait);
return ret;
}
/*
* search the ordered extents for one corresponding to 'offset' and
* try to find a checksum. This is used because we allow pages to
* be reclaimed before their checksum is actually put into the btree
*/
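/*
* Illustrative only: a read-side caller might recover a checksum with
*
*	u32 sum;
*	if (btrfs_find_ordered_sum(inode, offset, disk_bytenr, &sum) == 0)
*		verify the page against sum;
*
* a non-zero return means the csum is not held by any ordered extent
* and has to come from the csum tree instead.
*/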
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
u32 *sum)
{
struct btrfs_ordered_sum *ordered_sum;
struct btrfs_sector_sum *sector_sums;
struct btrfs_ordered_extent *ordered;
struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
unsigned long num_sectors;
unsigned long i;
u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
int ret = 1;
ordered = btrfs_lookup_ordered_extent(inode, offset);
if (!ordered)
return 1;
spin_lock(&tree->lock);
list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
if (disk_bytenr >= ordered_sum->bytenr) {
num_sectors = ordered_sum->len / sectorsize;
sector_sums = ordered_sum->sums;
for (i = 0; i < num_sectors; i++) {
if (sector_sums[i].bytenr == disk_bytenr) {
*sum = sector_sums[i].sum;
ret = 0;
goto out;
}
}
}
}
out:
spin_unlock(&tree->lock);
btrfs_put_ordered_extent(ordered);
return ret;
}
/*
* add a given inode to the list of inodes that must be fully on
* disk before a transaction commit finishes.
*
* This basically gives us the ext3 style data=ordered mode, and it is mostly
* used to make sure renamed files are fully on disk.
*
* It is a noop if the inode is already fully on disk.
*
* If trans is not null, we'll do a friendly check for a transaction that
* is already flushing things and force the IO down ourselves.
*/
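/*
* Illustrative only: the classic caller is the rename path, roughly
*
*	btrfs_add_ordered_operation(trans, root, old_inode);
*
* so that the renamed file's data is forced out with the commit.
*/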
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
u64 last_mod;
last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);
/*
* if this file hasn't been changed since the last transaction
* commit, we can safely return without doing anything
*/
if (last_mod < root->fs_info->last_trans_committed)
return;
/*
* the transaction is already committing. Just start the IO and
* don't bother with all of this list nonsense
*/
if (trans && root->fs_info->running_transaction->blocked) {
btrfs_wait_ordered_range(inode, 0, (u64)-1);
return;
}
spin_lock(&root->fs_info->ordered_extent_lock);
if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
list_add_tail(&BTRFS_I(inode)->ordered_operations,
&root->fs_info->ordered_operations);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
}
| gpl-2.0 |
revjunkie/i9505 | drivers/scsi/bfa/bfa_fcs_lport.c | 4803 | 153698 | /*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
#include "bfa_fc.h"
BFA_TRC_FILE(FCS, PORT);
static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_fchs, u8 reason_code,
u8 reason_code_expl);
static void bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_fchs, struct fc_logi_s *plogi);
static void bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port);
static void bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port);
static void bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port);
static void bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port);
static void bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port);
static void bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port);
static void bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_fchs,
struct fc_echo_s *echo, u16 len);
static void bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_fchs,
struct fc_rnid_cmd_s *rnid, u16 len);
static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
struct fc_rnid_general_topology_data_s *gen_topo_data);
static void bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port);
static void bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port);
static void bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port);
static void bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port);
static void bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port);
static void bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port);
static struct {
void (*init) (struct bfa_fcs_lport_s *port);
void (*online) (struct bfa_fcs_lport_s *port);
void (*offline) (struct bfa_fcs_lport_s *port);
} __port_action[] = {
{
bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online,
bfa_fcs_lport_unknown_offline}, {
bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online,
bfa_fcs_lport_fab_offline}, {
bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online,
bfa_fcs_lport_n2n_offline},
};
/*
* fcs_port_sm FCS logical port state machine
*/
enum bfa_fcs_lport_event {
BFA_FCS_PORT_SM_CREATE = 1,
BFA_FCS_PORT_SM_ONLINE = 2,
BFA_FCS_PORT_SM_OFFLINE = 3,
BFA_FCS_PORT_SM_DELETE = 4,
BFA_FCS_PORT_SM_DELRPORT = 5,
BFA_FCS_PORT_SM_STOP = 6,
};
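/*
* Rough transition map, summarized from the handlers below (not
* authoritative):
*
*	uninit --CREATE--> init --ONLINE--> online
*	online --OFFLINE--> offline --ONLINE--> online
*	online/offline --STOP--> stopping, then init on the last DELRPORT
*	online/offline --DELETE--> deleting, then uninit on the last DELRPORT
*/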
static void bfa_fcs_lport_sm_uninit(struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event);
static void bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event);
static void bfa_fcs_lport_sm_online(struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event);
static void bfa_fcs_lport_sm_offline(struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event);
static void bfa_fcs_lport_sm_deleting(struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event);
static void bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event);
static void
bfa_fcs_lport_sm_uninit(
struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event)
{
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case BFA_FCS_PORT_SM_CREATE:
bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_sm_init(struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event)
{
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case BFA_FCS_PORT_SM_ONLINE:
bfa_sm_set_state(port, bfa_fcs_lport_sm_online);
bfa_fcs_lport_online_actions(port);
break;
case BFA_FCS_PORT_SM_DELETE:
bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
bfa_fcs_lport_deleted(port);
break;
case BFA_FCS_PORT_SM_STOP:
/* If vport - send completion call back */
if (port->vport)
bfa_fcs_vport_stop_comp(port->vport);
break;
case BFA_FCS_PORT_SM_OFFLINE:
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_sm_online(
struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event)
{
struct bfa_fcs_rport_s *rport;
struct list_head *qe, *qen;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case BFA_FCS_PORT_SM_OFFLINE:
bfa_sm_set_state(port, bfa_fcs_lport_sm_offline);
bfa_fcs_lport_offline_actions(port);
break;
case BFA_FCS_PORT_SM_STOP:
__port_action[port->fabric->fab_type].offline(port);
if (port->num_rports == 0) {
bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
/* If vport - send completion call back */
if (port->vport)
bfa_fcs_vport_stop_comp(port->vport);
} else {
bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
list_for_each_safe(qe, qen, &port->rport_q) {
rport = (struct bfa_fcs_rport_s *) qe;
bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
}
}
break;
case BFA_FCS_PORT_SM_DELETE:
__port_action[port->fabric->fab_type].offline(port);
if (port->num_rports == 0) {
bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
bfa_fcs_lport_deleted(port);
} else {
bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
list_for_each_safe(qe, qen, &port->rport_q) {
rport = (struct bfa_fcs_rport_s *) qe;
bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
}
}
break;
case BFA_FCS_PORT_SM_DELRPORT:
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_sm_offline(
struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event)
{
struct bfa_fcs_rport_s *rport;
struct list_head *qe, *qen;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case BFA_FCS_PORT_SM_ONLINE:
bfa_sm_set_state(port, bfa_fcs_lport_sm_online);
bfa_fcs_lport_online_actions(port);
break;
case BFA_FCS_PORT_SM_STOP:
if (port->num_rports == 0) {
bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
/* If vport - send completion call back */
if (port->vport)
bfa_fcs_vport_stop_comp(port->vport);
} else {
bfa_sm_set_state(port, bfa_fcs_lport_sm_stopping);
list_for_each_safe(qe, qen, &port->rport_q) {
rport = (struct bfa_fcs_rport_s *) qe;
bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
}
}
break;
case BFA_FCS_PORT_SM_DELETE:
if (port->num_rports == 0) {
bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
bfa_fcs_lport_deleted(port);
} else {
bfa_sm_set_state(port, bfa_fcs_lport_sm_deleting);
list_for_each_safe(qe, qen, &port->rport_q) {
rport = (struct bfa_fcs_rport_s *) qe;
bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
}
}
break;
case BFA_FCS_PORT_SM_DELRPORT:
case BFA_FCS_PORT_SM_OFFLINE:
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_sm_stopping(struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event)
{
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case BFA_FCS_PORT_SM_DELRPORT:
if (port->num_rports == 0) {
bfa_sm_set_state(port, bfa_fcs_lport_sm_init);
/* If vport - send completion call back */
if (port->vport)
bfa_fcs_vport_stop_comp(port->vport);
}
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_sm_deleting(
struct bfa_fcs_lport_s *port,
enum bfa_fcs_lport_event event)
{
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case BFA_FCS_PORT_SM_DELRPORT:
if (port->num_rports == 0) {
bfa_sm_set_state(port, bfa_fcs_lport_sm_uninit);
bfa_fcs_lport_deleted(port);
}
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
/*
* fcs_port_pvt
*/
/*
* Send AEN notification
*/
static void
bfa_fcs_lport_aen_post(struct bfa_fcs_lport_s *port,
enum bfa_lport_aen_event event)
{
struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
struct bfa_aen_entry_s *aen_entry;
bfad_get_aen_entry(bfad, aen_entry);
if (!aen_entry)
return;
aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
aen_entry->aen_data.lport.roles = port->port_cfg.roles;
aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
bfa_fcs_get_base_port(port->fcs));
aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
/* Send the AEN notification */
bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
BFA_AEN_CAT_LPORT, event);
}
/*
* Send a LS reject
*/
static void
bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
u8 reason_code, u8 reason_code_expl)
{
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
struct bfa_rport_s *bfa_rport = NULL;
int len;
bfa_trc(port->fcs, rx_fchs->d_id);
bfa_trc(port->fcs, rx_fchs->s_id);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp)
return;
len = fc_ls_rjt_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
rx_fchs->ox_id, reason_code, reason_code_expl);
bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
FC_MAX_PDUSZ, 0);
}
/*
* Send a FCCT Reject
*/
static void
bfa_fcs_lport_send_fcgs_rjt(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl)
{
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
struct bfa_rport_s *bfa_rport = NULL;
int len;
struct ct_hdr_s *rx_cthdr = (struct ct_hdr_s *)(rx_fchs + 1);
struct ct_hdr_s *ct_hdr;
bfa_trc(port->fcs, rx_fchs->d_id);
bfa_trc(port->fcs, rx_fchs->s_id);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp)
return;
ct_hdr = bfa_fcxp_get_reqbuf(fcxp);
ct_hdr->gs_type = rx_cthdr->gs_type;
ct_hdr->gs_sub_type = rx_cthdr->gs_sub_type;
len = fc_gs_rjt_build(&fchs, ct_hdr, rx_fchs->s_id,
bfa_fcs_lport_get_fcid(port),
rx_fchs->ox_id, reason_code, reason_code_expl);
bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
FC_MAX_PDUSZ, 0);
}
/*
* Process incoming plogi from a remote port.
*/
static void
bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_fchs, struct fc_logi_s *plogi)
{
struct bfa_fcs_rport_s *rport;
bfa_trc(port->fcs, rx_fchs->d_id);
bfa_trc(port->fcs, rx_fchs->s_id);
/*
* If min cfg mode is enabled, drop any incoming PLOGIs
*/
if (__fcs_min_cfg(port->fcs)) {
bfa_trc(port->fcs, rx_fchs->s_id);
return;
}
if (fc_plogi_parse(rx_fchs) != FC_PARSE_OK) {
bfa_trc(port->fcs, rx_fchs->s_id);
/*
* send a LS reject
*/
bfa_fcs_lport_send_ls_rjt(port, rx_fchs,
FC_LS_RJT_RSN_PROTOCOL_ERROR,
FC_LS_RJT_EXP_SPARMS_ERR_OPTIONS);
return;
}
/*
* Direct Attach P2P mode : verify address assigned by the r-port.
*/
if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
(memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
(void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
if (BFA_FCS_PID_IS_WKA(rx_fchs->d_id)) {
/* Address assigned to us cannot be a WKA */
bfa_fcs_lport_send_ls_rjt(port, rx_fchs,
FC_LS_RJT_RSN_PROTOCOL_ERROR,
FC_LS_RJT_EXP_INVALID_NPORT_ID);
return;
}
port->pid = rx_fchs->d_id;
bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
}
/*
* First, check if we know the device by pwwn.
*/
rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name);
if (rport) {
/*
* Direct Attach P2P mode : handle address assigned by r-port.
*/
if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
(memcmp((void *)&bfa_fcs_lport_get_pwwn(port),
(void *)&plogi->port_name, sizeof(wwn_t)) < 0)) {
port->pid = rx_fchs->d_id;
bfa_lps_set_n2n_pid(port->fabric->lps, rx_fchs->d_id);
rport->pid = rx_fchs->s_id;
}
bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
return;
}
/*
* Next, lookup rport by PID.
*/
rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id);
if (!rport) {
/*
* Inbound PLOGI from a new device.
*/
bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
return;
}
/*
* Rport is known only by PID.
*/
if (rport->pwwn) {
/*
* This is a different device with the same pid. Old device
* disappeared. Send implicit LOGO to old device.
*/
WARN_ON(rport->pwwn == plogi->port_name);
bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
/*
* Inbound PLOGI from a new device (with old PID).
*/
bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
return;
}
/*
* PLOGI crossing each other.
*/
WARN_ON(rport->pwwn != WWN_NULL);
bfa_fcs_rport_plogi(rport, rx_fchs, plogi);
}
/*
* Process incoming ECHO.
* Since it does not require a login, it is processed here.
*/
static void
bfa_fcs_lport_echo(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
struct fc_echo_s *echo, u16 rx_len)
{
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
struct bfa_rport_s *bfa_rport = NULL;
int len, pyld_len;
bfa_trc(port->fcs, rx_fchs->s_id);
bfa_trc(port->fcs, rx_fchs->d_id);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp)
return;
len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
rx_fchs->ox_id);
/*
* Copy the payload (if any) from the echo frame
*/
pyld_len = rx_len - sizeof(struct fchs_s);
bfa_trc(port->fcs, rx_len);
bfa_trc(port->fcs, pyld_len);
if (pyld_len > len)
memcpy(((u8 *) bfa_fcxp_get_reqbuf(fcxp)) +
sizeof(struct fc_echo_s), (echo + 1),
(pyld_len - sizeof(struct fc_echo_s)));
bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
BFA_FALSE, FC_CLASS_3, pyld_len, &fchs, NULL, NULL,
FC_MAX_PDUSZ, 0);
}
/*
* Process incoming RNID.
* Since it does not require a login, it is processed here.
*/
static void
bfa_fcs_lport_rnid(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
struct fc_rnid_cmd_s *rnid, u16 rx_len)
{
struct fc_rnid_common_id_data_s common_id_data;
struct fc_rnid_general_topology_data_s gen_topo_data;
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
struct bfa_rport_s *bfa_rport = NULL;
u16 len;
u32 data_format;
bfa_trc(port->fcs, rx_fchs->s_id);
bfa_trc(port->fcs, rx_fchs->d_id);
bfa_trc(port->fcs, rx_len);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp)
return;
/*
* Check Node Identification Data Format
* We only support General Topology Discovery Format.
* For any other requested Data Formats, we return Common Node Id Data
* only, as per FC-LS.
*/
bfa_trc(port->fcs, rnid->node_id_data_format);
if (rnid->node_id_data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) {
data_format = RNID_NODEID_DATA_FORMAT_DISCOVERY;
/*
* Get General topology data for this port
*/
bfa_fs_port_get_gen_topo_data(port, &gen_topo_data);
} else {
data_format = RNID_NODEID_DATA_FORMAT_COMMON;
}
/*
* Copy the Node Id Info
*/
common_id_data.port_name = bfa_fcs_lport_get_pwwn(port);
common_id_data.node_name = bfa_fcs_lport_get_nwwn(port);
len = fc_rnid_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
rx_fchs->ox_id, data_format, &common_id_data,
&gen_topo_data);
bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
FC_MAX_PDUSZ, 0);
}
/*
* Fill out General Topology Discovery Data for RNID ELS.
*/
static void
bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
struct fc_rnid_general_topology_data_s *gen_topo_data)
{
memset(gen_topo_data, 0,
sizeof(struct fc_rnid_general_topology_data_s));
gen_topo_data->asso_type = cpu_to_be32(RNID_ASSOCIATED_TYPE_HOST);
gen_topo_data->phy_port_num = 0; /* @todo */
gen_topo_data->num_attached_nodes = cpu_to_be32(1);
}
static void
bfa_fcs_lport_online_actions(struct bfa_fcs_lport_s *port)
{
struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
char lpwwn_buf[BFA_STRING_32];
bfa_trc(port->fcs, port->fabric->oper_type);
__port_action[port->fabric->fab_type].init(port);
__port_action[port->fabric->fab_type].online(port);
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Logical port online: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_ONLINE);
bfad->bfad_flags |= BFAD_PORT_ONLINE;
}
static void
bfa_fcs_lport_offline_actions(struct bfa_fcs_lport_s *port)
{
struct list_head *qe, *qen;
struct bfa_fcs_rport_s *rport;
struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
char lpwwn_buf[BFA_STRING_32];
bfa_trc(port->fcs, port->fabric->oper_type);
__port_action[port->fabric->fab_type].offline(port);
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
if (bfa_sm_cmp_state(port->fabric,
bfa_fcs_fabric_sm_online) == BFA_TRUE) {
BFA_LOG(KERN_ERR, bfad, bfa_log_level,
"Logical port lost fabric connectivity: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DISCONNECT);
} else {
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Logical port taken offline: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_OFFLINE);
}
list_for_each_safe(qe, qen, &port->rport_q) {
rport = (struct bfa_fcs_rport_s *) qe;
bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
}
}
static void
bfa_fcs_lport_unknown_init(struct bfa_fcs_lport_s *port)
{
WARN_ON(1);
}
static void
bfa_fcs_lport_unknown_online(struct bfa_fcs_lport_s *port)
{
WARN_ON(1);
}
static void
bfa_fcs_lport_unknown_offline(struct bfa_fcs_lport_s *port)
{
WARN_ON(1);
}
static void
bfa_fcs_lport_abts_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs)
{
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
int len;
bfa_trc(port->fcs, rx_fchs->d_id);
bfa_trc(port->fcs, rx_fchs->s_id);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp)
return;
len = fc_ba_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
rx_fchs->ox_id, 0);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
FC_MAX_PDUSZ, 0);
}
static void
bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port)
{
struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
char lpwwn_buf[BFA_STRING_32];
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"Logical port deleted: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
bfa_fcs_lport_aen_post(port, BFA_LPORT_AEN_DELETE);
/* Base port will be deleted by the OS driver */
if (port->vport) {
bfa_fcb_lport_delete(port->fcs->bfad, port->port_cfg.roles,
port->fabric->vf_drv,
port->vport ? port->vport->vport_drv : NULL);
bfa_fcs_vport_delete_comp(port->vport);
} else {
bfa_wc_down(&port->fabric->wc);
}
}
/*
* Unsolicited frame receive handling.
*/
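/*
* Dispatch order, as implemented below: login-less ELSs first (PLOGI,
* ECHO, RNID), then BLS ABTS, then an FC-CT reject for GS frames, then
* a matching rport by PID, and finally RSCN/LOGO/PRLI handling or an
* LS_RJT for any other ELS.
*/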
void
bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,
struct fchs_s *fchs, u16 len)
{
u32 pid = fchs->s_id;
struct bfa_fcs_rport_s *rport = NULL;
struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
bfa_stats(lport, uf_recvs);
bfa_trc(lport->fcs, fchs->type);
if (!bfa_fcs_lport_is_online(lport)) {
bfa_stats(lport, uf_recv_drops);
return;
}
/*
* First, handle ELSs that do not require a login.
*/
/*
* Handle PLOGI first
*/
if ((fchs->type == FC_TYPE_ELS) &&
(els_cmd->els_code == FC_ELS_PLOGI)) {
bfa_fcs_lport_plogi(lport, fchs, (struct fc_logi_s *) els_cmd);
return;
}
/*
* Handle ECHO separately.
*/
if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_ECHO)) {
bfa_fcs_lport_echo(lport, fchs,
(struct fc_echo_s *)els_cmd, len);
return;
}
/*
* Handle RNID separately.
*/
if ((fchs->type == FC_TYPE_ELS) && (els_cmd->els_code == FC_ELS_RNID)) {
bfa_fcs_lport_rnid(lport, fchs,
(struct fc_rnid_cmd_s *) els_cmd, len);
return;
}
if (fchs->type == FC_TYPE_BLS) {
if ((fchs->routing == FC_RTG_BASIC_LINK) &&
(fchs->cat_info == FC_CAT_ABTS))
bfa_fcs_lport_abts_acc(lport, fchs);
return;
}
if (fchs->type == FC_TYPE_SERVICES) {
/*
* Unhandled FC-GS frames. Send a FC-CT Reject
*/
bfa_fcs_lport_send_fcgs_rjt(lport, fchs, CT_RSN_NOT_SUPP,
CT_NS_EXP_NOADDITIONAL);
return;
}
/*
* look for a matching remote port ID
*/
rport = bfa_fcs_lport_get_rport_by_pid(lport, pid);
if (rport) {
bfa_trc(rport->fcs, fchs->s_id);
bfa_trc(rport->fcs, fchs->d_id);
bfa_trc(rport->fcs, fchs->type);
bfa_fcs_rport_uf_recv(rport, fchs, len);
return;
}
/*
* Only handles ELS frames for now.
*/
if (fchs->type != FC_TYPE_ELS) {
bfa_trc(lport->fcs, fchs->s_id);
bfa_trc(lport->fcs, fchs->d_id);
/* ignore type FC_TYPE_FC_FSS */
if (fchs->type != FC_TYPE_FC_FSS)
bfa_sm_fault(lport->fcs, fchs->type);
return;
}
bfa_trc(lport->fcs, els_cmd->els_code);
if (els_cmd->els_code == FC_ELS_RSCN) {
bfa_fcs_lport_scn_process_rscn(lport, fchs, len);
return;
}
if (els_cmd->els_code == FC_ELS_LOGO) {
/*
* @todo Handle LOGO frames received.
*/
return;
}
if (els_cmd->els_code == FC_ELS_PRLI) {
/*
* @todo Handle PRLI frames received.
*/
return;
}
/*
* Unhandled ELS frames. Send a LS_RJT.
*/
bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP,
FC_LS_RJT_EXP_NO_ADDL_INFO);
}
/*
* PID based Lookup for a R-Port in the Port R-Port Queue
*/
struct bfa_fcs_rport_s *
bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid)
{
struct bfa_fcs_rport_s *rport;
struct list_head *qe;
list_for_each(qe, &port->rport_q) {
rport = (struct bfa_fcs_rport_s *) qe;
if (rport->pid == pid)
return rport;
}
bfa_trc(port->fcs, pid);
return NULL;
}
/*
* PWWN based Lookup for a R-Port in the Port R-Port Queue
*/
struct bfa_fcs_rport_s *
bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn)
{
struct bfa_fcs_rport_s *rport;
struct list_head *qe;
list_for_each(qe, &port->rport_q) {
rport = (struct bfa_fcs_rport_s *) qe;
if (wwn_is_equal(rport->pwwn, pwwn))
return rport;
}
bfa_trc(port->fcs, pwwn);
return NULL;
}
/*
* NWWN based Lookup for a R-Port in the Port R-Port Queue
*/
struct bfa_fcs_rport_s *
bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn)
{
struct bfa_fcs_rport_s *rport;
struct list_head *qe;
list_for_each(qe, &port->rport_q) {
rport = (struct bfa_fcs_rport_s *) qe;
if (wwn_is_equal(rport->nwwn, nwwn))
return rport;
}
bfa_trc(port->fcs, nwwn);
return NULL;
}
/*
* Called by rport module when new rports are discovered.
*/
void
bfa_fcs_lport_add_rport(
struct bfa_fcs_lport_s *port,
struct bfa_fcs_rport_s *rport)
{
list_add_tail(&rport->qe, &port->rport_q);
port->num_rports++;
}
/*
* Called by rport module when rports are deleted.
*/
void
bfa_fcs_lport_del_rport(
struct bfa_fcs_lport_s *port,
struct bfa_fcs_rport_s *rport)
{
WARN_ON(!bfa_q_is_on_q(&port->rport_q, rport));
list_del(&rport->qe);
port->num_rports--;
bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT);
}
/*
* Called by fabric for base port when fabric login is complete.
* Called by vport for virtual ports when FDISC is complete.
*/
void
bfa_fcs_lport_online(struct bfa_fcs_lport_s *port)
{
bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE);
}
/*
* Called by fabric for base port when fabric goes offline.
* Called by vport for virtual ports when virtual port becomes offline.
*/
void
bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port)
{
bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
}
/*
* Called by fabric to delete base lport and associated resources.
*
* Called by vport to delete lport and associated resources. Should call
* bfa_fcs_vport_delete_comp() for vports on completion.
*/
void
bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port)
{
bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE);
}
/*
* Return TRUE if port is online, else return FALSE
*/
bfa_boolean_t
bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port)
{
return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online);
}
/*
* Attach time initialization of logical ports.
*/
void
bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
u16 vf_id, struct bfa_fcs_vport_s *vport)
{
lport->fcs = fcs;
lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
lport->vport = vport;
lport->lp_tag = (vport) ? vport->lps->bfa_tag :
lport->fabric->lps->bfa_tag;
INIT_LIST_HEAD(&lport->rport_q);
lport->num_rports = 0;
}
/*
* Logical port initialization of base or virtual port.
* Called by fabric for base port or by vport for virtual ports.
*/
void
bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
struct bfa_lport_cfg_s *port_cfg)
{
struct bfa_fcs_vport_s *vport = lport->vport;
struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad;
char lpwwn_buf[BFA_STRING_32];
lport->port_cfg = *port_cfg;
lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport,
lport->port_cfg.roles,
lport->fabric->vf_drv,
vport ? vport->vport_drv : NULL);
wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(lport));
BFA_LOG(KERN_INFO, bfad, bfa_log_level,
"New logical port created: WWN = %s Role = %s\n",
lpwwn_buf, "Initiator");
bfa_fcs_lport_aen_post(lport, BFA_LPORT_AEN_NEW);
bfa_sm_set_state(lport, bfa_fcs_lport_sm_uninit);
bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
}
/*
* fcs_lport_api
*/
void
bfa_fcs_lport_get_attr(
struct bfa_fcs_lport_s *port,
struct bfa_lport_attr_s *port_attr)
{
if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online))
port_attr->pid = port->pid;
else
port_attr->pid = 0;
port_attr->port_cfg = port->port_cfg;
if (port->fabric) {
port_attr->port_type = port->fabric->oper_type;
port_attr->loopback = bfa_sm_cmp_state(port->fabric,
bfa_fcs_fabric_sm_loopback);
port_attr->authfail =
bfa_sm_cmp_state(port->fabric,
bfa_fcs_fabric_sm_auth_failed);
port_attr->fabric_name = bfa_fcs_lport_get_fabric_name(port);
memcpy(port_attr->fabric_ip_addr,
bfa_fcs_lport_get_fabric_ipaddr(port),
BFA_FCS_FABRIC_IPADDR_SZ);
if (port->vport != NULL) {
port_attr->port_type = BFA_PORT_TYPE_VPORT;
port_attr->fpma_mac =
port->vport->lps->lp_mac;
} else {
port_attr->fpma_mac =
port->fabric->lps->lp_mac;
}
} else {
port_attr->port_type = BFA_PORT_TYPE_UNKNOWN;
port_attr->state = BFA_LPORT_UNINIT;
}
}
/*
* bfa_fcs_lport_fab port fab functions
*/
/*
* Called by port to initialize fabric services of the base port.
*/
static void
bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port)
{
bfa_fcs_lport_ns_init(port);
bfa_fcs_lport_scn_init(port);
bfa_fcs_lport_ms_init(port);
}
/*
* Called by port to notify transition to online state.
*/
static void
bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port)
{
bfa_fcs_lport_ns_online(port);
bfa_fcs_lport_scn_online(port);
}
/*
* Called by port to notify transition to offline state.
*/
static void
bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port)
{
bfa_fcs_lport_ns_offline(port);
bfa_fcs_lport_scn_offline(port);
bfa_fcs_lport_ms_offline(port);
}
/*
* bfa_fcs_lport_n2n functions
*/
/*
* Called by fcs/port to initialize N2N topology.
*/
static void
bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port)
{
}
/*
* Called by fcs/port to notify transition to online state.
*/
static void
bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port)
{
struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n;
struct bfa_lport_cfg_s *pcfg = &port->port_cfg;
struct bfa_fcs_rport_s *rport;
bfa_trc(port->fcs, pcfg->pwwn);
/*
* If our PWWN is greater than that of the r-port, we have to initiate
* PLOGI and assign an address. If not, we need to wait for its PLOGI.
*
* If our PWWN is less than that of the remote port, it will send a
* PLOGI with the PIDs assigned. The rport state machine takes care of
* this incoming PLOGI.
*/
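/*
* Illustrative only: with a local pwwn ending in ...:02 against a
* remote pwwn ending in ...:01, the memcmp() below is positive, so
* this port claims N2N_LOCAL_PID, gives the peer N2N_REMOTE_PID and
* initiates the PLOGI.
*/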
if (memcmp
((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
sizeof(wwn_t)) > 0) {
port->pid = N2N_LOCAL_PID;
bfa_lps_set_n2n_pid(port->fabric->lps, N2N_LOCAL_PID);
/*
* First, check if we know the device by pwwn.
*/
rport = bfa_fcs_lport_get_rport_by_pwwn(port,
n2n_port->rem_port_wwn);
if (rport) {
bfa_trc(port->fcs, rport->pid);
bfa_trc(port->fcs, rport->pwwn);
rport->pid = N2N_REMOTE_PID;
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
return;
}
/*
* In n2n there can be only one rport. Delete the old one
* whose pid should be zero, because it is offline.
*/
if (port->num_rports > 0) {
rport = bfa_fcs_lport_get_rport_by_pid(port, 0);
WARN_ON(rport == NULL);
if (rport) {
bfa_trc(port->fcs, rport->pwwn);
bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
}
}
bfa_fcs_rport_create(port, N2N_REMOTE_PID);
}
}
/*
* Called by fcs/port to notify transition to offline state.
*/
static void
bfa_fcs_lport_n2n_offline(struct bfa_fcs_lport_s *port)
{
struct bfa_fcs_lport_n2n_s *n2n_port = &port->port_topo.pn2n;
bfa_trc(port->fcs, port->pid);
port->pid = 0;
n2n_port->rem_port_wwn = 0;
n2n_port->reply_oxid = 0;
}
#define BFA_FCS_FDMI_CMD_MAX_RETRIES 2
/*
* forward declarations
*/
static void bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_fdmi_rhba_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_fdmi_rprt_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_fdmi_rpa_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_fdmi_timeout(void *arg);
static u16 bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
u8 *pyld);
static u16 bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
u8 *pyld);
static u16 bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi,
u8 *pyld);
static u16 bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *
fdmi, u8 *pyld);
static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
struct bfa_fcs_fdmi_port_attr_s *port_attr);
u32 bfa_fcs_fdmi_convert_speed(enum bfa_port_speed pport_speed);
/*
* fcs_fdmi_sm FCS FDMI state machine
*/
/*
* FDMI State Machine events
*/
enum port_fdmi_event {
FDMISM_EVENT_PORT_ONLINE = 1,
FDMISM_EVENT_PORT_OFFLINE = 2,
FDMISM_EVENT_RSP_OK = 4,
FDMISM_EVENT_RSP_ERROR = 5,
FDMISM_EVENT_TIMEOUT = 6,
FDMISM_EVENT_RHBA_SENT = 7,
FDMISM_EVENT_RPRT_SENT = 8,
FDMISM_EVENT_RPA_SENT = 9,
};
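/*
* Rough flow, summarized from the handlers below: offline
* --PORT_ONLINE--> sending_rhba (base port) or sending_rprt (vport),
* then rhba --RSP_OK--> sending_rpa --> rpa --RSP_OK--> online, while
* rprt --RSP_OK--> online directly. RSP_ERROR detours through a
* timer-driven retry state up to BFA_FCS_FDMI_CMD_MAX_RETRIES times
* before falling back to offline.
*/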
static void bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_lport_fdmi_sm_sending_rhba(
struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_lport_fdmi_sm_rhba_retry(
struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_lport_fdmi_sm_sending_rprt(
struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_lport_fdmi_sm_rprt_retry(
struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_lport_fdmi_sm_sending_rpa(
struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_lport_fdmi_sm_rpa_retry(
struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
static void bfa_fcs_lport_fdmi_sm_disabled(
struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event);
/*
* Start in offline state - awaiting MS to send start.
*/
static void
bfa_fcs_lport_fdmi_sm_offline(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
fdmi->retry_cnt = 0;
switch (event) {
case FDMISM_EVENT_PORT_ONLINE:
if (port->vport) {
/*
* For Vports, register a new port.
*/
bfa_sm_set_state(fdmi,
bfa_fcs_lport_fdmi_sm_sending_rprt);
bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL);
} else {
/*
* For a base port, we should first register the HBA
* attribute. The HBA attribute also contains the base
* port registration.
*/
bfa_sm_set_state(fdmi,
bfa_fcs_lport_fdmi_sm_sending_rhba);
bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL);
}
break;
case FDMISM_EVENT_PORT_OFFLINE:
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_fdmi_sm_sending_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case FDMISM_EVENT_RHBA_SENT:
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rhba);
break;
case FDMISM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
&fdmi->fcxp_wqe);
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_fdmi_sm_rhba(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case FDMISM_EVENT_RSP_ERROR:
/*
* if max retries have not been reached, start timer for a
* delayed retry
*/
if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
bfa_sm_set_state(fdmi,
bfa_fcs_lport_fdmi_sm_rhba_retry);
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
&fdmi->timer,
bfa_fcs_lport_fdmi_timeout, fdmi,
BFA_FCS_RETRY_TIMEOUT);
} else {
/*
* set state to offline
*/
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
}
break;
case FDMISM_EVENT_RSP_OK:
/*
* Initiate Register Port Attributes
*/
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa);
fdmi->retry_cnt = 0;
bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL);
break;
case FDMISM_EVENT_PORT_OFFLINE:
bfa_fcxp_discard(fdmi->fcxp);
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_fdmi_sm_rhba_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case FDMISM_EVENT_TIMEOUT:
/*
* Retry Timer Expired. Re-send
*/
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rhba);
bfa_fcs_lport_fdmi_send_rhba(fdmi, NULL);
break;
case FDMISM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
bfa_timer_stop(&fdmi->timer);
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
/*
* RPRT : Register Port
*/
static void
bfa_fcs_lport_fdmi_sm_sending_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case FDMISM_EVENT_RPRT_SENT:
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rprt);
break;
case FDMISM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
&fdmi->fcxp_wqe);
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_fdmi_sm_rprt(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case FDMISM_EVENT_RSP_ERROR:
/*
* if max retries have not been reached, start timer for a
* delayed retry
*/
if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
bfa_sm_set_state(fdmi,
bfa_fcs_lport_fdmi_sm_rprt_retry);
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
&fdmi->timer,
bfa_fcs_lport_fdmi_timeout, fdmi,
BFA_FCS_RETRY_TIMEOUT);
} else {
/*
* set state to offline
*/
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
fdmi->retry_cnt = 0;
}
break;
case FDMISM_EVENT_RSP_OK:
fdmi->retry_cnt = 0;
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online);
break;
case FDMISM_EVENT_PORT_OFFLINE:
bfa_fcxp_discard(fdmi->fcxp);
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_fdmi_sm_rprt_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case FDMISM_EVENT_TIMEOUT:
/*
* Retry Timer Expired. Re-send
*/
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rprt);
bfa_fcs_lport_fdmi_send_rprt(fdmi, NULL);
break;
case FDMISM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
bfa_timer_stop(&fdmi->timer);
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
/*
* Register Port Attributes
*/
static void
bfa_fcs_lport_fdmi_sm_sending_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case FDMISM_EVENT_RPA_SENT:
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa);
break;
case FDMISM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(port),
&fdmi->fcxp_wqe);
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_fdmi_sm_rpa(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case FDMISM_EVENT_RSP_ERROR:
/*
* if max retries have not been reached, start timer for a
* delayed retry
*/
if (fdmi->retry_cnt++ < BFA_FCS_FDMI_CMD_MAX_RETRIES) {
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_rpa_retry);
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(port),
&fdmi->timer,
bfa_fcs_lport_fdmi_timeout, fdmi,
BFA_FCS_RETRY_TIMEOUT);
} else {
/*
* set state to offline
*/
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
fdmi->retry_cnt = 0;
}
break;
case FDMISM_EVENT_RSP_OK:
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_online);
fdmi->retry_cnt = 0;
break;
case FDMISM_EVENT_PORT_OFFLINE:
bfa_fcxp_discard(fdmi->fcxp);
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_fdmi_sm_rpa_retry(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case FDMISM_EVENT_TIMEOUT:
/*
* Retry Timer Expired. Re-send
*/
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_sending_rpa);
bfa_fcs_lport_fdmi_send_rpa(fdmi, NULL);
break;
case FDMISM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
bfa_timer_stop(&fdmi->timer);
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
switch (event) {
case FDMISM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
/*
* FDMI is disabled state.
*/
static void
bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi,
enum port_fdmi_event event)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
bfa_trc(port->fcs, port->port_cfg.pwwn);
bfa_trc(port->fcs, event);
/* No-op state. It can only be enabled at driver init. */
}
/*
* RHBA : Register HBA Attributes.
*/
static void
bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
struct bfa_fcs_lport_s *port = fdmi->ms->port;
struct fchs_s fchs;
int len, attr_len;
struct bfa_fcxp_s *fcxp;
u8 *pyld;
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
bfa_fcs_lport_fdmi_send_rhba, fdmi);
return;
}
fdmi->fcxp = fcxp;
pyld = bfa_fcxp_get_reqbuf(fcxp);
memset(pyld, 0, FC_MAX_PDUSZ);
len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
FDMI_RHBA);
attr_len =
bfa_fcs_lport_fdmi_build_rhba_pyld(fdmi,
(u8 *) ((struct ct_hdr_s *) pyld
+ 1));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, (len + attr_len), &fchs,
bfa_fcs_lport_fdmi_rhba_response, (void *)fdmi,
FC_MAX_PDUSZ, FC_FCCT_TOV);
bfa_sm_send_event(fdmi, FDMISM_EVENT_RHBA_SENT);
}
static u16
bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
struct bfa_fcs_fdmi_hba_attr_s hba_attr;
struct bfa_fcs_fdmi_hba_attr_s *fcs_hba_attr = &hba_attr;
struct fdmi_rhba_s *rhba = (struct fdmi_rhba_s *) pyld;
struct fdmi_attr_s *attr;
u8 *curr_ptr;
u16 len, count;
u16 templen;
/*
* get hba attributes
*/
bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr);
rhba->hba_id = bfa_fcs_lport_get_pwwn(port);
rhba->port_list.num_ports = cpu_to_be32(1);
rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port);
len = sizeof(rhba->hba_id) + sizeof(rhba->port_list);
count = 0;
len += sizeof(rhba->hba_attr_blk.attr_count);
/*
* fill out the individual entries of the HBA attribute block
*/
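/*
* Layout note: each attribute below is a TLV with a be16 type, a be16
* length that also covers the type and length fields themselves, and a
* value padded to a 4-byte boundary via fc_roundup(); curr_ptr walks
* the block as entries are appended.
*/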
curr_ptr = (u8 *) &rhba->hba_attr_blk.hba_attr;
/*
* Node Name
*/
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME);
templen = sizeof(wwn_t);
memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), templen);
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
count++;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
/*
* Manufacturer
*/
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
templen = (u16) strlen(fcs_hba_attr->manufacturer);
memcpy(attr->value, fcs_hba_attr->manufacturer, templen);
templen = fc_roundup(templen, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
count++;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
/*
* Serial Number
*/
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
templen = (u16) strlen(fcs_hba_attr->serial_num);
memcpy(attr->value, fcs_hba_attr->serial_num, templen);
templen = fc_roundup(templen, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
count++;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
/*
* Model
*/
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
templen = (u16) strlen(fcs_hba_attr->model);
memcpy(attr->value, fcs_hba_attr->model, templen);
templen = fc_roundup(templen, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
count++;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
/*
* Model Desc
*/
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
templen = (u16) strlen(fcs_hba_attr->model_desc);
memcpy(attr->value, fcs_hba_attr->model_desc, templen);
templen = fc_roundup(templen, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
count++;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
/*
* H/W Version
*/
if (fcs_hba_attr->hw_version[0] != '\0') {
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
templen = (u16) strlen(fcs_hba_attr->hw_version);
memcpy(attr->value, fcs_hba_attr->hw_version, templen);
templen = fc_roundup(templen, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
count++;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
}
/*
* Driver Version
*/
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
templen = (u16) strlen(fcs_hba_attr->driver_version);
memcpy(attr->value, fcs_hba_attr->driver_version, templen);
templen = fc_roundup(templen, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
count++;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
/*
* Option Rom Version
*/
if (fcs_hba_attr->option_rom_ver[0] != '\0') {
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
templen = (u16) strlen(fcs_hba_attr->option_rom_ver);
memcpy(attr->value, fcs_hba_attr->option_rom_ver, templen);
templen = fc_roundup(templen, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
count++;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
}
/*
* f/w Version = driver version
*/
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
templen = (u16) strlen(fcs_hba_attr->driver_version);
memcpy(attr->value, fcs_hba_attr->driver_version, templen);
templen = fc_roundup(templen, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
count++;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
/*
* OS Name
*/
if (fcs_hba_attr->os_name[0] != '\0') {
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
templen = (u16) strlen(fcs_hba_attr->os_name);
memcpy(attr->value, fcs_hba_attr->os_name, templen);
templen = fc_roundup(templen, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
count++;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
}
/*
* MAX_CT_PAYLOAD
*/
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT);
templen = sizeof(fcs_hba_attr->max_ct_pyld);
memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, templen);
len += templen;
count++;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
/*
* Update size of payload
*/
len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
return len;
}
static void
bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status,
u32 rsp_len, u32 resid_len,
struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_fdmi_s *fdmi =
(struct bfa_fcs_lport_fdmi_s *) cbarg;
struct bfa_fcs_lport_s *port = fdmi->ms->port;
struct ct_hdr_s *cthdr = NULL;
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
bfa_trc(port->fcs, req_status);
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
return;
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
return;
}
bfa_trc(port->fcs, cthdr->reason_code);
bfa_trc(port->fcs, cthdr->exp_code);
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
}
/*
* RPRT : Register Port
*/
static void
bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
struct bfa_fcs_lport_s *port = fdmi->ms->port;
struct fchs_s fchs;
u16 len, attr_len;
struct bfa_fcxp_s *fcxp;
u8 *pyld;
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
bfa_fcs_lport_fdmi_send_rprt, fdmi);
return;
}
fdmi->fcxp = fcxp;
pyld = bfa_fcxp_get_reqbuf(fcxp);
memset(pyld, 0, FC_MAX_PDUSZ);
len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
FDMI_RPRT);
attr_len =
bfa_fcs_lport_fdmi_build_rprt_pyld(fdmi,
(u8 *) ((struct ct_hdr_s *) pyld
+ 1));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len + attr_len, &fchs,
bfa_fcs_lport_fdmi_rprt_response, (void *)fdmi,
FC_MAX_PDUSZ, FC_FCCT_TOV);
bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
}
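/*
 * All of the send_* routines in this file share the FCXP allocation
 * pattern shown above: try an immediate allocation, and if none is
 * available, queue on the wait-queue element so the routine is
 * re-invoked with a pre-allocated FCXP once one frees up. Sketch:
 *
 *	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
 *	if (!fcxp) {
 *		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &wqe, this_fn, arg);
 *		return;
 *	}
 *	// build the request, bfa_fcxp_send(...), then notify the SM
 */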
/*
 * This routine builds the Port Attribute Block used in the RPA and
 * RPRT commands.
 */
static u16
bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi,
u8 *pyld)
{
struct bfa_fcs_fdmi_port_attr_s fcs_port_attr;
struct fdmi_port_attr_s *port_attrib = (struct fdmi_port_attr_s *) pyld;
struct fdmi_attr_s *attr;
u8 *curr_ptr;
u16 len;
u8 count = 0;
u16 templen;
/*
* get port attributes
*/
bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
len = sizeof(port_attrib->attr_count);
/*
* fill out the individual entries
*/
curr_ptr = (u8 *) &port_attrib->port_attr;
/*
* FC4 Types
*/
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES);
templen = sizeof(fcs_port_attr.supp_fc4_types);
memcpy(attr->value, fcs_port_attr.supp_fc4_types, templen);
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
++count;
attr->len =
cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
/*
* Supported Speed
*/
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED);
templen = sizeof(fcs_port_attr.supp_speed);
memcpy(attr->value, &fcs_port_attr.supp_speed, templen);
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
++count;
attr->len =
cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
/*
* current Port Speed
*/
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED);
templen = sizeof(fcs_port_attr.curr_speed);
memcpy(attr->value, &fcs_port_attr.curr_speed, templen);
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
++count;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
/*
* max frame size
*/
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE);
templen = sizeof(fcs_port_attr.max_frm_size);
memcpy(attr->value, &fcs_port_attr.max_frm_size, templen);
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
++count;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
/*
* OS Device Name
*/
if (fcs_port_attr.os_device_name[0] != '\0') {
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
templen = (u16) strlen(fcs_port_attr.os_device_name);
memcpy(attr->value, fcs_port_attr.os_device_name, templen);
templen = fc_roundup(templen, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
++count;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
}
/*
* Host Name
*/
if (fcs_port_attr.host_name[0] != '\0') {
attr = (struct fdmi_attr_s *) curr_ptr;
attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
templen = (u16) strlen(fcs_port_attr.host_name);
memcpy(attr->value, fcs_port_attr.host_name, templen);
templen = fc_roundup(templen, sizeof(u32));
curr_ptr += sizeof(attr->type) + sizeof(templen) + templen;
len += templen;
++count;
attr->len = cpu_to_be16(templen + sizeof(attr->type) +
sizeof(templen));
}
/*
* Update size of payload
*/
port_attrib->attr_count = cpu_to_be32(count);
len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
return len;
}
static u16
bfa_fcs_lport_fdmi_build_rprt_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
struct fdmi_rprt_s *rprt = (struct fdmi_rprt_s *) pyld;
u16 len;
rprt->hba_id = bfa_fcs_lport_get_pwwn(bfa_fcs_get_base_port(port->fcs));
rprt->port_name = bfa_fcs_lport_get_pwwn(port);
len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi,
(u8 *) &rprt->port_attr_blk);
len += sizeof(rprt->hba_id) + sizeof(rprt->port_name);
return len;
}
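/*
 * RPRT vs. RPA: the RPRT payload built above carries both the HBA
 * identifier (the base port's pwwn) and the registering port's name,
 * which lets a port register under an already-registered HBA entry;
 * the RPA payload below carries only the port name. Both share the
 * same port attribute block.
 */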
static void
bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status,
u32 rsp_len, u32 resid_len,
struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_fdmi_s *fdmi =
(struct bfa_fcs_lport_fdmi_s *) cbarg;
struct bfa_fcs_lport_s *port = fdmi->ms->port;
struct ct_hdr_s *cthdr = NULL;
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
bfa_trc(port->fcs, req_status);
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
return;
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
return;
}
bfa_trc(port->fcs, cthdr->reason_code);
bfa_trc(port->fcs, cthdr->exp_code);
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
}
/*
* RPA : Register Port Attributes.
*/
static void
bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_fdmi_s *fdmi = fdmi_cbarg;
struct bfa_fcs_lport_s *port = fdmi->ms->port;
struct fchs_s fchs;
u16 len, attr_len;
struct bfa_fcxp_s *fcxp;
u8 *pyld;
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &fdmi->fcxp_wqe,
bfa_fcs_lport_fdmi_send_rpa, fdmi);
return;
}
fdmi->fcxp = fcxp;
pyld = bfa_fcxp_get_reqbuf(fcxp);
memset(pyld, 0, FC_MAX_PDUSZ);
len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
FDMI_RPA);
attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
(u8 *) ((struct ct_hdr_s *) pyld + 1));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len + attr_len, &fchs,
bfa_fcs_lport_fdmi_rpa_response, (void *)fdmi,
FC_MAX_PDUSZ, FC_FCCT_TOV);
bfa_sm_send_event(fdmi, FDMISM_EVENT_RPA_SENT);
}
static u16
bfa_fcs_lport_fdmi_build_rpa_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
struct fdmi_rpa_s *rpa = (struct fdmi_rpa_s *) pyld;
u16 len;
rpa->port_name = bfa_fcs_lport_get_pwwn(port);
len = bfa_fcs_lport_fdmi_build_portattr_block(fdmi,
(u8 *) &rpa->port_attr_blk);
len += sizeof(rpa->port_name);
return len;
}
static void
bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_fdmi_s *fdmi =
(struct bfa_fcs_lport_fdmi_s *) cbarg;
struct bfa_fcs_lport_s *port = fdmi->ms->port;
struct ct_hdr_s *cthdr = NULL;
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
bfa_trc(port->fcs, req_status);
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
return;
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
return;
}
bfa_trc(port->fcs, cthdr->reason_code);
bfa_trc(port->fcs, cthdr->exp_code);
bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
}
static void
bfa_fcs_lport_fdmi_timeout(void *arg)
{
struct bfa_fcs_lport_fdmi_s *fdmi = (struct bfa_fcs_lport_fdmi_s *) arg;
bfa_sm_send_event(fdmi, FDMISM_EVENT_TIMEOUT);
}
static void
bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
struct bfa_fcs_fdmi_hba_attr_s *hba_attr)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
hba_attr->manufacturer);
bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc,
hba_attr->serial_num);
bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc,
hba_attr->model);
bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc,
hba_attr->model_desc);
bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc,
hba_attr->hw_version);
bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc,
hba_attr->option_rom_ver);
bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
hba_attr->fw_version);
strlcpy(hba_attr->driver_version, (char *)driver_info->version,
sizeof(hba_attr->driver_version));
strlcpy(hba_attr->os_name, driver_info->host_os_name,
sizeof(hba_attr->os_name));
/*
 * If there is a patch level, append it to the os name
 * along with a separator. strlcat() bounds the copy by the
 * destination size; the original strncat() calls bounded it
 * by the source size, which cannot prevent overflow.
 */
if (driver_info->host_os_patch[0] != '\0') {
strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(hba_attr->os_name));
strlcat(hba_attr->os_name, driver_info->host_os_patch,
sizeof(hba_attr->os_name));
}
hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ);
}
static void
bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
struct bfa_fcs_fdmi_port_attr_s *port_attr)
{
struct bfa_fcs_lport_s *port = fdmi->ms->port;
struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
struct bfa_port_attr_s pport_attr;
memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
/*
* get pport attributes from hal
*/
bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
/*
* get FC4 type Bitmask
*/
fc_get_fc4type_bitmask(FC_TYPE_FCP, port_attr->supp_fc4_types);
/*
* Supported Speeds
*/
switch (pport_attr.speed_supported) {
case BFA_PORT_SPEED_16GBPS:
port_attr->supp_speed =
cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_16G);
break;
case BFA_PORT_SPEED_10GBPS:
port_attr->supp_speed =
cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_10G);
break;
case BFA_PORT_SPEED_8GBPS:
port_attr->supp_speed =
cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_8G);
break;
case BFA_PORT_SPEED_4GBPS:
port_attr->supp_speed =
cpu_to_be32(BFA_FCS_FDMI_SUPP_SPEEDS_4G);
break;
default:
bfa_sm_fault(port->fcs, pport_attr.speed_supported);
}
/*
* Current Speed
*/
port_attr->curr_speed = cpu_to_be32(
bfa_fcs_fdmi_convert_speed(pport_attr.speed));
/*
* Max PDU Size.
*/
port_attr->max_frm_size = cpu_to_be32(FC_MAX_PDUSZ);
/*
* OS device Name
*/
/* strlcpy() guarantees termination; strlen() is run on these later */
strlcpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
sizeof(port_attr->os_device_name));
/*
 * Host name
 */
strlcpy(port_attr->host_name, (char *)driver_info->host_machine_name,
sizeof(port_attr->host_name));
}
/*
* Convert BFA speed to FDMI format.
*/
u32
bfa_fcs_fdmi_convert_speed(bfa_port_speed_t pport_speed)
{
u32 ret;
switch (pport_speed) {
case BFA_PORT_SPEED_1GBPS:
case BFA_PORT_SPEED_2GBPS:
ret = pport_speed;
break;
case BFA_PORT_SPEED_4GBPS:
ret = FDMI_TRANS_SPEED_4G;
break;
case BFA_PORT_SPEED_8GBPS:
ret = FDMI_TRANS_SPEED_8G;
break;
case BFA_PORT_SPEED_10GBPS:
ret = FDMI_TRANS_SPEED_10G;
break;
case BFA_PORT_SPEED_16GBPS:
ret = FDMI_TRANS_SPEED_16G;
break;
default:
ret = FDMI_TRANS_SPEED_UNKNOWN;
}
return ret;
}
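/*
 * For example, an 8 Gbps link (BFA_PORT_SPEED_8GBPS) is reported as
 * FDMI_TRANS_SPEED_8G. The 1G/2G cases return the BFA value directly,
 * which assumes the BFA and FDMI encodings coincide for those speeds.
 */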
void
bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms)
{
struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
fdmi->ms = ms;
if (ms->port->fcs->fdmi_enabled)
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_offline);
else
bfa_sm_set_state(fdmi, bfa_fcs_lport_fdmi_sm_disabled);
}
void
bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms)
{
struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
fdmi->ms = ms;
bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_OFFLINE);
}
void
bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms)
{
struct bfa_fcs_lport_fdmi_s *fdmi = &ms->fdmi;
fdmi->ms = ms;
bfa_sm_send_event(fdmi, FDMISM_EVENT_PORT_ONLINE);
}
#define BFA_FCS_MS_CMD_MAX_RETRIES 2
/*
* forward declarations
*/
static void bfa_fcs_lport_ms_send_plogi(void *ms_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ms_timeout(void *arg);
static void bfa_fcs_lport_ms_plogi_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_ms_send_gmal(void *ms_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ms_gmal_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_ms_send_gfn(void *ms_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ms_gfn_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
/*
* fcs_ms_sm FCS MS state machine
*/
/*
* MS State Machine events
*/
enum port_ms_event {
MSSM_EVENT_PORT_ONLINE = 1,
MSSM_EVENT_PORT_OFFLINE = 2,
MSSM_EVENT_RSP_OK = 3,
MSSM_EVENT_RSP_ERROR = 4,
MSSM_EVENT_TIMEOUT = 5,
MSSM_EVENT_FCXP_SENT = 6,
MSSM_EVENT_PORT_FABRIC_RSCN = 7
};
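/*
 * Typical MS progression for a base port, as implemented below:
 *
 *	offline --PORT_ONLINE--> plogi_sending --FCXP_SENT--> plogi
 *	plogi --RSP_OK--> gmal_sending/gmal --> gfn_sending/gfn --> online
 *
 * Vports skip GMAL/GFN and go straight to online after PLOGI. GMAL
 * and GFN retries are capped at BFA_FCS_MS_CMD_MAX_RETRIES; a
 * PORT_OFFLINE event returns the machine to offline from any state.
 */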
static void bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event);
/*
* Start in offline state - awaiting NS to send start.
*/
static void
bfa_fcs_lport_ms_sm_offline(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event)
{
bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
bfa_trc(ms->port->fcs, event);
switch (event) {
case MSSM_EVENT_PORT_ONLINE:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending);
bfa_fcs_lport_ms_send_plogi(ms, NULL);
break;
case MSSM_EVENT_PORT_OFFLINE:
break;
default:
bfa_sm_fault(ms->port->fcs, event);
}
}
static void
bfa_fcs_lport_ms_sm_plogi_sending(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event)
{
bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
bfa_trc(ms->port->fcs, event);
switch (event) {
case MSSM_EVENT_FCXP_SENT:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi);
break;
case MSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
&ms->fcxp_wqe);
break;
default:
bfa_sm_fault(ms->port->fcs, event);
}
}
static void
bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event)
{
bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
bfa_trc(ms->port->fcs, event);
switch (event) {
case MSSM_EVENT_RSP_ERROR:
/*
* Start timer for a delayed retry
*/
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_retry);
ms->port->stats.ms_retries++;
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
&ms->timer, bfa_fcs_lport_ms_timeout, ms,
BFA_FCS_RETRY_TIMEOUT);
break;
case MSSM_EVENT_RSP_OK:
/*
* since plogi is done, now invoke MS related sub-modules
*/
bfa_fcs_lport_fdmi_online(ms);
/*
* if this is a Vport, go to online state.
*/
if (ms->port->vport) {
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
break;
}
/*
* For a base port we need to get the
* switch's IP address.
*/
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending);
bfa_fcs_lport_ms_send_gmal(ms, NULL);
break;
case MSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
bfa_fcxp_discard(ms->fcxp);
break;
default:
bfa_sm_fault(ms->port->fcs, event);
}
}
static void
bfa_fcs_lport_ms_sm_plogi_retry(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event)
{
bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
bfa_trc(ms->port->fcs, event);
switch (event) {
case MSSM_EVENT_TIMEOUT:
/*
* Retry Timer Expired. Re-send
*/
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_plogi_sending);
bfa_fcs_lport_ms_send_plogi(ms, NULL);
break;
case MSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
bfa_timer_stop(&ms->timer);
break;
default:
bfa_sm_fault(ms->port->fcs, event);
}
}
static void
bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event)
{
bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
bfa_trc(ms->port->fcs, event);
switch (event) {
case MSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
break;
case MSSM_EVENT_PORT_FABRIC_RSCN:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
ms->retry_cnt = 0;
bfa_fcs_lport_ms_send_gfn(ms, NULL);
break;
default:
bfa_sm_fault(ms->port->fcs, event);
}
}
static void
bfa_fcs_lport_ms_sm_gmal_sending(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event)
{
bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
bfa_trc(ms->port->fcs, event);
switch (event) {
case MSSM_EVENT_FCXP_SENT:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal);
break;
case MSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
&ms->fcxp_wqe);
break;
default:
bfa_sm_fault(ms->port->fcs, event);
}
}
static void
bfa_fcs_lport_ms_sm_gmal(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event)
{
bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
bfa_trc(ms->port->fcs, event);
switch (event) {
case MSSM_EVENT_RSP_ERROR:
/*
* Start timer for a delayed retry
*/
if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_retry);
ms->port->stats.ms_retries++;
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
&ms->timer, bfa_fcs_lport_ms_timeout, ms,
BFA_FCS_RETRY_TIMEOUT);
} else {
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
bfa_fcs_lport_ms_send_gfn(ms, NULL);
ms->retry_cnt = 0;
}
break;
case MSSM_EVENT_RSP_OK:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
bfa_fcs_lport_ms_send_gfn(ms, NULL);
break;
case MSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
bfa_fcxp_discard(ms->fcxp);
break;
default:
bfa_sm_fault(ms->port->fcs, event);
}
}
static void
bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event)
{
bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
bfa_trc(ms->port->fcs, event);
switch (event) {
case MSSM_EVENT_TIMEOUT:
/*
* Retry Timer Expired. Re-send
*/
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gmal_sending);
bfa_fcs_lport_ms_send_gmal(ms, NULL);
break;
case MSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
bfa_timer_stop(&ms->timer);
break;
default:
bfa_sm_fault(ms->port->fcs, event);
}
}
/*
* ms_pvt MS local functions
*/
static void
bfa_fcs_lport_ms_send_gmal(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
bfa_fcs_lport_t *port = ms->port;
struct fchs_s fchs;
int len;
struct bfa_fcxp_s *fcxp;
bfa_trc(port->fcs, port->pid);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
bfa_fcs_lport_ms_send_gmal, ms);
return;
}
ms->fcxp = fcxp;
len = fc_gmal_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
bfa_fcs_lport_get_fcid(port),
port->fabric->lps->pr_nwwn);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
bfa_fcs_lport_ms_gmal_response, (void *)ms,
FC_MAX_PDUSZ, FC_FCCT_TOV);
bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
static void
bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status,
u32 rsp_len, u32 resid_len,
struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
bfa_fcs_lport_t *port = ms->port;
struct ct_hdr_s *cthdr = NULL;
struct fcgs_gmal_resp_s *gmal_resp;
struct fcgs_gmal_entry_s *gmal_entry;
u32 num_entries;
u8 *rsp_str;
bfa_trc(port->fcs, req_status);
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
bfa_trc(port->fcs, req_status);
bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
return;
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1);
num_entries = be32_to_cpu(gmal_resp->ms_len);
if (num_entries == 0) {
bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
return;
}
/*
 * The response may contain multiple entries, e.g. for the SNMP
 * and telnet interfaces. We scan for the first entry with an
 * "http://" prefix, which carries the management IP address.
 */
gmal_entry = (struct fcgs_gmal_entry_s *)gmal_resp->ms_ma;
while (num_entries > 0) {
if (strncmp(gmal_entry->prefix,
CT_GMAL_RESP_PREFIX_HTTP,
sizeof(gmal_entry->prefix)) == 0) {
/*
 * If the IP address string terminates with a '/',
 * remove it. Byte 0 of the entry holds the length
 * of the string.
 */
rsp_str = &(gmal_entry->prefix[0]);
if (rsp_str[gmal_entry->len-1] == '/')
rsp_str[gmal_entry->len-1] = 0;
/* copy IP Address to fabric */
strncpy(bfa_fcs_lport_get_fabric_ipaddr(port),
gmal_entry->ip_addr,
BFA_FCS_FABRIC_IPADDR_SZ);
break;
} else {
--num_entries;
++gmal_entry;
}
}
bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
return;
}
bfa_trc(port->fcs, cthdr->reason_code);
bfa_trc(port->fcs, cthdr->exp_code);
bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
}
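/*
 * Layout assumed by the scan above (see struct fcgs_gmal_entry_s for
 * the authoritative definition): each management-address entry starts
 * with a length byte followed by a prefixed URL string such as
 * "http://<ip-address>/". The trailing '/' is stripped before the
 * address is copied into the fabric's IP-address field.
 */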
static void
bfa_fcs_lport_ms_sm_gfn_sending(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event)
{
bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
bfa_trc(ms->port->fcs, event);
switch (event) {
case MSSM_EVENT_FCXP_SENT:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn);
break;
case MSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
&ms->fcxp_wqe);
break;
default:
bfa_sm_fault(ms->port->fcs, event);
}
}
static void
bfa_fcs_lport_ms_sm_gfn(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event)
{
bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
bfa_trc(ms->port->fcs, event);
switch (event) {
case MSSM_EVENT_RSP_ERROR:
/*
* Start timer for a delayed retry
*/
if (ms->retry_cnt++ < BFA_FCS_MS_CMD_MAX_RETRIES) {
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_retry);
ms->port->stats.ms_retries++;
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ms->port),
&ms->timer, bfa_fcs_lport_ms_timeout, ms,
BFA_FCS_RETRY_TIMEOUT);
} else {
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
ms->retry_cnt = 0;
}
break;
case MSSM_EVENT_RSP_OK:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_online);
break;
case MSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
bfa_fcxp_discard(ms->fcxp);
break;
default:
bfa_sm_fault(ms->port->fcs, event);
}
}
static void
bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms,
enum port_ms_event event)
{
bfa_trc(ms->port->fcs, ms->port->port_cfg.pwwn);
bfa_trc(ms->port->fcs, event);
switch (event) {
case MSSM_EVENT_TIMEOUT:
/*
* Retry Timer Expired. Re-send
*/
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_gfn_sending);
bfa_fcs_lport_ms_send_gfn(ms, NULL);
break;
case MSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
bfa_timer_stop(&ms->timer);
break;
default:
bfa_sm_fault(ms->port->fcs, event);
}
}
/*
* ms_pvt MS local functions
*/
static void
bfa_fcs_lport_ms_send_gfn(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
bfa_fcs_lport_t *port = ms->port;
struct fchs_s fchs;
int len;
struct bfa_fcxp_s *fcxp;
bfa_trc(port->fcs, port->pid);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
bfa_fcs_lport_ms_send_gfn, ms);
return;
}
ms->fcxp = fcxp;
len = fc_gfn_req_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
bfa_fcs_lport_get_fcid(port),
port->fabric->lps->pr_nwwn);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
bfa_fcs_lport_ms_gfn_response, (void *)ms,
FC_MAX_PDUSZ, FC_FCCT_TOV);
bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
static void
bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
bfa_fcs_lport_t *port = ms->port;
struct ct_hdr_s *cthdr = NULL;
wwn_t *gfn_resp;
bfa_trc(port->fcs, req_status);
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
bfa_trc(port->fcs, req_status);
bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
return;
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
gfn_resp = (wwn_t *)(cthdr + 1);
/* check if it has actually changed */
if ((memcmp((void *)&bfa_fcs_lport_get_fabric_name(port),
gfn_resp, sizeof(wwn_t)) != 0)) {
bfa_fcs_fabric_set_fabric_name(port->fabric, *gfn_resp);
}
bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
return;
}
bfa_trc(port->fcs, cthdr->reason_code);
bfa_trc(port->fcs, cthdr->exp_code);
bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
}
/*
* ms_pvt MS local functions
*/
static void
bfa_fcs_lport_ms_send_plogi(void *ms_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_ms_s *ms = ms_cbarg;
struct bfa_fcs_lport_s *port = ms->port;
struct fchs_s fchs;
int len;
struct bfa_fcxp_s *fcxp;
bfa_trc(port->fcs, port->pid);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
port->stats.ms_plogi_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ms->fcxp_wqe,
bfa_fcs_lport_ms_send_plogi, ms);
return;
}
ms->fcxp = fcxp;
len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
bfa_hton3b(FC_MGMT_SERVER),
bfa_fcs_lport_get_fcid(port), 0,
port->port_cfg.pwwn, port->port_cfg.nwwn,
bfa_fcport_get_maxfrsize(port->fcs->bfa),
bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
bfa_fcs_lport_ms_plogi_response, (void *)ms,
FC_MAX_PDUSZ, FC_ELS_TOV);
port->stats.ms_plogi_sent++;
bfa_sm_send_event(ms, MSSM_EVENT_FCXP_SENT);
}
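/*
 * Note: FC_MGMT_SERVER is the management server well-known address
 * (0xFFFFFA); the name server PLOGI later in this file targets
 * FC_NAME_SERVER (0xFFFFFC).
 */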
static void
bfa_fcs_lport_ms_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status,
u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) cbarg;
struct bfa_fcs_lport_s *port = ms->port;
struct fc_els_cmd_s *els_cmd;
struct fc_ls_rjt_s *ls_rjt;
bfa_trc(port->fcs, req_status);
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
port->stats.ms_plogi_rsp_err++;
bfa_trc(port->fcs, req_status);
bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
return;
}
els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
switch (els_cmd->els_code) {
case FC_ELS_ACC:
if (rsp_len < sizeof(struct fc_logi_s)) {
bfa_trc(port->fcs, rsp_len);
port->stats.ms_plogi_acc_err++;
bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
break;
}
port->stats.ms_plogi_accepts++;
bfa_sm_send_event(ms, MSSM_EVENT_RSP_OK);
break;
case FC_ELS_LS_RJT:
ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
bfa_trc(port->fcs, ls_rjt->reason_code);
bfa_trc(port->fcs, ls_rjt->reason_code_expl);
port->stats.ms_rejects++;
bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
break;
default:
port->stats.ms_plogi_unknown_rsp++;
bfa_trc(port->fcs, els_cmd->els_code);
bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
}
}
static void
bfa_fcs_lport_ms_timeout(void *arg)
{
struct bfa_fcs_lport_ms_s *ms = (struct bfa_fcs_lport_ms_s *) arg;
ms->port->stats.ms_timeouts++;
bfa_sm_send_event(ms, MSSM_EVENT_TIMEOUT);
}
void
bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port)
{
struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
ms->port = port;
bfa_sm_set_state(ms, bfa_fcs_lport_ms_sm_offline);
/*
* Invoke init routines of sub modules.
*/
bfa_fcs_lport_fdmi_init(ms);
}
void
bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port)
{
struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
ms->port = port;
bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE);
bfa_fcs_lport_fdmi_offline(ms);
}
void
bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port)
{
struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
ms->port = port;
bfa_sm_send_event(ms, MSSM_EVENT_PORT_ONLINE);
}
void
bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port)
{
struct bfa_fcs_lport_ms_s *ms = BFA_FCS_GET_MS_FROM_PORT(port);
/* Process the fabric RSCN only when in the online state */
if (bfa_sm_cmp_state(ms, bfa_fcs_lport_ms_sm_online))
bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
}
/*
 * VPORT NS state machine; interactions and the state diagram are
 * documented in ns_sm.jpg.
 */
/*
* forward declarations
*/
static void bfa_fcs_lport_ns_send_plogi(void *ns_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_ns_timeout(void *arg);
static void bfa_fcs_lport_ns_plogi_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_ns_rspn_id_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_ns_rft_id_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_ns_rff_id_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_ns_gid_ft_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_ns_process_gidft_pids(
struct bfa_fcs_lport_s *port,
u32 *pid_buf, u32 n_pids);
static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port);
/*
* fcs_ns_sm FCS nameserver interface state machine
*/
/*
* VPort NS State Machine events
*/
enum vport_ns_event {
NSSM_EVENT_PORT_ONLINE = 1,
NSSM_EVENT_PORT_OFFLINE = 2,
NSSM_EVENT_PLOGI_SENT = 3,
NSSM_EVENT_RSP_OK = 4,
NSSM_EVENT_RSP_ERROR = 5,
NSSM_EVENT_TIMEOUT = 6,
NSSM_EVENT_NS_QUERY = 7,
NSSM_EVENT_RSPNID_SENT = 8,
NSSM_EVENT_RFTID_SENT = 9,
NSSM_EVENT_RFFID_SENT = 10,
NSSM_EVENT_GIDFT_SENT = 11,
};
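/*
 * Nameserver registration sequence driven by the handlers below:
 *
 *	PLOGI -> RSPN_ID -> RFT_ID -> RFF_ID -> GID_FT -> online
 *
 * Each step has a *_sending state (awaiting the FCXP-sent event), a
 * response-wait state, and a *_retry state armed by a timer on
 * RSP_ERROR. NSSM_EVENT_NS_QUERY re-runs GID_FT from the online state.
 */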
static void bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_sending_rspn_id(
struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_sending_rft_id(
struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_sending_rff_id(
struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_sending_gid_ft(
struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event);
/*
* Start in offline state - awaiting linkup
*/
static void
bfa_fcs_lport_ns_sm_offline(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_PORT_ONLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending);
bfa_fcs_lport_ns_send_plogi(ns, NULL);
break;
case NSSM_EVENT_PORT_OFFLINE:
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_plogi_sending(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_PLOGI_SENT:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->fcxp_wqe);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_plogi(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_RSP_ERROR:
/*
* Start timer for a delayed retry
*/
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_retry);
ns->port->stats.ns_retries++;
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->timer, bfa_fcs_lport_ns_timeout, ns,
BFA_FCS_RETRY_TIMEOUT);
break;
case NSSM_EVENT_RSP_OK:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_fcxp_discard(ns->fcxp);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_plogi_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_TIMEOUT:
/*
* Retry Timer Expired. Re-send
*/
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_plogi_sending);
bfa_fcs_lport_ns_send_plogi(ns, NULL);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_timer_stop(&ns->timer);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_sending_rspn_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_RSPNID_SENT:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->fcxp_wqe);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_rspn_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_RSP_ERROR:
/*
* Start timer for a delayed retry
*/
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rspn_id_retry);
ns->port->stats.ns_retries++;
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->timer, bfa_fcs_lport_ns_timeout, ns,
BFA_FCS_RETRY_TIMEOUT);
break;
case NSSM_EVENT_RSP_OK:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id);
bfa_fcs_lport_ns_send_rft_id(ns, NULL);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_fcxp_discard(ns->fcxp);
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_rspn_id_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_TIMEOUT:
/*
* Retry Timer Expired. Re-send
*/
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rspn_id);
bfa_fcs_lport_ns_send_rspn_id(ns, NULL);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_timer_stop(&ns->timer);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_sending_rft_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_RFTID_SENT:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->fcxp_wqe);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_rft_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_RSP_OK:
/* Now move to register FC4 Features */
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id);
bfa_fcs_lport_ns_send_rff_id(ns, NULL);
break;
case NSSM_EVENT_RSP_ERROR:
/*
* Start timer for a delayed retry
*/
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rft_id_retry);
ns->port->stats.ns_retries++;
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->timer, bfa_fcs_lport_ns_timeout, ns,
BFA_FCS_RETRY_TIMEOUT);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_fcxp_discard(ns->fcxp);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_rft_id_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_TIMEOUT:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rft_id);
bfa_fcs_lport_ns_send_rft_id(ns, NULL);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_timer_stop(&ns->timer);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_sending_rff_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_RFFID_SENT:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->fcxp_wqe);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_rff_id(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_RSP_OK:
/*
 * If min-cfg mode is enabled, we do not initiate rport
 * discovery with the fabric. Instead, we retrieve the
 * boot targets from HAL/FW.
 */
if (__fcs_min_cfg(ns->port->fcs)) {
bfa_fcs_lport_ns_boot_target_disc(ns->port);
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online);
return;
}
/*
 * If the port role is initiator mode, issue the NS query.
 * If it is target mode, skip the query.
 */
if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
bfa_sm_set_state(ns,
bfa_fcs_lport_ns_sm_sending_gid_ft);
bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
}
/*
* kick off mgmt srvr state machine
*/
bfa_fcs_lport_ms_online(ns->port);
break;
case NSSM_EVENT_RSP_ERROR:
/*
* Start timer for a delayed retry
*/
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_rff_id_retry);
ns->port->stats.ns_retries++;
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->timer, bfa_fcs_lport_ns_timeout, ns,
BFA_FCS_RETRY_TIMEOUT);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_fcxp_discard(ns->fcxp);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_rff_id_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_TIMEOUT:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_rff_id);
bfa_fcs_lport_ns_send_rff_id(ns, NULL);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_timer_stop(&ns->timer);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_sending_gid_ft(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_GIDFT_SENT:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_fcxp_walloc_cancel(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->fcxp_wqe);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_gid_ft(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_RSP_OK:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_online);
break;
case NSSM_EVENT_RSP_ERROR:
/*
* TBD: for certain reject codes, we don't need to retry
*/
/*
* Start timer for a delayed retry
*/
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_gid_ft_retry);
ns->port->stats.ns_retries++;
bfa_timer_start(BFA_FCS_GET_HAL_FROM_PORT(ns->port),
&ns->timer, bfa_fcs_lport_ns_timeout, ns,
BFA_FCS_RETRY_TIMEOUT);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_fcxp_discard(ns->fcxp);
break;
case NSSM_EVENT_NS_QUERY:
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_TIMEOUT:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_sending_gid_ft);
bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
break;
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
bfa_timer_stop(&ns->timer);
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
static void
bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
enum vport_ns_event event)
{
bfa_trc(ns->port->fcs, ns->port->port_cfg.pwwn);
bfa_trc(ns->port->fcs, event);
switch (event) {
case NSSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
break;
case NSSM_EVENT_NS_QUERY:
/*
 * If the port role is initiator mode, re-issue the NS query.
 * If it is target mode, skip the query and stay online.
 */
if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port)) {
bfa_sm_set_state(ns,
bfa_fcs_lport_ns_sm_sending_gid_ft);
bfa_fcs_lport_ns_send_gid_ft(ns, NULL);
}
break;
default:
bfa_sm_fault(ns->port->fcs, event);
}
}
/*
* ns_pvt Nameserver local functions
*/
static void
bfa_fcs_lport_ns_send_plogi(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct fchs_s fchs;
int len;
struct bfa_fcxp_s *fcxp;
bfa_trc(port->fcs, port->pid);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
port->stats.ns_plogi_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
bfa_fcs_lport_ns_send_plogi, ns);
return;
}
ns->fcxp = fcxp;
len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
bfa_hton3b(FC_NAME_SERVER),
bfa_fcs_lport_get_fcid(port), 0,
port->port_cfg.pwwn, port->port_cfg.nwwn,
bfa_fcport_get_maxfrsize(port->fcs->bfa),
bfa_fcport_get_rx_bbcredit(port->fcs->bfa));
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
bfa_fcs_lport_ns_plogi_response, (void *)ns,
FC_MAX_PDUSZ, FC_ELS_TOV);
port->stats.ns_plogi_sent++;
bfa_sm_send_event(ns, NSSM_EVENT_PLOGI_SENT);
}
static void
bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct fc_els_cmd_s *els_cmd;
struct fc_ls_rjt_s *ls_rjt;
bfa_trc(port->fcs, req_status);
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
bfa_trc(port->fcs, req_status);
port->stats.ns_plogi_rsp_err++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
return;
}
els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
switch (els_cmd->els_code) {
case FC_ELS_ACC:
if (rsp_len < sizeof(struct fc_logi_s)) {
bfa_trc(port->fcs, rsp_len);
port->stats.ns_plogi_acc_err++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
break;
}
port->stats.ns_plogi_accepts++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
break;
case FC_ELS_LS_RJT:
ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
bfa_trc(port->fcs, ls_rjt->reason_code);
bfa_trc(port->fcs, ls_rjt->reason_code_expl);
port->stats.ns_rejects++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
break;
default:
port->stats.ns_plogi_unknown_rsp++;
bfa_trc(port->fcs, els_cmd->els_code);
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
}
/*
* Register the symbolic port name.
*/
static void
bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct fchs_s fchs;
int len;
struct bfa_fcxp_s *fcxp;
u8 symbl[256];
u8 *psymbl = &symbl[0];
memset(symbl, 0, sizeof(symbl));
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
port->stats.ns_rspnid_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
bfa_fcs_lport_ns_send_rspn_id, ns);
return;
}
ns->fcxp = fcxp;
/*
* for V-Port, form a Port Symbolic Name
*/
if (port->vport) {
/*
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
strncpy((char *)psymbl,
(char *) &
(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
strlen((char *) &
bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
(port->fcs))));
/* Ensure the string is null terminated. */
((char *)psymbl)[strlen((char *) &
bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
(port->fcs)))] = 0;
strncat((char *)psymbl,
(char *) &(bfa_fcs_lport_get_psym_name(port)),
strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
} else {
psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
}
len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
bfa_fcs_lport_get_fcid(port), 0, psymbl);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
bfa_fcs_lport_ns_rspn_id_response, (void *)ns,
FC_MAX_PDUSZ, FC_FCCT_TOV);
port->stats.ns_rspnid_sent++;
bfa_sm_send_event(ns, NSSM_EVENT_RSPNID_SENT);
}
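/*
 * Example (hypothetical names): if the base port's symbolic name is
 * "base-sym" and the vport's is "vport-sym", the RSPN_ID payload for
 * the vport carries "base-symvport-sym", i.e. the two symbolic names
 * concatenated with no extra separator inserted here.
 */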
static void
bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status,
u32 rsp_len, u32 resid_len,
struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct ct_hdr_s *cthdr = NULL;
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
bfa_trc(port->fcs, req_status);
port->stats.ns_rspnid_rsp_err++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
return;
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
port->stats.ns_rspnid_accepts++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
return;
}
port->stats.ns_rspnid_rejects++;
bfa_trc(port->fcs, cthdr->reason_code);
bfa_trc(port->fcs, cthdr->exp_code);
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
/*
* Register FC4-Types
*/
static void
bfa_fcs_lport_ns_send_rft_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct fchs_s fchs;
int len;
struct bfa_fcxp_s *fcxp;
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
port->stats.ns_rftid_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
bfa_fcs_lport_ns_send_rft_id, ns);
return;
}
ns->fcxp = fcxp;
len = fc_rftid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
bfa_fcs_lport_get_fcid(port), 0, port->port_cfg.roles);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
bfa_fcs_lport_ns_rft_id_response, (void *)ns,
FC_MAX_PDUSZ, FC_FCCT_TOV);
port->stats.ns_rftid_sent++;
bfa_sm_send_event(ns, NSSM_EVENT_RFTID_SENT);
}
static void
bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status,
u32 rsp_len, u32 resid_len,
struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct ct_hdr_s *cthdr = NULL;
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
bfa_trc(port->fcs, req_status);
port->stats.ns_rftid_rsp_err++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
return;
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
port->stats.ns_rftid_accepts++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
return;
}
port->stats.ns_rftid_rejects++;
bfa_trc(port->fcs, cthdr->reason_code);
bfa_trc(port->fcs, cthdr->exp_code);
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
/*
* Register FC4-Features : Should be done after RFT_ID
*/
static void
bfa_fcs_lport_ns_send_rff_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct fchs_s fchs;
int len;
struct bfa_fcxp_s *fcxp;
u8 fc4_ftrs = 0;
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
port->stats.ns_rffid_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
bfa_fcs_lport_ns_send_rff_id, ns);
return;
}
ns->fcxp = fcxp;
if (BFA_FCS_VPORT_IS_INITIATOR_MODE(ns->port))
fc4_ftrs = FC_GS_FCP_FC4_FEATURE_INITIATOR;
len = fc_rffid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
bfa_fcs_lport_get_fcid(port), 0,
FC_TYPE_FCP, fc4_ftrs);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
bfa_fcs_lport_ns_rff_id_response, (void *)ns,
FC_MAX_PDUSZ, FC_FCCT_TOV);
port->stats.ns_rffid_sent++;
bfa_sm_send_event(ns, NSSM_EVENT_RFFID_SENT);
}
static void
bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status,
u32 rsp_len, u32 resid_len,
struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct ct_hdr_s *cthdr = NULL;
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
bfa_trc(port->fcs, req_status);
port->stats.ns_rffid_rsp_err++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
return;
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
port->stats.ns_rffid_accepts++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
return;
}
port->stats.ns_rffid_rejects++;
bfa_trc(port->fcs, cthdr->reason_code);
bfa_trc(port->fcs, cthdr->exp_code);
if (cthdr->reason_code == CT_RSN_NOT_SUPP) {
/* if this command is not supported, we don't retry */
bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
} else
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
/*
* Query Fabric for FC4-Types Devices.
*
* TBD : Need to use a local (FCS private) response buffer, since the response
* can be larger than 2K.
*/
static void
bfa_fcs_lport_ns_send_gid_ft(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_ns_s *ns = ns_cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct fchs_s fchs;
int len;
struct bfa_fcxp_s *fcxp;
bfa_trc(port->fcs, port->pid);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
port->stats.ns_gidft_alloc_wait++;
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &ns->fcxp_wqe,
bfa_fcs_lport_ns_send_gid_ft, ns);
return;
}
ns->fcxp = fcxp;
/*
* This query is only initiated for FCP initiator mode.
*/
len = fc_gid_ft_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
ns->port->pid, FC_TYPE_FCP);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
bfa_fcs_lport_ns_gid_ft_response, (void *)ns,
bfa_fcxp_get_maxrsp(port->fcs->bfa), FC_FCCT_TOV);
port->stats.ns_gidft_sent++;
bfa_sm_send_event(ns, NSSM_EVENT_GIDFT_SENT);
}
static void
bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status,
u32 rsp_len, u32 resid_len,
struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) cbarg;
struct bfa_fcs_lport_s *port = ns->port;
struct ct_hdr_s *cthdr = NULL;
u32 n_pids;
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
bfa_trc(port->fcs, req_status);
port->stats.ns_gidft_rsp_err++;
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
return;
}
if (resid_len != 0) {
/*
* TBD : we will need to allocate a larger buffer & retry the
* command
*/
bfa_trc(port->fcs, rsp_len);
bfa_trc(port->fcs, resid_len);
return;
}
cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
switch (cthdr->cmd_rsp_code) {
case CT_RSP_ACCEPT:
port->stats.ns_gidft_accepts++;
n_pids = (fc_get_ctresp_pyld_len(rsp_len) / sizeof(u32));
bfa_trc(port->fcs, n_pids);
bfa_fcs_lport_ns_process_gidft_pids(port,
(u32 *) (cthdr + 1),
n_pids);
bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
break;
case CT_RSP_REJECT:
/*
 * Check the reason code & explanation.
 * There may not have been any FC4 devices in the fabric.
 */
port->stats.ns_gidft_rejects++;
bfa_trc(port->fcs, cthdr->reason_code);
bfa_trc(port->fcs, cthdr->exp_code);
if ((cthdr->reason_code == CT_RSN_UNABLE_TO_PERF)
&& (cthdr->exp_code == CT_NS_EXP_FT_NOT_REG)) {
bfa_sm_send_event(ns, NSSM_EVENT_RSP_OK);
} else {
/*
* for all other errors, retry
*/
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
break;
default:
port->stats.ns_gidft_unknown_rsp++;
bfa_trc(port->fcs, cthdr->cmd_rsp_code);
bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
}
}
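/*
 * GID_FT accept payload walked above (assuming the usual FC-GS
 * encoding; see struct fcgs_gidft_resp_s): a packed array of 4-byte
 * entries, each a control byte whose 'last' bit marks the final
 * entry, followed by a 24-bit port ID. Hence n_pids is the CT
 * response payload length divided by sizeof(u32).
 */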
/*
 * Called by bfa_timer on retry-timer timeouts.
 *
 * param[in] arg - pointer to struct bfa_fcs_lport_ns_s
 */
static void
bfa_fcs_lport_ns_timeout(void *arg)
{
struct bfa_fcs_lport_ns_s *ns = (struct bfa_fcs_lport_ns_s *) arg;
ns->port->stats.ns_timeouts++;
bfa_sm_send_event(ns, NSSM_EVENT_TIMEOUT);
}
/*
* Process the PID list in GID_FT response
*/
static void
bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf,
u32 n_pids)
{
struct fcgs_gidft_resp_s *gidft_entry;
struct bfa_fcs_rport_s *rport;
u32 ii;
for (ii = 0; ii < n_pids; ii++) {
gidft_entry = (struct fcgs_gidft_resp_s *) &pid_buf[ii];
if (gidft_entry->pid == port->pid)
continue;
/*
* Check if this rport already exists
*/
rport = bfa_fcs_lport_get_rport_by_pid(port, gidft_entry->pid);
if (rport == NULL) {
/*
* this is a new device. create rport
*/
rport = bfa_fcs_rport_create(port, gidft_entry->pid);
} else {
/*
* this rport already exists
*/
bfa_fcs_rport_scn(rport);
}
bfa_trc(port->fcs, gidft_entry->pid);
/*
* if the last entry bit is set, bail out.
*/
if (gidft_entry->last)
return;
}
}
/*
* fcs_ns_public FCS nameserver public interfaces
*/
/*
* Functions called by port/fab.
* These will send relevant Events to the ns state machine.
*/
void
bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *port)
{
struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
ns->port = port;
bfa_sm_set_state(ns, bfa_fcs_lport_ns_sm_offline);
}
void
bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *port)
{
struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
ns->port = port;
bfa_sm_send_event(ns, NSSM_EVENT_PORT_OFFLINE);
}
void
bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *port)
{
struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
ns->port = port;
bfa_sm_send_event(ns, NSSM_EVENT_PORT_ONLINE);
}
void
bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port)
{
struct bfa_fcs_lport_ns_s *ns = BFA_FCS_GET_NS_FROM_PORT(port);
bfa_trc(port->fcs, port->pid);
bfa_sm_send_event(ns, NSSM_EVENT_NS_QUERY);
}
static void
bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port)
{
struct bfa_fcs_rport_s *rport;
u8 nwwns;
wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
int ii;
bfa_iocfc_get_bootwwns(port->fcs->bfa, &nwwns, wwns);
for (ii = 0 ; ii < nwwns; ++ii) {
rport = bfa_fcs_rport_create_by_wwn(port, wwns[ii]);
WARN_ON(!rport);
}
}
/*
* FCS SCN
*/
#define FC_QOS_RSCN_EVENT 0x0c
#define FC_FABRIC_NAME_RSCN_EVENT 0x0d
/*
* forward declarations
*/
static void bfa_fcs_lport_scn_send_scr(void *scn_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_lport_scn_scr_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_fchs);
static void bfa_fcs_lport_scn_timeout(void *arg);
/*
* fcs_scm_sm FCS SCN state machine
*/
/*
* VPort SCN State Machine events
*/
enum port_scn_event {
SCNSM_EVENT_PORT_ONLINE = 1,
SCNSM_EVENT_PORT_OFFLINE = 2,
SCNSM_EVENT_RSP_OK = 3,
SCNSM_EVENT_RSP_ERROR = 4,
SCNSM_EVENT_TIMEOUT = 5,
SCNSM_EVENT_SCR_SENT = 6,
};
static void bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event);
static void bfa_fcs_lport_scn_sm_sending_scr(
struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event);
static void bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event);
static void bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event);
static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event);
/*
* Starting state - awaiting link up.
*/
static void
bfa_fcs_lport_scn_sm_offline(struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event)
{
switch (event) {
case SCNSM_EVENT_PORT_ONLINE:
bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr);
bfa_fcs_lport_scn_send_scr(scn, NULL);
break;
case SCNSM_EVENT_PORT_OFFLINE:
break;
default:
bfa_sm_fault(scn->port->fcs, event);
}
}
static void
bfa_fcs_lport_scn_sm_sending_scr(struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event)
{
switch (event) {
case SCNSM_EVENT_SCR_SENT:
bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr);
break;
case SCNSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
bfa_fcxp_walloc_cancel(scn->port->fcs->bfa, &scn->fcxp_wqe);
break;
default:
bfa_sm_fault(scn->port->fcs, event);
}
}
static void
bfa_fcs_lport_scn_sm_scr(struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event)
{
struct bfa_fcs_lport_s *port = scn->port;
switch (event) {
case SCNSM_EVENT_RSP_OK:
bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_online);
break;
case SCNSM_EVENT_RSP_ERROR:
bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_scr_retry);
bfa_timer_start(port->fcs->bfa, &scn->timer,
bfa_fcs_lport_scn_timeout, scn,
BFA_FCS_RETRY_TIMEOUT);
break;
case SCNSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
bfa_fcxp_discard(scn->fcxp);
break;
default:
bfa_sm_fault(port->fcs, event);
}
}
static void
bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event)
{
switch (event) {
case SCNSM_EVENT_TIMEOUT:
bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_sending_scr);
bfa_fcs_lport_scn_send_scr(scn, NULL);
break;
case SCNSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
bfa_timer_stop(&scn->timer);
break;
default:
bfa_sm_fault(scn->port->fcs, event);
}
}
static void
bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
enum port_scn_event event)
{
switch (event) {
case SCNSM_EVENT_PORT_OFFLINE:
bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
break;
default:
bfa_sm_fault(scn->port->fcs, event);
}
}
/*
* fcs_scn_private FCS SCN private functions
*/
/*
* This routine will be called to send a SCR command.
*/
static void
bfa_fcs_lport_scn_send_scr(void *scn_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
struct bfa_fcs_lport_scn_s *scn = scn_cbarg;
struct bfa_fcs_lport_s *port = scn->port;
struct fchs_s fchs;
int len;
struct bfa_fcxp_s *fcxp;
bfa_trc(port->fcs, port->pid);
bfa_trc(port->fcs, port->port_cfg.pwwn);
fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp) {
bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &scn->fcxp_wqe,
bfa_fcs_lport_scn_send_scr, scn);
return;
}
scn->fcxp = fcxp;
/* Handle VU registrations for Base port only */
if ((!port->vport) && bfa_ioc_get_fcmode(&port->fcs->bfa->ioc)) {
len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
port->fabric->lps->brcd_switch,
port->pid, 0);
} else {
len = fc_scr_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
BFA_FALSE,
port->pid, 0);
}
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs,
bfa_fcs_lport_scn_scr_response,
(void *)scn, FC_MAX_PDUSZ, FC_ELS_TOV);
bfa_sm_send_event(scn, SCNSM_EVENT_SCR_SENT);
}
static void
bfa_fcs_lport_scn_scr_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
void *cbarg, bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs)
{
struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) cbarg;
struct bfa_fcs_lport_s *port = scn->port;
struct fc_els_cmd_s *els_cmd;
struct fc_ls_rjt_s *ls_rjt;
bfa_trc(port->fcs, port->port_cfg.pwwn);
/*
* Sanity Checks
*/
if (req_status != BFA_STATUS_OK) {
bfa_trc(port->fcs, req_status);
bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
return;
}
els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp);
switch (els_cmd->els_code) {
case FC_ELS_ACC:
bfa_sm_send_event(scn, SCNSM_EVENT_RSP_OK);
break;
case FC_ELS_LS_RJT:
ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
bfa_trc(port->fcs, ls_rjt->reason_code);
bfa_trc(port->fcs, ls_rjt->reason_code_expl);
bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
break;
default:
bfa_sm_send_event(scn, SCNSM_EVENT_RSP_ERROR);
}
}
/*
* Send a LS Accept
*/
static void
bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port,
struct fchs_s *rx_fchs)
{
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
struct bfa_rport_s *bfa_rport = NULL;
int len;
bfa_trc(port->fcs, rx_fchs->s_id);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp)
return;
len = fc_ls_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
rx_fchs->ox_id);
bfa_fcxp_send(fcxp, bfa_rport, port->fabric->vf_id, port->lp_tag,
BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
FC_MAX_PDUSZ, 0);
}
/*
* This routine will be called by bfa_timer on timer timeouts.
*
 * 	param[in]	arg - pointer to struct bfa_fcs_lport_scn_s
 *
 * 	return
 * 		void
*/
static void
bfa_fcs_lport_scn_timeout(void *arg)
{
struct bfa_fcs_lport_scn_s *scn = (struct bfa_fcs_lport_scn_s *) arg;
bfa_sm_send_event(scn, SCNSM_EVENT_TIMEOUT);
}
/*
* fcs_scn_public FCS state change notification public interfaces
*/
/*
* Functions called by port/fab
*/
void
bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *port)
{
struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
scn->port = port;
bfa_sm_set_state(scn, bfa_fcs_lport_scn_sm_offline);
}
void
bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *port)
{
struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
scn->port = port;
bfa_sm_send_event(scn, SCNSM_EVENT_PORT_OFFLINE);
}
void
bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *port)
{
struct bfa_fcs_lport_scn_s *scn = BFA_FCS_GET_SCN_FROM_PORT(port);
scn->port = port;
bfa_sm_send_event(scn, SCNSM_EVENT_PORT_ONLINE);
}
static void
bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid)
{
struct bfa_fcs_rport_s *rport;
bfa_trc(port->fcs, rpid);
/*
* If this is an unknown device, then it just came online.
* Otherwise let rport handle the RSCN event.
*/
rport = bfa_fcs_lport_get_rport_by_pid(port, rpid);
if (rport == NULL) {
/*
		 * If min cfg mode is enabled, we do not need to
* discover any new rports.
*/
if (!__fcs_min_cfg(port->fcs))
rport = bfa_fcs_rport_create(port, rpid);
} else
bfa_fcs_rport_scn(rport);
}
/*
* rscn format based PID comparison
*/
#define __fc_pid_match(__c0, __c1, __fmt) \
(((__fmt) == FC_RSCN_FORMAT_FABRIC) || \
(((__fmt) == FC_RSCN_FORMAT_DOMAIN) && \
((__c0)[0] == (__c1)[0])) || \
(((__fmt) == FC_RSCN_FORMAT_AREA) && \
((__c0)[0] == (__c1)[0]) && \
((__c0)[1] == (__c1)[1])))
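/*
 * Illustration (assuming PIDs are kept in wire/big-endian order, as the
 * bfa_ntoh3b() conversions elsewhere suggest): byte 0 of the 24-bit
 * address is the domain and byte 1 is the area. For an
 * FC_RSCN_FORMAT_AREA event with PID 0x0A0B00 the macro reduces to
 * ((__c0)[0] == (__c1)[0]) && ((__c0)[1] == (__c1)[1]), i.e. it matches
 * every rport whose PID is of the form 0x0A0Bxx.
 */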
static void
bfa_fcs_lport_scn_multiport_rscn(struct bfa_fcs_lport_s *port,
enum fc_rscn_format format,
u32 rscn_pid)
{
struct bfa_fcs_rport_s *rport;
struct list_head *qe, *qe_next;
u8 *c0, *c1;
bfa_trc(port->fcs, format);
bfa_trc(port->fcs, rscn_pid);
c0 = (u8 *) &rscn_pid;
list_for_each_safe(qe, qe_next, &port->rport_q) {
rport = (struct bfa_fcs_rport_s *) qe;
c1 = (u8 *) &rport->pid;
if (__fc_pid_match(c0, c1, format))
bfa_fcs_rport_scn(rport);
}
}
void
bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
struct fchs_s *fchs, u32 len)
{
struct fc_rscn_pl_s *rscn = (struct fc_rscn_pl_s *) (fchs + 1);
int num_entries;
u32 rscn_pid;
bfa_boolean_t nsquery = BFA_FALSE, found;
int i = 0, j;
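	/*
	 * The RSCN payload length includes the 4-byte command/page-length
	 * word plus one 4-byte affected-port-ID page per event; subtract
	 * the header word before dividing by the page size.
	 */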
num_entries =
(be16_to_cpu(rscn->payldlen) -
sizeof(u32)) / sizeof(rscn->event[0]);
bfa_trc(port->fcs, num_entries);
port->stats.num_rscn++;
bfa_fcs_lport_scn_send_ls_acc(port, fchs);
for (i = 0; i < num_entries; i++) {
rscn_pid = rscn->event[i].portid;
bfa_trc(port->fcs, rscn->event[i].format);
bfa_trc(port->fcs, rscn_pid);
/* check for duplicate entries in the list */
found = BFA_FALSE;
for (j = 0; j < i; j++) {
if (rscn->event[j].portid == rscn_pid) {
found = BFA_TRUE;
break;
}
}
		/* if found earlier in the list, the pid has already been processed */
if (found) {
bfa_trc(port->fcs, rscn_pid);
continue;
}
switch (rscn->event[i].format) {
case FC_RSCN_FORMAT_PORTID:
if (rscn->event[i].qualifier == FC_QOS_RSCN_EVENT) {
/*
* Ignore this event.
* f/w would have processed it
*/
bfa_trc(port->fcs, rscn_pid);
} else {
port->stats.num_portid_rscn++;
bfa_fcs_lport_scn_portid_rscn(port, rscn_pid);
}
break;
case FC_RSCN_FORMAT_FABRIC:
if (rscn->event[i].qualifier ==
FC_FABRIC_NAME_RSCN_EVENT) {
bfa_fcs_lport_ms_fabric_rscn(port);
break;
}
/* !!!!!!!!! Fall Through !!!!!!!!!!!!! */
case FC_RSCN_FORMAT_AREA:
case FC_RSCN_FORMAT_DOMAIN:
nsquery = BFA_TRUE;
bfa_fcs_lport_scn_multiport_rscn(port,
rscn->event[i].format,
rscn_pid);
break;
default:
WARN_ON(1);
nsquery = BFA_TRUE;
}
}
/*
* If any of area, domain or fabric RSCN is received, do a fresh
* discovery to find new devices.
*/
if (nsquery)
bfa_fcs_lport_ns_query(port);
}
/*
* BFA FCS port
*/
/*
* fcs_port_api BFA FCS port API
*/
struct bfa_fcs_lport_s *
bfa_fcs_get_base_port(struct bfa_fcs_s *fcs)
{
return &fcs->fabric.bport;
}
wwn_t
bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn, int index,
int nrports, bfa_boolean_t bwwn)
{
struct list_head *qh, *qe;
struct bfa_fcs_rport_s *rport = NULL;
int i;
struct bfa_fcs_s *fcs;
if (port == NULL || nrports == 0)
return (wwn_t) 0;
fcs = port->fcs;
bfa_trc(fcs, (u32) nrports);
i = 0;
qh = &port->rport_q;
qe = bfa_q_first(qh);
while ((qe != qh) && (i < nrports)) {
rport = (struct bfa_fcs_rport_s *) qe;
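		/*
		 * Skip rports at well-known fabric addresses (PIDs above
		 * 0xFFF000), such as the fabric controller and the name
		 * server.
		 */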
if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
qe = bfa_q_next(qe);
bfa_trc(fcs, (u32) rport->pwwn);
bfa_trc(fcs, rport->pid);
bfa_trc(fcs, i);
continue;
}
if (bwwn) {
if (!memcmp(&wwn, &rport->pwwn, 8))
break;
} else {
if (i == index)
break;
}
i++;
qe = bfa_q_next(qe);
}
bfa_trc(fcs, i);
if (rport)
return rport->pwwn;
else
return (wwn_t) 0;
}
void
bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
wwn_t rport_wwns[], int *nrports)
{
struct list_head *qh, *qe;
struct bfa_fcs_rport_s *rport = NULL;
int i;
struct bfa_fcs_s *fcs;
if (port == NULL || rport_wwns == NULL || *nrports == 0)
return;
fcs = port->fcs;
bfa_trc(fcs, (u32) *nrports);
i = 0;
qh = &port->rport_q;
qe = bfa_q_first(qh);
while ((qe != qh) && (i < *nrports)) {
rport = (struct bfa_fcs_rport_s *) qe;
if (bfa_ntoh3b(rport->pid) > 0xFFF000) {
qe = bfa_q_next(qe);
bfa_trc(fcs, (u32) rport->pwwn);
bfa_trc(fcs, rport->pid);
bfa_trc(fcs, i);
continue;
}
rport_wwns[i] = rport->pwwn;
i++;
qe = bfa_q_next(qe);
}
bfa_trc(fcs, i);
*nrports = i;
}
/*
 * Iterates through all the rports in the given port to
* determine the maximum operating speed.
*
* !!!! To be used in TRL Functionality only !!!!
*/
bfa_port_speed_t
bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
{
struct list_head *qh, *qe;
struct bfa_fcs_rport_s *rport = NULL;
struct bfa_fcs_s *fcs;
bfa_port_speed_t max_speed = 0;
struct bfa_port_attr_s port_attr;
bfa_port_speed_t port_speed, rport_speed;
bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
if (port == NULL)
return 0;
fcs = port->fcs;
/* Get Physical port's current speed */
bfa_fcport_get_attr(port->fcs->bfa, &port_attr);
port_speed = port_attr.speed;
bfa_trc(fcs, port_speed);
qh = &port->rport_q;
qe = bfa_q_first(qh);
while (qe != qh) {
rport = (struct bfa_fcs_rport_s *) qe;
if ((bfa_ntoh3b(rport->pid) > 0xFFF000) ||
(bfa_fcs_rport_get_state(rport) == BFA_RPORT_OFFLINE) ||
(rport->scsi_function != BFA_RPORT_TARGET)) {
qe = bfa_q_next(qe);
continue;
}
rport_speed = rport->rpf.rpsc_speed;
if ((trl_enabled) && (rport_speed ==
BFA_PORT_SPEED_UNKNOWN)) {
/* Use default ratelim speed setting */
rport_speed =
bfa_fcport_get_ratelim_speed(port->fcs->bfa);
}
if (rport_speed > max_speed)
max_speed = rport_speed;
qe = bfa_q_next(qe);
}
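	/* An rport cannot operate faster than the physical port itself. */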
if (max_speed > port_speed)
max_speed = port_speed;
bfa_trc(fcs, max_speed);
return max_speed;
}
struct bfa_fcs_lport_s *
bfa_fcs_lookup_port(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t lpwwn)
{
struct bfa_fcs_vport_s *vport;
bfa_fcs_vf_t *vf;
WARN_ON(fcs == NULL);
vf = bfa_fcs_vf_lookup(fcs, vf_id);
if (vf == NULL) {
bfa_trc(fcs, vf_id);
return NULL;
}
if (!lpwwn || (vf->bport.port_cfg.pwwn == lpwwn))
return &vf->bport;
vport = bfa_fcs_fabric_vport_lookup(vf, lpwwn);
if (vport)
return &vport->lport;
return NULL;
}
/*
* API corresponding to NPIV_VPORT_GETINFO.
*/
void
bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
struct bfa_lport_info_s *port_info)
{
bfa_trc(port->fcs, port->fabric->fabric_name);
if (port->vport == NULL) {
/*
* This is a Physical port
*/
port_info->port_type = BFA_LPORT_TYPE_PHYSICAL;
/*
* @todo : need to fix the state & reason
*/
port_info->port_state = 0;
port_info->offline_reason = 0;
port_info->port_wwn = bfa_fcs_lport_get_pwwn(port);
port_info->node_wwn = bfa_fcs_lport_get_nwwn(port);
port_info->max_vports_supp =
bfa_lps_get_max_vport(port->fcs->bfa);
port_info->num_vports_inuse =
port->fabric->num_vports;
port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
port_info->num_rports_inuse = port->num_rports;
} else {
/*
* This is a virtual port
*/
port_info->port_type = BFA_LPORT_TYPE_VIRTUAL;
/*
* @todo : need to fix the state & reason
*/
port_info->port_state = 0;
port_info->offline_reason = 0;
port_info->port_wwn = bfa_fcs_lport_get_pwwn(port);
port_info->node_wwn = bfa_fcs_lport_get_nwwn(port);
}
}
void
bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
struct bfa_lport_stats_s *port_stats)
{
*port_stats = fcs_port->stats;
}
void
bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port)
{
memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
}
/*
* FCS virtual port state machine
*/
#define __vport_fcs(__vp) ((__vp)->lport.fcs)
#define __vport_pwwn(__vp) ((__vp)->lport.port_cfg.pwwn)
#define __vport_nwwn(__vp) ((__vp)->lport.port_cfg.nwwn)
#define __vport_bfa(__vp) ((__vp)->lport.fcs->bfa)
#define __vport_fcid(__vp) ((__vp)->lport.pid)
#define __vport_fabric(__vp) ((__vp)->lport.fabric)
#define __vport_vfid(__vp) ((__vp)->lport.fabric->vf_id)
#define BFA_FCS_VPORT_MAX_RETRIES 5
/*
* Forward declarations
*/
static void bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport);
static void bfa_fcs_vport_timeout(void *vport_arg);
static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
/*
* fcs_vport_sm FCS virtual port state machine
*/
/*
* VPort State Machine events
*/
enum bfa_fcs_vport_event {
BFA_FCS_VPORT_SM_CREATE = 1, /* vport create event */
BFA_FCS_VPORT_SM_DELETE = 2, /* vport delete event */
BFA_FCS_VPORT_SM_START = 3, /* vport start request */
BFA_FCS_VPORT_SM_STOP = 4, /* stop: unsupported */
BFA_FCS_VPORT_SM_ONLINE = 5, /* fabric online */
BFA_FCS_VPORT_SM_OFFLINE = 6, /* fabric offline event */
BFA_FCS_VPORT_SM_FRMSENT = 7, /* fdisc/logo sent events */
BFA_FCS_VPORT_SM_RSP_OK = 8, /* good response */
BFA_FCS_VPORT_SM_RSP_ERROR = 9, /* error/bad response */
BFA_FCS_VPORT_SM_TIMEOUT = 10, /* delay timer event */
BFA_FCS_VPORT_SM_DELCOMP = 11, /* lport delete completion */
	BFA_FCS_VPORT_SM_RSP_DUP_WWN = 12,	/* Dup wwn error */
BFA_FCS_VPORT_SM_RSP_FAILED = 13, /* non-retryable failure */
	BFA_FCS_VPORT_SM_STOPCOMP = 14,	/* vport stop completion */
};
static void bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static void bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event);
static struct bfa_sm_table_s vport_sm_table[] = {
{BFA_SM(bfa_fcs_vport_sm_uninit), BFA_FCS_VPORT_UNINIT},
{BFA_SM(bfa_fcs_vport_sm_created), BFA_FCS_VPORT_CREATED},
{BFA_SM(bfa_fcs_vport_sm_offline), BFA_FCS_VPORT_OFFLINE},
{BFA_SM(bfa_fcs_vport_sm_fdisc), BFA_FCS_VPORT_FDISC},
{BFA_SM(bfa_fcs_vport_sm_fdisc_retry), BFA_FCS_VPORT_FDISC_RETRY},
{BFA_SM(bfa_fcs_vport_sm_fdisc_rsp_wait), BFA_FCS_VPORT_FDISC_RSP_WAIT},
{BFA_SM(bfa_fcs_vport_sm_online), BFA_FCS_VPORT_ONLINE},
{BFA_SM(bfa_fcs_vport_sm_deleting), BFA_FCS_VPORT_DELETING},
{BFA_SM(bfa_fcs_vport_sm_cleanup), BFA_FCS_VPORT_CLEANUP},
{BFA_SM(bfa_fcs_vport_sm_logo), BFA_FCS_VPORT_LOGO},
{BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
};
/*
* Beginning state.
*/
static void
bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), event);
switch (event) {
case BFA_FCS_VPORT_SM_CREATE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
bfa_fcs_fabric_addvport(__vport_fabric(vport), vport);
break;
default:
bfa_sm_fault(__vport_fcs(vport), event);
}
}
/*
* Created state - a start event is required to start up the state machine.
*/
static void
bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), event);
switch (event) {
case BFA_FCS_VPORT_SM_START:
if (bfa_sm_cmp_state(__vport_fabric(vport),
bfa_fcs_fabric_sm_online)
&& bfa_fcs_fabric_npiv_capable(__vport_fabric(vport))) {
bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
bfa_fcs_vport_do_fdisc(vport);
} else {
/*
* Fabric is offline or not NPIV capable, stay in
* offline state.
*/
vport->vport_stats.fab_no_npiv++;
bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
}
break;
case BFA_FCS_VPORT_SM_DELETE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
bfa_fcs_lport_delete(&vport->lport);
break;
case BFA_FCS_VPORT_SM_ONLINE:
case BFA_FCS_VPORT_SM_OFFLINE:
/*
* Ignore ONLINE/OFFLINE events from fabric
* till vport is started.
*/
break;
default:
bfa_sm_fault(__vport_fcs(vport), event);
}
}
/*
* Offline state - awaiting ONLINE event from fabric SM.
*/
static void
bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), event);
switch (event) {
case BFA_FCS_VPORT_SM_DELETE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
bfa_fcs_lport_delete(&vport->lport);
break;
case BFA_FCS_VPORT_SM_ONLINE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
vport->fdisc_retries = 0;
bfa_fcs_vport_do_fdisc(vport);
break;
case BFA_FCS_VPORT_SM_STOP:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
break;
case BFA_FCS_VPORT_SM_OFFLINE:
/*
* This can happen if the vport couldn't be initialzied
* due the fact that the npiv was not enabled on the switch.
* In that case we will put the vport in offline state.
* However, the link can go down and cause the this event to
* be sent when we are already offline. Ignore it.
*/
break;
default:
bfa_sm_fault(__vport_fcs(vport), event);
}
}
/*
* FDISC is sent and awaiting reply from fabric.
*/
static void
bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), event);
switch (event) {
case BFA_FCS_VPORT_SM_DELETE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_rsp_wait);
break;
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
break;
case BFA_FCS_VPORT_SM_RSP_OK:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_online);
bfa_fcs_lport_online(&vport->lport);
break;
case BFA_FCS_VPORT_SM_RSP_ERROR:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc_retry);
bfa_timer_start(__vport_bfa(vport), &vport->timer,
bfa_fcs_vport_timeout, vport,
BFA_FCS_RETRY_TIMEOUT);
break;
case BFA_FCS_VPORT_SM_RSP_FAILED:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
break;
case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_error);
break;
default:
bfa_sm_fault(__vport_fcs(vport), event);
}
}
/*
* FDISC attempt failed - a timer is active to retry FDISC.
*/
static void
bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), event);
switch (event) {
case BFA_FCS_VPORT_SM_DELETE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
bfa_timer_stop(&vport->timer);
bfa_fcs_lport_delete(&vport->lport);
break;
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
bfa_timer_stop(&vport->timer);
break;
case BFA_FCS_VPORT_SM_TIMEOUT:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
vport->vport_stats.fdisc_retries++;
vport->fdisc_retries++;
bfa_fcs_vport_do_fdisc(vport);
break;
default:
bfa_sm_fault(__vport_fcs(vport), event);
}
}
/*
 * FDISC is in progress and we got a vport delete request -
 * this is a wait state: we wait for the FDISC response and
 * transition to the appropriate state based on the response status.
*/
static void
bfa_fcs_vport_sm_fdisc_rsp_wait(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), event);
switch (event) {
case BFA_FCS_VPORT_SM_RSP_OK:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
bfa_fcs_lport_delete(&vport->lport);
break;
case BFA_FCS_VPORT_SM_DELETE:
break;
case BFA_FCS_VPORT_SM_OFFLINE:
case BFA_FCS_VPORT_SM_RSP_ERROR:
case BFA_FCS_VPORT_SM_RSP_FAILED:
case BFA_FCS_VPORT_SM_RSP_DUP_WWN:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
bfa_fcs_lport_delete(&vport->lport);
break;
default:
bfa_sm_fault(__vport_fcs(vport), event);
}
}
/*
* Vport is online (FDISC is complete).
*/
static void
bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), event);
switch (event) {
case BFA_FCS_VPORT_SM_DELETE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_deleting);
bfa_fcs_lport_delete(&vport->lport);
break;
case BFA_FCS_VPORT_SM_STOP:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_stopping);
bfa_sm_send_event(&vport->lport, BFA_FCS_PORT_SM_STOP);
break;
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_offline);
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
bfa_fcs_lport_offline(&vport->lport);
break;
default:
bfa_sm_fault(__vport_fcs(vport), event);
}
}
/*
* Vport is being stopped - awaiting lport stop completion to send
* LOGO to fabric.
*/
static void
bfa_fcs_vport_sm_stopping(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), event);
switch (event) {
case BFA_FCS_VPORT_SM_STOPCOMP:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo_for_stop);
bfa_fcs_vport_do_logo(vport);
break;
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
break;
default:
bfa_sm_fault(__vport_fcs(vport), event);
}
}
/*
* Vport is being deleted - awaiting lport delete completion to send
* LOGO to fabric.
*/
static void
bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), event);
switch (event) {
case BFA_FCS_VPORT_SM_DELETE:
break;
case BFA_FCS_VPORT_SM_DELCOMP:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_logo);
bfa_fcs_vport_do_logo(vport);
break;
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
break;
default:
bfa_sm_fault(__vport_fcs(vport), event);
}
}
/*
* Error State.
 * This state is entered when vport creation fails due to errors like
 * a duplicate WWN. In this state the only operation allowed is a
 * vport delete.
*/
static void
bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), event);
switch (event) {
case BFA_FCS_VPORT_SM_DELETE:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_cleanup);
bfa_fcs_lport_delete(&vport->lport);
break;
default:
bfa_trc(__vport_fcs(vport), event);
}
}
/*
* Lport cleanup is in progress since vport is being deleted. Fabric is
* offline, so no LOGO is needed to complete vport deletion.
*/
static void
bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), event);
switch (event) {
case BFA_FCS_VPORT_SM_DELCOMP:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
bfa_fcs_vport_free(vport);
break;
case BFA_FCS_VPORT_SM_STOPCOMP:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
break;
case BFA_FCS_VPORT_SM_DELETE:
break;
default:
bfa_sm_fault(__vport_fcs(vport), event);
}
}
/*
* LOGO is sent to fabric. Vport stop is in progress. Lport stop cleanup
* is done.
*/
static void
bfa_fcs_vport_sm_logo_for_stop(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), event);
switch (event) {
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
/*
* !!! fall through !!!
*/
case BFA_FCS_VPORT_SM_RSP_OK:
case BFA_FCS_VPORT_SM_RSP_ERROR:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_created);
break;
default:
bfa_sm_fault(__vport_fcs(vport), event);
}
}
/*
* LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
* is done.
*/
static void
bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport,
enum bfa_fcs_vport_event event)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), event);
switch (event) {
case BFA_FCS_VPORT_SM_OFFLINE:
bfa_sm_send_event(vport->lps, BFA_LPS_SM_OFFLINE);
/*
* !!! fall through !!!
*/
case BFA_FCS_VPORT_SM_RSP_OK:
case BFA_FCS_VPORT_SM_RSP_ERROR:
bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
bfa_fcs_vport_free(vport);
break;
case BFA_FCS_VPORT_SM_DELETE:
break;
default:
bfa_sm_fault(__vport_fcs(vport), event);
}
}
/*
* fcs_vport_private FCS virtual port private functions
*/
/*
* Send AEN notification
*/
static void
bfa_fcs_vport_aen_post(struct bfa_fcs_lport_s *port,
enum bfa_lport_aen_event event)
{
struct bfad_s *bfad = (struct bfad_s *)port->fabric->fcs->bfad;
struct bfa_aen_entry_s *aen_entry;
bfad_get_aen_entry(bfad, aen_entry);
if (!aen_entry)
return;
aen_entry->aen_data.lport.vf_id = port->fabric->vf_id;
aen_entry->aen_data.lport.roles = port->port_cfg.roles;
aen_entry->aen_data.lport.ppwwn = bfa_fcs_lport_get_pwwn(
bfa_fcs_get_base_port(port->fcs));
aen_entry->aen_data.lport.lpwwn = bfa_fcs_lport_get_pwwn(port);
/* Send the AEN notification */
bfad_im_post_vendor_event(aen_entry, bfad, ++port->fcs->fcs_aen_seq,
BFA_AEN_CAT_LPORT, event);
}
/*
* This routine will be called to send a FDISC command.
*/
static void
bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
{
bfa_lps_fdisc(vport->lps, vport,
bfa_fcport_get_maxfrsize(__vport_bfa(vport)),
__vport_pwwn(vport), __vport_nwwn(vport));
vport->vport_stats.fdisc_sent++;
}
static void
bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport)
{
u8 lsrjt_rsn = vport->lps->lsrjt_rsn;
u8 lsrjt_expl = vport->lps->lsrjt_expl;
bfa_trc(__vport_fcs(vport), lsrjt_rsn);
bfa_trc(__vport_fcs(vport), lsrjt_expl);
/* For certain reason codes, we don't want to retry. */
switch (vport->lps->lsrjt_expl) {
case FC_LS_RJT_EXP_INV_PORT_NAME: /* by brocade */
case FC_LS_RJT_EXP_INVALID_NPORT_ID: /* by Cisco */
if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
else {
bfa_fcs_vport_aen_post(&vport->lport,
BFA_LPORT_AEN_NPIV_DUP_WWN);
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_DUP_WWN);
}
break;
case FC_LS_RJT_EXP_INSUFF_RES:
/*
* This means max logins per port/switch setting on the
* switch was exceeded.
*/
if (vport->fdisc_retries < BFA_FCS_VPORT_MAX_RETRIES)
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
else {
bfa_fcs_vport_aen_post(&vport->lport,
BFA_LPORT_AEN_NPIV_FABRIC_MAX);
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_FAILED);
}
break;
default:
if (vport->fdisc_retries == 0)
bfa_fcs_vport_aen_post(&vport->lport,
BFA_LPORT_AEN_NPIV_UNKNOWN);
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
}
}
/*
* Called to send a logout to the fabric. Used when a V-Port is
* deleted/stopped.
*/
static void
bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport)
{
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
vport->vport_stats.logo_sent++;
bfa_lps_fdisclogo(vport->lps);
}
/*
* This routine will be called by bfa_timer on timer timeouts.
*
 * 	param[in]	vport_arg - pointer to struct bfa_fcs_vport_s
 *
 * 	return
 * 		void
*/
static void
bfa_fcs_vport_timeout(void *vport_arg)
{
struct bfa_fcs_vport_s *vport = (struct bfa_fcs_vport_s *) vport_arg;
vport->vport_stats.fdisc_timeouts++;
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_TIMEOUT);
}
static void
bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
{
struct bfad_vport_s *vport_drv =
(struct bfad_vport_s *)vport->vport_drv;
bfa_fcs_fabric_delvport(__vport_fabric(vport), vport);
if (vport_drv->comp_del)
complete(vport_drv->comp_del);
else
kfree(vport_drv);
bfa_lps_delete(vport->lps);
}
/*
* fcs_vport_public FCS virtual port public interfaces
*/
/*
* Online notification from fabric SM.
*/
void
bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport)
{
vport->vport_stats.fab_online++;
if (bfa_fcs_fabric_npiv_capable(__vport_fabric(vport)))
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
else
vport->vport_stats.fab_no_npiv++;
}
/*
* Offline notification from fabric SM.
*/
void
bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport)
{
vport->vport_stats.fab_offline++;
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
}
/*
* Cleanup notification from fabric SM on link timer expiry.
*/
void
bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport)
{
vport->vport_stats.fab_cleanup++;
}
/*
* delete notification from fabric SM. To be invoked from within FCS.
*/
void
bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport)
{
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
}
/*
* Stop completion callback from associated lport
*/
void
bfa_fcs_vport_stop_comp(struct bfa_fcs_vport_s *vport)
{
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOPCOMP);
}
/*
* Delete completion callback from associated lport
*/
void
bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport)
{
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELCOMP);
}
/*
* fcs_vport_api Virtual port API
*/
/*
* Use this function to instantiate a new FCS vport object. This
* function will not trigger any HW initialization process (which will be
* done in vport_start() call)
*
* param[in] vport - pointer to bfa_fcs_vport_t. This space
* needs to be allocated by the driver.
* param[in] fcs - FCS instance
* param[in] vport_cfg - vport configuration
* param[in] vf_id - VF_ID if vport is created within a VF.
* FC_VF_ID_NULL to specify base fabric.
* param[in] vport_drv - Opaque handle back to the driver's vport
* structure
*
* retval BFA_STATUS_OK - on success.
* retval BFA_STATUS_FAILED - on failure.
*/
bfa_status_t
bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
u16 vf_id, struct bfa_lport_cfg_s *vport_cfg,
struct bfad_vport_s *vport_drv)
{
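	/*
	 * Validate the request: the pwwn must be non-zero and must not
	 * clash with the base port or an existing vport, and both the
	 * fabric and the lps pool must have room for another vport.
	 */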
if (vport_cfg->pwwn == 0)
return BFA_STATUS_INVALID_WWN;
if (bfa_fcs_lport_get_pwwn(&fcs->fabric.bport) == vport_cfg->pwwn)
return BFA_STATUS_VPORT_WWN_BP;
if (bfa_fcs_vport_lookup(fcs, vf_id, vport_cfg->pwwn) != NULL)
return BFA_STATUS_VPORT_EXISTS;
if (fcs->fabric.num_vports ==
bfa_lps_get_max_vport(fcs->bfa))
return BFA_STATUS_VPORT_MAX;
vport->lps = bfa_lps_alloc(fcs->bfa);
if (!vport->lps)
return BFA_STATUS_VPORT_MAX;
vport->vport_drv = vport_drv;
vport_cfg->preboot_vp = BFA_FALSE;
bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport);
bfa_fcs_lport_init(&vport->lport, vport_cfg);
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);
return BFA_STATUS_OK;
}
/*
* Use this function to instantiate a new FCS PBC vport object. This
* function will not trigger any HW initialization process (which will be
* done in vport_start() call)
*
* param[in] vport - pointer to bfa_fcs_vport_t. This space
* needs to be allocated by the driver.
* param[in] fcs - FCS instance
* param[in] vport_cfg - vport configuration
* param[in] vf_id - VF_ID if vport is created within a VF.
* FC_VF_ID_NULL to specify base fabric.
* param[in] vport_drv - Opaque handle back to the driver's vport
* structure
*
* retval BFA_STATUS_OK - on success.
* retval BFA_STATUS_FAILED - on failure.
*/
bfa_status_t
bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs,
u16 vf_id, struct bfa_lport_cfg_s *vport_cfg,
struct bfad_vport_s *vport_drv)
{
bfa_status_t rc;
rc = bfa_fcs_vport_create(vport, fcs, vf_id, vport_cfg, vport_drv);
vport->lport.port_cfg.preboot_vp = BFA_TRUE;
return rc;
}
/*
 * Use this function to find out whether this is a pbc vport or not.
 *
 * @param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * @returns BFA_TRUE for a preboot (pbc) vport, BFA_FALSE otherwise
*/
bfa_boolean_t
bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport)
{
if (vport && (vport->lport.port_cfg.preboot_vp == BFA_TRUE))
return BFA_TRUE;
else
return BFA_FALSE;
}
/*
 * Use this function to start (initialize) the vport.
 *
 * @param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * @returns BFA_STATUS_OK on success
*/
bfa_status_t
bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport)
{
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_START);
return BFA_STATUS_OK;
}
/*
 * Use this function to quiesce the vport object. This function returns
 * immediately; when the vport is actually stopped, the
 * bfa_drv_vport_stop_cb() will be called.
 *
 * param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * return BFA_STATUS_OK
*/
bfa_status_t
bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport)
{
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_STOP);
return BFA_STATUS_OK;
}
/*
 * Use this function to delete a vport object. The fabric object should
 * be stopped before this function call.
 *
 * !!!!!!! Do not invoke this from within FCS !!!!!!!
 *
 * param[in] vport - pointer to bfa_fcs_vport_t.
 *
 * return BFA_STATUS_OK, or BFA_STATUS_PBC for a preboot vport
*/
bfa_status_t
bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport)
{
if (vport->lport.port_cfg.preboot_vp)
return BFA_STATUS_PBC;
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
return BFA_STATUS_OK;
}
/*
* Use this function to get vport's current status info.
*
* param[in] vport pointer to bfa_fcs_vport_t.
* param[out] attr pointer to return vport attributes
*
* return None
*/
void
bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
struct bfa_vport_attr_s *attr)
{
if (vport == NULL || attr == NULL)
return;
memset(attr, 0, sizeof(struct bfa_vport_attr_s));
bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr);
attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
}
/*
* Lookup a virtual port. Excludes base port from lookup.
*/
struct bfa_fcs_vport_s *
bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn)
{
struct bfa_fcs_vport_s *vport;
struct bfa_fcs_fabric_s *fabric;
bfa_trc(fcs, vf_id);
bfa_trc(fcs, vpwwn);
fabric = bfa_fcs_vf_lookup(fcs, vf_id);
if (!fabric) {
bfa_trc(fcs, vf_id);
return NULL;
}
vport = bfa_fcs_fabric_vport_lookup(fabric, vpwwn);
return vport;
}
/*
* FDISC Response
*/
void
bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status)
{
struct bfa_fcs_vport_s *vport = uarg;
bfa_trc(__vport_fcs(vport), __vport_pwwn(vport));
bfa_trc(__vport_fcs(vport), status);
switch (status) {
case BFA_STATUS_OK:
/*
* Initialize the V-Port fields
*/
__vport_fcid(vport) = vport->lps->lp_pid;
vport->vport_stats.fdisc_accepts++;
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
break;
case BFA_STATUS_INVALID_MAC:
/* Only for CNA */
vport->vport_stats.fdisc_acc_bad++;
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
break;
case BFA_STATUS_EPROTOCOL:
switch (vport->lps->ext_status) {
case BFA_EPROTO_BAD_ACCEPT:
vport->vport_stats.fdisc_acc_bad++;
break;
case BFA_EPROTO_UNKNOWN_RSP:
vport->vport_stats.fdisc_unknown_rsp++;
break;
default:
break;
}
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
break;
case BFA_STATUS_FABRIC_RJT:
vport->vport_stats.fdisc_rejects++;
bfa_fcs_vport_fdisc_rejected(vport);
break;
default:
vport->vport_stats.fdisc_rsp_err++;
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_ERROR);
}
}
/*
* LOGO response
*/
void
bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg)
{
struct bfa_fcs_vport_s *vport = uarg;
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
}
/*
* Received clear virtual link
*/
void
bfa_cb_lps_cvl_event(void *bfad, void *uarg)
{
struct bfa_fcs_vport_s *vport = uarg;
/* Send an Offline followed by an ONLINE */
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
}
| gpl-2.0 |
SaberMod/android_kernel_lge_hammerhead | drivers/scsi/pm8001/pm8001_init.c | 4803 | 26040 | /*
* PMC-Sierra SPC 8001 SAS/SATA based host adapters driver
*
* Copyright (c) 2008-2009 USI Co., Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*
*/
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm8001_chips.h"
static struct scsi_transport_template *pm8001_stt;
static const struct pm8001_chip_info pm8001_chips[] = {
[chip_8001] = { 8, &pm8001_8001_dispatch,},
};
static int pm8001_id;
LIST_HEAD(hba_list);
struct workqueue_struct *pm8001_wq;
/**
 * The main structure which the LLDD must register with the SCSI core.
*/
static struct scsi_host_template pm8001_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = sas_queuecommand,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.scan_finished = pm8001_scan_finished,
.scan_start = pm8001_scan_start,
.change_queue_depth = sas_change_queue_depth,
.change_queue_type = sas_change_queue_type,
.bios_param = sas_bios_param,
.can_queue = 1,
.cmd_per_lun = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
.eh_device_reset_handler = sas_eh_device_reset_handler,
.eh_bus_reset_handler = sas_eh_bus_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = pm8001_host_attrs,
};
/**
 * The SAS layer calls these functions to execute specific tasks.
*/
static struct sas_domain_function_template pm8001_transport_ops = {
.lldd_dev_found = pm8001_dev_found,
.lldd_dev_gone = pm8001_dev_gone,
.lldd_execute_task = pm8001_queue_command,
.lldd_control_phy = pm8001_phy_control,
.lldd_abort_task = pm8001_abort_task,
.lldd_abort_task_set = pm8001_abort_task_set,
.lldd_clear_aca = pm8001_clear_aca,
.lldd_clear_task_set = pm8001_clear_task_set,
.lldd_I_T_nexus_reset = pm8001_I_T_nexus_reset,
.lldd_lu_reset = pm8001_lu_reset,
.lldd_query_task = pm8001_query_task,
};
/**
 *pm8001_phy_init - initialize our adapter phys
*@pm8001_ha: our hba structure.
*@phy_id: phy id.
*/
static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha,
int phy_id)
{
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
phy->phy_state = 0;
phy->pm8001_ha = pm8001_ha;
sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
sas_phy->id = phy_id;
sas_phy->sas_addr = &pm8001_ha->sas_addr[0];
sas_phy->frame_rcvd = &phy->frame_rcvd[0];
sas_phy->ha = (struct sas_ha_struct *)pm8001_ha->shost->hostdata;
sas_phy->lldd_phy = phy;
}
/**
*pm8001_free - free hba
*@pm8001_ha: our hba structure.
*
*/
static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
{
int i;
if (!pm8001_ha)
return;
for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
pci_free_consistent(pm8001_ha->pdev,
pm8001_ha->memoryMap.region[i].element_size,
pm8001_ha->memoryMap.region[i].virt_ptr,
pm8001_ha->memoryMap.region[i].phys_addr);
}
}
PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
if (pm8001_ha->shost)
scsi_host_put(pm8001_ha->shost);
flush_workqueue(pm8001_wq);
kfree(pm8001_ha->tags);
kfree(pm8001_ha);
}
#ifdef PM8001_USE_TASKLET
static void pm8001_tasklet(unsigned long opaque)
{
struct pm8001_hba_info *pm8001_ha;
pm8001_ha = (struct pm8001_hba_info *)opaque;
	BUG_ON(!pm8001_ha);
PM8001_CHIP_DISP->isr(pm8001_ha);
}
#endif
/**
 * pm8001_interrupt - when the HBA raises an interrupt, invoke this
* dispatcher to handle each case.
* @irq: irq number.
* @opaque: the passed general host adapter struct
*/
static irqreturn_t pm8001_interrupt(int irq, void *opaque)
{
struct pm8001_hba_info *pm8001_ha;
irqreturn_t ret = IRQ_HANDLED;
struct sas_ha_struct *sha = opaque;
pm8001_ha = sha->lldd_ha;
if (unlikely(!pm8001_ha))
return IRQ_NONE;
if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))
return IRQ_NONE;
#ifdef PM8001_USE_TASKLET
tasklet_schedule(&pm8001_ha->tasklet);
#else
ret = PM8001_CHIP_DISP->isr(pm8001_ha);
#endif
return ret;
}
/**
 * pm8001_alloc - initialize our hba structure and its DMA memory regions.
* @pm8001_ha:our hba structure.
*
*/
static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
{
int i;
spin_lock_init(&pm8001_ha->lock);
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
pm8001_phy_init(pm8001_ha, i);
pm8001_ha->port[i].wide_port_phymap = 0;
pm8001_ha->port[i].port_attached = 0;
pm8001_ha->port[i].port_state = 0;
INIT_LIST_HEAD(&pm8001_ha->port[i].list);
}
pm8001_ha->tags = kzalloc(PM8001_MAX_CCB, GFP_KERNEL);
if (!pm8001_ha->tags)
goto err_out;
/* MPI Memory region 1 for AAP Event Log for fw */
pm8001_ha->memoryMap.region[AAP1].num_elements = 1;
pm8001_ha->memoryMap.region[AAP1].element_size = PM8001_EVENT_LOG_SIZE;
pm8001_ha->memoryMap.region[AAP1].total_len = PM8001_EVENT_LOG_SIZE;
pm8001_ha->memoryMap.region[AAP1].alignment = 32;
/* MPI Memory region 2 for IOP Event Log for fw */
pm8001_ha->memoryMap.region[IOP].num_elements = 1;
pm8001_ha->memoryMap.region[IOP].element_size = PM8001_EVENT_LOG_SIZE;
pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE;
pm8001_ha->memoryMap.region[IOP].alignment = 32;
/* MPI Memory region 3 for consumer Index of inbound queues */
pm8001_ha->memoryMap.region[CI].num_elements = 1;
pm8001_ha->memoryMap.region[CI].element_size = 4;
pm8001_ha->memoryMap.region[CI].total_len = 4;
pm8001_ha->memoryMap.region[CI].alignment = 4;
/* MPI Memory region 4 for producer Index of outbound queues */
pm8001_ha->memoryMap.region[PI].num_elements = 1;
pm8001_ha->memoryMap.region[PI].element_size = 4;
pm8001_ha->memoryMap.region[PI].total_len = 4;
pm8001_ha->memoryMap.region[PI].alignment = 4;
/* MPI Memory region 5 inbound queues */
pm8001_ha->memoryMap.region[IB].num_elements = 256;
pm8001_ha->memoryMap.region[IB].element_size = 64;
pm8001_ha->memoryMap.region[IB].total_len = 256 * 64;
pm8001_ha->memoryMap.region[IB].alignment = 64;
	/* MPI Memory region 6 outbound queues */
pm8001_ha->memoryMap.region[OB].num_elements = 256;
pm8001_ha->memoryMap.region[OB].element_size = 64;
pm8001_ha->memoryMap.region[OB].total_len = 256 * 64;
pm8001_ha->memoryMap.region[OB].alignment = 64;
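	/* Each queue region above is 256 entries x 64 bytes = 16KB. */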
	/* Memory region for NVMD write DMA */
pm8001_ha->memoryMap.region[NVMD].num_elements = 1;
pm8001_ha->memoryMap.region[NVMD].element_size = 4096;
pm8001_ha->memoryMap.region[NVMD].total_len = 4096;
/* Memory region for devices*/
pm8001_ha->memoryMap.region[DEV_MEM].num_elements = 1;
pm8001_ha->memoryMap.region[DEV_MEM].element_size = PM8001_MAX_DEVICES *
sizeof(struct pm8001_device);
pm8001_ha->memoryMap.region[DEV_MEM].total_len = PM8001_MAX_DEVICES *
sizeof(struct pm8001_device);
/* Memory region for ccb_info*/
pm8001_ha->memoryMap.region[CCB_MEM].num_elements = 1;
pm8001_ha->memoryMap.region[CCB_MEM].element_size = PM8001_MAX_CCB *
sizeof(struct pm8001_ccb_info);
pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB *
sizeof(struct pm8001_ccb_info);
for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_mem_alloc(pm8001_ha->pdev,
&pm8001_ha->memoryMap.region[i].virt_ptr,
&pm8001_ha->memoryMap.region[i].phys_addr,
&pm8001_ha->memoryMap.region[i].phys_addr_hi,
&pm8001_ha->memoryMap.region[i].phys_addr_lo,
pm8001_ha->memoryMap.region[i].total_len,
pm8001_ha->memoryMap.region[i].alignment) != 0) {
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Mem%d alloc failed\n",
i));
goto err_out;
}
}
pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr;
for (i = 0; i < PM8001_MAX_DEVICES; i++) {
pm8001_ha->devices[i].dev_type = NO_DEVICE;
pm8001_ha->devices[i].id = i;
pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES;
pm8001_ha->devices[i].running_req = 0;
}
pm8001_ha->ccb_info = pm8001_ha->memoryMap.region[CCB_MEM].virt_ptr;
for (i = 0; i < PM8001_MAX_CCB; i++) {
pm8001_ha->ccb_info[i].ccb_dma_handle =
pm8001_ha->memoryMap.region[CCB_MEM].phys_addr +
i * sizeof(struct pm8001_ccb_info);
pm8001_ha->ccb_info[i].task = NULL;
pm8001_ha->ccb_info[i].ccb_tag = 0xffffffff;
pm8001_ha->ccb_info[i].device = NULL;
++pm8001_ha->tags_num;
}
pm8001_ha->flags = PM8001F_INIT_TIME;
/* Initialize tags */
pm8001_tag_init(pm8001_ha);
return 0;
err_out:
return 1;
}
/**
 * pm8001_ioremap - remap the PCI high physical addresses to kernel virtual
 * addresses so that we can access them.
* @pm8001_ha:our hba structure.
*/
static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
{
u32 bar;
u32 logicalBar = 0;
struct pci_dev *pdev;
pdev = pm8001_ha->pdev;
/* map pci mem (PMC pci base 0-3)*/
for (bar = 0; bar < 6; bar++) {
/*
** logical BARs for SPC:
** bar 0 and 1 - logical BAR0
** bar 2 and 3 - logical BAR1
** bar4 - logical BAR2
** bar5 - logical BAR3
** Skip the appropriate assignments:
*/
if ((bar == 1) || (bar == 3))
continue;
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
pm8001_ha->io_mem[logicalBar].membase =
pci_resource_start(pdev, bar);
pm8001_ha->io_mem[logicalBar].membase &=
(u32)PCI_BASE_ADDRESS_MEM_MASK;
pm8001_ha->io_mem[logicalBar].memsize =
pci_resource_len(pdev, bar);
pm8001_ha->io_mem[logicalBar].memvirtaddr =
ioremap(pm8001_ha->io_mem[logicalBar].membase,
pm8001_ha->io_mem[logicalBar].memsize);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("PCI: bar %d, logicalBar %d "
"virt_addr=%lx,len=%d\n", bar, logicalBar,
(unsigned long)
pm8001_ha->io_mem[logicalBar].memvirtaddr,
pm8001_ha->io_mem[logicalBar].memsize));
} else {
pm8001_ha->io_mem[logicalBar].membase = 0;
pm8001_ha->io_mem[logicalBar].memsize = 0;
pm8001_ha->io_mem[logicalBar].memvirtaddr = 0;
}
logicalBar++;
}
return 0;
}
/**
* pm8001_pci_alloc - initialize our ha card structure
* @pdev: pci device.
 * @chip_id: chip id; index into pm8001_chips[].
* @shost: scsi host struct which has been initialized before.
*/
static struct pm8001_hba_info *__devinit
pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost)
{
struct pm8001_hba_info *pm8001_ha;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
pm8001_ha = sha->lldd_ha;
if (!pm8001_ha)
return NULL;
pm8001_ha->pdev = pdev;
pm8001_ha->dev = &pdev->dev;
pm8001_ha->chip_id = chip_id;
pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id];
pm8001_ha->irq = pdev->irq;
pm8001_ha->sas = sha;
pm8001_ha->shost = shost;
pm8001_ha->id = pm8001_id++;
pm8001_ha->logging_level = 0x01;
sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
#ifdef PM8001_USE_TASKLET
tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
(unsigned long)pm8001_ha);
#endif
pm8001_ioremap(pm8001_ha);
if (!pm8001_alloc(pm8001_ha))
return pm8001_ha;
pm8001_free(pm8001_ha);
return NULL;
}
/**
 * pci_go_44 - pm8001 specific: the device's DMA mask is 44 bit rather than 64 bit
* @pdev: pci device.
*/
static int pci_go_44(struct pci_dev *pdev)
{
int rc;
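	/*
	 * Try the chip's native 44-bit DMA mask first; if the coherent
	 * mask cannot be set that wide (or 44-bit is unavailable), fall
	 * back to 32-bit masks.
	 */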
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
if (rc) {
rc = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"44-bit DMA enable failed\n");
return rc;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"32-bit consistent DMA enable failed\n");
return rc;
}
}
return rc;
}
/**
 * pm8001_prep_sas_ha_init - allocate memory in the general hba struct and initialize it.
 * @shost: scsi host which has been allocated outside.
 * @chip_info: chip specific data.
*/
static int __devinit pm8001_prep_sas_ha_init(struct Scsi_Host * shost,
const struct pm8001_chip_info *chip_info)
{
int phy_nr, port_nr;
struct asd_sas_phy **arr_phy;
struct asd_sas_port **arr_port;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
phy_nr = chip_info->n_phy;
port_nr = phy_nr;
memset(sha, 0x00, sizeof(*sha));
arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
if (!arr_phy)
goto exit;
arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
if (!arr_port)
goto exit_free2;
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
sha->lldd_ha = kzalloc(sizeof(struct pm8001_hba_info), GFP_KERNEL);
if (!sha->lldd_ha)
goto exit_free1;
shost->transportt = pm8001_stt;
shost->max_id = PM8001_MAX_DEVICES;
shost->max_lun = 8;
shost->max_channel = 0;
shost->unique_id = pm8001_id;
shost->max_cmd_len = 16;
shost->can_queue = PM8001_CAN_QUEUE;
shost->cmd_per_lun = 32;
return 0;
exit_free1:
kfree(arr_port);
exit_free2:
kfree(arr_phy);
exit:
return -1;
}
/**
* pm8001_post_sas_ha_init - initialize general hba struct defined in libsas
* @shost: scsi host which has been allocated outside
 * @chip_info: chip specific data.
*/
static void __devinit pm8001_post_sas_ha_init(struct Scsi_Host *shost,
const struct pm8001_chip_info *chip_info)
{
int i = 0;
struct pm8001_hba_info *pm8001_ha;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
pm8001_ha = sha->lldd_ha;
for (i = 0; i < chip_info->n_phy; i++) {
sha->sas_phy[i] = &pm8001_ha->phy[i].sas_phy;
sha->sas_port[i] = &pm8001_ha->port[i].sas_port;
}
sha->sas_ha_name = DRV_NAME;
sha->dev = pm8001_ha->dev;
sha->lldd_module = THIS_MODULE;
sha->sas_addr = &pm8001_ha->sas_addr[0];
sha->num_phys = chip_info->n_phy;
sha->lldd_max_execute_num = 1;
sha->lldd_queue_size = PM8001_CAN_QUEUE;
sha->core.shost = shost;
}
/**
* pm8001_init_sas_add - initialize sas address
 * @pm8001_ha: our hba struct.
 *
 * Currently we just assign a fixed SAS address to our HBA; for production
 * it should be read from the EEPROM.
*/
static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
{
u8 i;
#ifdef PM8001_READ_VPD
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_ioctl_payload payload;
pm8001_ha->nvmd_completion = &completion;
payload.minor_function = 0;
payload.length = 128;
payload.func_specific = kzalloc(128, GFP_KERNEL);
PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
wait_for_completion(&completion);
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr,
SAS_ADDR_SIZE);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("phy %d sas_addr = %016llx \n", i,
pm8001_ha->phy[i].dev_sas_addr));
}
#else
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
pm8001_ha->phy[i].dev_sas_addr =
cpu_to_be64((u64)
(*(u64 *)&pm8001_ha->phy[i].dev_sas_addr));
}
memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr,
SAS_ADDR_SIZE);
#endif
}
#ifdef PM8001_USE_MSIX
/**
* pm8001_setup_msix - enable MSI-X interrupt
 * @pm8001_ha: our hba struct.
 * @irq_handler: interrupt handler to register for each vector.
*/
static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha,
irq_handler_t irq_handler)
{
u32 i = 0, j = 0;
u32 number_of_intr = 1;
int flag = 0;
u32 max_entry;
int rc;
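	/*
	 * Note: only a single MSI-X vector is enabled here (number_of_intr
	 * is 1), even though the msix_entries table has more entries.
	 */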
max_entry = sizeof(pm8001_ha->msix_entries) /
sizeof(pm8001_ha->msix_entries[0]);
flag |= IRQF_DISABLED;
for (i = 0; i < max_entry ; i++)
pm8001_ha->msix_entries[i].entry = i;
rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
number_of_intr);
pm8001_ha->number_of_intr = number_of_intr;
if (!rc) {
for (i = 0; i < number_of_intr; i++) {
if (request_irq(pm8001_ha->msix_entries[i].vector,
irq_handler, flag, DRV_NAME,
SHOST_TO_SAS_HA(pm8001_ha->shost))) {
for (j = 0; j < i; j++)
free_irq(
pm8001_ha->msix_entries[j].vector,
SHOST_TO_SAS_HA(pm8001_ha->shost));
pci_disable_msix(pm8001_ha->pdev);
break;
}
}
}
return rc;
}
#endif
/**
* pm8001_request_irq - register interrupt
 * @pm8001_ha: our hba struct.
*/
static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
{
struct pci_dev *pdev;
irq_handler_t irq_handler = pm8001_interrupt;
int rc;
pdev = pm8001_ha->pdev;
#ifdef PM8001_USE_MSIX
if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
return pm8001_setup_msix(pm8001_ha, irq_handler);
else
goto intx;
#endif
intx:
/* initialize the INT-X interrupt */
rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
SHOST_TO_SAS_HA(pm8001_ha->shost));
return rc;
}
/**
* pm8001_pci_probe - probe supported device
* @pdev: pci device which kernel has been prepared for.
* @ent: pci device id
*
 * This is the main initialization function; it is invoked when a new PCI
 * device is bound to the driver. All structure and hardware initialization
 * is done here, and the interrupt is registered as well.
*/
static int __devinit pm8001_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned int rc;
u32 pci_reg;
struct pm8001_hba_info *pm8001_ha;
struct Scsi_Host *shost = NULL;
const struct pm8001_chip_info *chip;
dev_printk(KERN_INFO, &pdev->dev,
"pm8001: driver version %s\n", DRV_VERSION);
rc = pci_enable_device(pdev);
if (rc)
goto err_out_enable;
pci_set_master(pdev);
/*
* Enable pci slot busmaster by setting pci command register.
* This is required by FW for Cyclone card.
*/
pci_read_config_dword(pdev, PCI_COMMAND, &pci_reg);
pci_reg |= 0x157;
pci_write_config_dword(pdev, PCI_COMMAND, pci_reg);
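/*
 * For reference: 0x157 appears to OR in PCI_COMMAND_IO (0x1),
 * PCI_COMMAND_MEMORY (0x2), PCI_COMMAND_MASTER (0x4),
 * PCI_COMMAND_INVALIDATE (0x10), PCI_COMMAND_PARITY (0x40) and
 * PCI_COMMAND_SERR (0x100); check the firmware docs before changing it.
 */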
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out_disable;
rc = pci_go_44(pdev);
if (rc)
goto err_out_regions;
shost = scsi_host_alloc(&pm8001_sht, sizeof(void *));
if (!shost) {
rc = -ENOMEM;
goto err_out_regions;
}
chip = &pm8001_chips[ent->driver_data];
SHOST_TO_SAS_HA(shost) =
kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
if (!SHOST_TO_SAS_HA(shost)) {
rc = -ENOMEM;
goto err_out_free_host;
}
rc = pm8001_prep_sas_ha_init(shost, chip);
if (rc) {
rc = -ENOMEM;
goto err_out_free;
}
pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost);
if (!pm8001_ha) {
rc = -ENOMEM;
goto err_out_free;
}
list_add_tail(&pm8001_ha->list, &hba_list);
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
if (rc)
goto err_out_ha_free;
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha_free;
rc = pm8001_request_irq(pm8001_ha);
if (rc)
goto err_out_shost;
PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
pm8001_init_sas_add(pm8001_ha);
pm8001_post_sas_ha_init(shost, chip);
rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
if (rc)
goto err_out_shost;
scsi_scan_host(pm8001_ha->shost);
return 0;
err_out_shost:
scsi_remove_host(pm8001_ha->shost);
err_out_ha_free:
pm8001_free(pm8001_ha);
err_out_free:
kfree(SHOST_TO_SAS_HA(shost));
err_out_free_host:
kfree(shost);
err_out_regions:
pci_release_regions(pdev);
err_out_disable:
pci_disable_device(pdev);
err_out_enable:
return rc;
}
static void __devexit pm8001_pci_remove(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct pm8001_hba_info *pm8001_ha;
int i;
pm8001_ha = sha->lldd_ha;
pci_set_drvdata(pdev, NULL);
sas_unregister_ha(sha);
sas_remove_host(pm8001_ha->shost);
list_del(&pm8001_ha->list);
scsi_remove_host(pm8001_ha->shost);
PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
#ifdef PM8001_USE_MSIX
for (i = 0; i < pm8001_ha->number_of_intr; i++)
synchronize_irq(pm8001_ha->msix_entries[i].vector);
for (i = 0; i < pm8001_ha->number_of_intr; i++)
free_irq(pm8001_ha->msix_entries[i].vector, sha);
pci_disable_msix(pdev);
#else
free_irq(pm8001_ha->irq, sha);
#endif
#ifdef PM8001_USE_TASKLET
tasklet_kill(&pm8001_ha->tasklet);
#endif
pm8001_free(pm8001_ha);
kfree(sha->sas_phy);
kfree(sha->sas_port);
kfree(sha);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
/**
* pm8001_pci_suspend - power management suspend main entry point
* @pdev: PCI device struct
* @state: PM state change to (usually PCI_D3)
*
* Returns 0 success, anything else error.
*/
static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct pm8001_hba_info *pm8001_ha;
int i , pos;
u32 device_state;
pm8001_ha = sha->lldd_ha;
flush_workqueue(pm8001_wq);
scsi_block_requests(pm8001_ha->shost);
pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pos == 0) {
printk(KERN_ERR " PCI PM not supported\n");
return -ENODEV;
}
PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
#ifdef PM8001_USE_MSIX
for (i = 0; i < pm8001_ha->number_of_intr; i++)
synchronize_irq(pm8001_ha->msix_entries[i].vector);
for (i = 0; i < pm8001_ha->number_of_intr; i++)
free_irq(pm8001_ha->msix_entries[i].vector, sha);
pci_disable_msix(pdev);
#else
free_irq(pm8001_ha->irq, sha);
#endif
#ifdef PM8001_USE_TASKLET
tasklet_kill(&pm8001_ha->tasklet);
#endif
device_state = pci_choose_state(pdev, state);
pm8001_printk("pdev=0x%p, slot=%s, entering "
"operating state [D%d]\n", pdev,
pm8001_ha->name, device_state);
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, device_state);
return 0;
}
/**
* pm8001_pci_resume - power management resume main entry point
* @pdev: PCI device struct
*
* Returns 0 success, anything else error.
*/
static int pm8001_pci_resume(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct pm8001_hba_info *pm8001_ha;
int rc;
u32 device_state;
pm8001_ha = sha->lldd_ha;
device_state = pdev->current_state;
pm8001_printk("pdev=0x%p, slot=%s, resuming from previous "
"operating state [D%d]\n", pdev, pm8001_ha->name, device_state);
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake(pdev, PCI_D0, 0);
pci_restore_state(pdev);
rc = pci_enable_device(pdev);
if (rc) {
pm8001_printk("slot=%s Enable device failed during resume\n",
pm8001_ha->name);
goto err_out_enable;
}
pci_set_master(pdev);
rc = pci_go_44(pdev);
if (rc)
goto err_out_disable;
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd);
rc = PM8001_CHIP_DISP->chip_init(pm8001_ha);
if (rc)
goto err_out_disable;
PM8001_CHIP_DISP->interrupt_disable(pm8001_ha);
rc = pm8001_request_irq(pm8001_ha);
if (rc)
goto err_out_disable;
#ifdef PM8001_USE_TASKLET
tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet,
(unsigned long)pm8001_ha);
#endif
PM8001_CHIP_DISP->interrupt_enable(pm8001_ha);
scsi_unblock_requests(pm8001_ha->shost);
return 0;
err_out_disable:
scsi_remove_host(pm8001_ha->shost);
pci_disable_device(pdev);
err_out_enable:
return rc;
}
static struct pci_device_id __devinitdata pm8001_pci_table[] = {
{
PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
},
{
PCI_DEVICE(0x117c, 0x0042),
.driver_data = chip_8001
},
{} /* terminate list */
};
static struct pci_driver pm8001_pci_driver = {
.name = DRV_NAME,
.id_table = pm8001_pci_table,
.probe = pm8001_pci_probe,
.remove = __devexit_p(pm8001_pci_remove),
.suspend = pm8001_pci_suspend,
.resume = pm8001_pci_resume,
};
/**
* pm8001_init - initialize scsi transport template
*/
static int __init pm8001_init(void)
{
int rc = -ENOMEM;
pm8001_wq = alloc_workqueue("pm8001", 0, 0);
if (!pm8001_wq)
goto err;
pm8001_id = 0;
pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops);
if (!pm8001_stt)
goto err_wq;
rc = pci_register_driver(&pm8001_pci_driver);
if (rc)
goto err_tp;
return 0;
err_tp:
sas_release_transport(pm8001_stt);
err_wq:
destroy_workqueue(pm8001_wq);
err:
return rc;
}
static void __exit pm8001_exit(void)
{
pci_unregister_driver(&pm8001_pci_driver);
sas_release_transport(pm8001_stt);
destroy_workqueue(pm8001_wq);
}
module_init(pm8001_init);
module_exit(pm8001_exit);
MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>");
MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pm8001_pci_table);
| gpl-2.0 |
poondog/m7_stock_443 | lib/raid6/recov.c | 5059 | 3597 | /* -*- linux-c -*- ------------------------------------------------------- *
*
* Copyright 2002 H. Peter Anvin - All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, Inc., 53 Temple Place Ste 330,
* Boston MA 02111-1307, USA; either version 2 of the License, or
* (at your option) any later version; incorporated herein by reference.
*
* ----------------------------------------------------------------------- */
/*
* raid6/recov.c
*
* RAID-6 data recovery in dual failure mode. In single failure mode,
* use the RAID-5 algorithm (or, in the case of Q failure, just reconstruct
* the syndrome.)
*/
#include <linux/export.h>
#include <linux/raid/pq.h>
/* Recover two failed data blocks. */
void raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
void **ptrs)
{
u8 *p, *q, *dp, *dq;
u8 px, qx, db;
const u8 *pbmul; /* P multiplier table for B data */
const u8 *qmul; /* Q multiplier table (for both) */
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
/* Compute syndrome with zero for the missing data pages
Use the dead data pages as temporary storage for
delta p and delta q */
dp = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-2] = dp;
dq = (u8 *)ptrs[failb];
ptrs[failb] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dp;
ptrs[failb] = dq;
ptrs[disks-2] = p;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
pbmul = raid6_gfmul[raid6_gfexi[failb-faila]];
qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]];
/* Now do it... */
while ( bytes-- ) {
px = *p ^ *dp;
qx = qmul[*q ^ *dq];
*dq++ = db = pbmul[px] ^ qx; /* Reconstructed B */
*dp++ = db ^ px; /* Reconstructed A */
p++; q++;
}
}
EXPORT_SYMBOL_GPL(raid6_2data_recov);
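/*
 * Sketch of the GF(2^8) algebra used above (derived here, not from the
 * original source):
 *
 *   Pd = P ^ P0 = A ^ B          (P0,Q0: syndromes with A,B zeroed)
 *   Qd = Q ^ Q0 = g^a.A ^ g^b.B  (g: field generator; a,b: failed slots)
 *
 * Solving the 2x2 system over GF(2^8) gives
 *
 *   B = (g^-a.Qd ^ Pd) / (g^(b-a) ^ 1),   A = B ^ Pd
 *
 * matching the loop: qmul multiplies by 1/(g^a ^ g^b) and pbmul by
 * g^a/(g^a ^ g^b) = 1/(g^(b-a) ^ 1).
 */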
/* Recover failure of one data block plus the P block */
void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs)
{
u8 *p, *q, *dq;
const u8 *qmul; /* Q multiplier table */
p = (u8 *)ptrs[disks-2];
q = (u8 *)ptrs[disks-1];
/* Compute syndrome with zero for the missing data page
Use the dead data page as temporary storage for delta q */
dq = (u8 *)ptrs[faila];
ptrs[faila] = (void *)raid6_empty_zero_page;
ptrs[disks-1] = dq;
raid6_call.gen_syndrome(disks, bytes, ptrs);
/* Restore pointer table */
ptrs[faila] = dq;
ptrs[disks-1] = q;
/* Now, pick the proper data tables */
qmul = raid6_gfmul[raid6_gfinv[raid6_gfexp[faila]]];
/* Now do it... */
while ( bytes-- ) {
*p++ ^= *dq = qmul[*q ^ *dq];
q++; dq++;
}
}
EXPORT_SYMBOL_GPL(raid6_datap_recov);
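/*
 * Sketch (derived here, not from the original source): with data block
 * a plus P lost,
 *
 *   Qd = Q ^ Q0 = g^a.D   =>   D = g^-a.Qd
 *
 * which is the qmul table above; P is repaired in the same pass by
 * xoring the recovered D back in (*p++ ^= ...).
 */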
#ifndef __KERNEL__
/* Testing only */
/* Recover two failed blocks. */
void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, void **ptrs)
{
if ( faila > failb ) {
int tmp = faila;
faila = failb;
failb = tmp;
}
if ( failb == disks-1 ) {
if ( faila == disks-2 ) {
/* P+Q failure. Just rebuild the syndrome. */
raid6_call.gen_syndrome(disks, bytes, ptrs);
} else {
/* data+Q failure. Reconstruct data from P,
then rebuild syndrome. */
/* NOT IMPLEMENTED - equivalent to RAID-5 */
}
} else {
if ( failb == disks-2 ) {
/* data+P failure. */
raid6_datap_recov(disks, bytes, faila, ptrs);
} else {
/* data+data failure. */
raid6_2data_recov(disks, bytes, faila, failb, ptrs);
}
}
}
#endif
| gpl-2.0 |
Benowit/android_kernel_motorola_msm8226 | arch/powerpc/platforms/embedded6xx/storcenter.c | 7363 | 2976 | /*
* Board setup routines for the storcenter
*
* Copyright 2007 (C) Oyvind Repvik (nail@nslu2-linux.org)
* Copyright 2007 Andy Wilcox, Jon Loeliger
*
* Based on linkstation.c by G. Liakhovetski
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of
* any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/initrd.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/mpic.h>
#include <asm/pci-bridge.h>
#include "mpc10x.h"
static __initdata struct of_device_id storcenter_of_bus[] = {
{ .name = "soc", },
{},
};
static int __init storcenter_device_probe(void)
{
of_platform_bus_probe(NULL, storcenter_of_bus, NULL);
return 0;
}
machine_device_initcall(storcenter, storcenter_device_probe);
static int __init storcenter_add_bridge(struct device_node *dev)
{
#ifdef CONFIG_PCI
int len;
struct pci_controller *hose;
const int *bus_range;
printk("Adding PCI host bridge %s\n", dev->full_name);
hose = pcibios_alloc_controller(dev);
if (hose == NULL)
return -ENOMEM;
bus_range = of_get_property(dev, "bus-range", &len);
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
setup_indirect_pci(hose, MPC10X_MAPB_CNFG_ADDR, MPC10X_MAPB_CNFG_DATA, 0);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
pci_process_bridge_OF_ranges(hose, dev, 1);
#endif
return 0;
}
static void __init storcenter_setup_arch(void)
{
struct device_node *np;
/* Lookup PCI host bridges */
for_each_compatible_node(np, "pci", "mpc10x-pci")
storcenter_add_bridge(np);
printk(KERN_INFO "IOMEGA StorCenter\n");
}
/*
 * Interrupt setup and service. Interrupts on the StorCenter come
* from the four PCI slots plus onboard 8241 devices: I2C, DUART.
*/
static void __init storcenter_init_IRQ(void)
{
struct mpic *mpic;
mpic = mpic_alloc(NULL, 0, 0, 16, 0, " OpenPIC ");
BUG_ON(mpic == NULL);
/*
* 16 Serial Interrupts followed by 16 Internal Interrupts.
* I2C is the second internal, so it is at 17, 0x11020.
*/
mpic_assign_isu(mpic, 0, mpic->paddr + 0x10200);
mpic_assign_isu(mpic, 1, mpic->paddr + 0x11000);
mpic_init(mpic);
}
static void storcenter_restart(char *cmd)
{
local_irq_disable();
/* Set exception prefix high - to the firmware */
_nmask_and_or_msr(0, MSR_IP);
/* Wait for reset to happen */
for (;;) ;
}
static int __init storcenter_probe(void)
{
unsigned long root = of_get_flat_dt_root();
return of_flat_dt_is_compatible(root, "iomega,storcenter");
}
define_machine(storcenter){
.name = "IOMEGA StorCenter",
.probe = storcenter_probe,
.setup_arch = storcenter_setup_arch,
.init_IRQ = storcenter_init_IRQ,
.get_irq = mpic_get_irq,
.restart = storcenter_restart,
.calibrate_decr = generic_calibrate_decr,
};
| gpl-2.0 |
jejecule/android_kernel_oppo_msm8974 | drivers/media/video/cx23885/cimax2.c | 8131 | 12963 | /*
* cimax2.c
*
* CIMax2(R) SP2 driver in conjunction with NetUp Dual DVB-S2 CI card
*
* Copyright (C) 2009 NetUP Inc.
* Copyright (C) 2009 Igor M. Liplianin <liplianin@netup.ru>
* Copyright (C) 2009 Abylay Ospan <aospan@netup.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "cx23885.h"
#include "dvb_ca_en50221.h"
/**** Bit definitions for MC417_RWD and MC417_OEN registers ***
bits 31-16
+-----------+
| Reserved |
+-----------+
bit 15 bit 14 bit 13 bit 12 bit 11 bit 10 bit 9 bit 8
+-------+-------+-------+-------+-------+-------+-------+-------+
| WR# | RD# | | ACK# | ADHI | ADLO | CS1# | CS0# |
+-------+-------+-------+-------+-------+-------+-------+-------+
bit 7 bit 6 bit 5 bit 4 bit 3 bit 2 bit 1 bit 0
+-------+-------+-------+-------+-------+-------+-------+-------+
| DATA7| DATA6| DATA5| DATA4| DATA3| DATA2| DATA1| DATA0|
+-------+-------+-------+-------+-------+-------+-------+-------+
***/
/* MC417 */
#define NETUP_DATA 0x000000ff
#define NETUP_WR 0x00008000
#define NETUP_RD 0x00004000
#define NETUP_ACK 0x00001000
#define NETUP_ADHI 0x00000800
#define NETUP_ADLO 0x00000400
#define NETUP_CS1 0x00000200
#define NETUP_CS0 0x00000100
#define NETUP_EN_ALL 0x00001000
#define NETUP_CTRL_OFF (NETUP_CS1 | NETUP_CS0 | NETUP_WR | NETUP_RD)
#define NETUP_CI_CTL 0x04
#define NETUP_CI_RD 1
#define NETUP_IRQ_DETAM 0x1
#define NETUP_IRQ_IRQAM 0x4
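/*
 * Illustrative sketch: a typical address phase on this GPIO-driven bus
 * is composed from the bits above, e.g. latching the low address byte
 * of a CAM access (the edge on NETUP_ADLO latches the byte):
 *
 *   cx_write(MC417_RWD, NETUP_CTRL_OFF | NETUP_ADLO | (addr & 0xff));
 *   cx_clear(MC417_RWD, NETUP_ADLO);
 *
 * See netup_ci_op_cam() below for the full read/write sequence.
 */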
static unsigned int ci_dbg;
module_param(ci_dbg, int, 0644);
MODULE_PARM_DESC(ci_dbg, "Enable CI debugging");
static unsigned int ci_irq_enable;
module_param(ci_irq_enable, int, 0644);
MODULE_PARM_DESC(ci_irq_enable, "Enable IRQ from CAM");
#define ci_dbg_print(args...) \
do { \
if (ci_dbg) \
printk(KERN_DEBUG args); \
} while (0)
#define ci_irq_flags() (ci_irq_enable ? NETUP_IRQ_IRQAM : 0)
/* stores all private variables for communication with CI */
struct netup_ci_state {
struct dvb_ca_en50221 ca;
struct mutex ca_mutex;
struct i2c_adapter *i2c_adap;
u8 ci_i2c_addr;
int status;
struct work_struct work;
void *priv;
u8 current_irq_mode;
int current_ci_flag;
unsigned long next_status_checked_time;
};
int netup_read_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
u8 *buf, int len)
{
int ret;
struct i2c_msg msg[] = {
{
.addr = addr,
.flags = 0,
.buf = ®,
.len = 1
}, {
.addr = addr,
.flags = I2C_M_RD,
.buf = buf,
.len = len
}
};
ret = i2c_transfer(i2c_adap, msg, 2);
if (ret != 2) {
ci_dbg_print("%s: i2c read error, Reg = 0x%02x, Status = %d\n",
__func__, reg, ret);
return -1;
}
ci_dbg_print("%s: i2c read Addr=0x%04x, Reg = 0x%02x, data = %02x\n",
__func__, addr, reg, buf[0]);
return 0;
}
int netup_write_i2c(struct i2c_adapter *i2c_adap, u8 addr, u8 reg,
u8 *buf, int len)
{
int ret;
u8 buffer[len + 1];
struct i2c_msg msg = {
.addr = addr,
.flags = 0,
.buf = &buffer[0],
.len = len + 1
};
buffer[0] = reg;
memcpy(&buffer[1], buf, len);
ret = i2c_transfer(i2c_adap, &msg, 1);
if (ret != 1) {
ci_dbg_print("%s: i2c write error, Reg=[0x%02x], Status=%d\n",
__func__, reg, ret);
return -1;
}
return 0;
}
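/*
 * Usage sketch (caller-side handling is hypothetical): a one-byte read
 * of CIMaX register 0, the module control/status register:
 *
 *   u8 status;
 *   if (netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
 *                      0, &status, 1) != 0)
 *           return -EIO;
 */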
int netup_ci_get_mem(struct cx23885_dev *dev)
{
int mem;
unsigned long timeout = jiffies + msecs_to_jiffies(1);
for (;;) {
mem = cx_read(MC417_RWD);
if ((mem & NETUP_ACK) == 0)
break;
if (time_after(jiffies, timeout))
break;
udelay(1);
}
cx_set(MC417_RWD, NETUP_CTRL_OFF);
return mem & 0xff;
}
int netup_ci_op_cam(struct dvb_ca_en50221 *en50221, int slot,
u8 flag, u8 read, int addr, u8 data)
{
struct netup_ci_state *state = en50221->data;
struct cx23885_tsport *port = state->priv;
struct cx23885_dev *dev = port->dev;
u8 store;
int mem;
int ret;
if (0 != slot)
return -EINVAL;
if (state->current_ci_flag != flag) {
ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &store, 1);
if (ret != 0)
return ret;
store &= ~0x0c;
store |= flag;
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &store, 1);
if (ret != 0)
return ret;
}
state->current_ci_flag = flag;
mutex_lock(&dev->gpio_lock);
/* write addr */
cx_write(MC417_OEN, NETUP_EN_ALL);
cx_write(MC417_RWD, NETUP_CTRL_OFF |
NETUP_ADLO | (0xff & addr));
cx_clear(MC417_RWD, NETUP_ADLO);
cx_write(MC417_RWD, NETUP_CTRL_OFF |
NETUP_ADHI | (0xff & (addr >> 8)));
cx_clear(MC417_RWD, NETUP_ADHI);
if (read) { /* data in */
cx_write(MC417_OEN, NETUP_EN_ALL | NETUP_DATA);
} else /* data out */
cx_write(MC417_RWD, NETUP_CTRL_OFF | data);
/* choose chip */
cx_clear(MC417_RWD,
(state->ci_i2c_addr == 0x40) ? NETUP_CS0 : NETUP_CS1);
/* read/write */
cx_clear(MC417_RWD, (read) ? NETUP_RD : NETUP_WR);
mem = netup_ci_get_mem(dev);
mutex_unlock(&dev->gpio_lock);
if (!read)
if (mem < 0)
return -EREMOTEIO;
ci_dbg_print("%s: %s: chipaddr=[0x%x] addr=[0x%02x], %s=%x\n", __func__,
(read) ? "read" : "write", state->ci_i2c_addr, addr,
(flag == NETUP_CI_CTL) ? "ctl" : "mem",
(read) ? mem : data);
if (read)
return mem;
return 0;
}
int netup_ci_read_attribute_mem(struct dvb_ca_en50221 *en50221,
int slot, int addr)
{
return netup_ci_op_cam(en50221, slot, 0, NETUP_CI_RD, addr, 0);
}
int netup_ci_write_attribute_mem(struct dvb_ca_en50221 *en50221,
int slot, int addr, u8 data)
{
return netup_ci_op_cam(en50221, slot, 0, 0, addr, data);
}
int netup_ci_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
{
return netup_ci_op_cam(en50221, slot, NETUP_CI_CTL,
NETUP_CI_RD, addr, 0);
}
int netup_ci_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot,
u8 addr, u8 data)
{
return netup_ci_op_cam(en50221, slot, NETUP_CI_CTL, 0, addr, data);
}
int netup_ci_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
{
struct netup_ci_state *state = en50221->data;
u8 buf = 0x80;
int ret;
if (0 != slot)
return -EINVAL;
udelay(500);
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf, 1);
if (ret != 0)
return ret;
udelay(500);
buf = 0x00;
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf, 1);
msleep(1000);
dvb_ca_en50221_camready_irq(&state->ca, 0);
return 0;
}
int netup_ci_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
{
/* not implemented */
return 0;
}
int netup_ci_set_irq(struct dvb_ca_en50221 *en50221, u8 irq_mode)
{
struct netup_ci_state *state = en50221->data;
int ret;
if (irq_mode == state->current_irq_mode)
return 0;
ci_dbg_print("%s: chipaddr=[0x%x] setting ci IRQ to [0x%x] \n",
__func__, state->ci_i2c_addr, irq_mode);
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0x1b, &irq_mode, 1);
if (ret != 0)
return ret;
state->current_irq_mode = irq_mode;
return 0;
}
int netup_ci_slot_ts_ctl(struct dvb_ca_en50221 *en50221, int slot)
{
struct netup_ci_state *state = en50221->data;
u8 buf;
if (0 != slot)
return -EINVAL;
netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf, 1);
buf |= 0x60;
return netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf, 1);
}
/* work handler */
static void netup_read_ci_status(struct work_struct *work)
{
struct netup_ci_state *state =
container_of(work, struct netup_ci_state, work);
u8 buf[33];
int ret;
/* CAM module IRQ processing. fast operation */
dvb_ca_en50221_frda_irq(&state->ca, 0);
/* CAM module INSERT/REMOVE processing. slow operation because of i2c
* transfers */
if (time_after(jiffies, state->next_status_checked_time)
|| !state->status) {
ret = netup_read_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &buf[0], 33);
state->next_status_checked_time = jiffies
+ msecs_to_jiffies(1000);
if (ret != 0)
return;
ci_dbg_print("%s: Slot Status Addr=[0x%04x], "
"Reg=[0x%02x], data=%02x, "
"TS config = %02x\n", __func__,
state->ci_i2c_addr, 0, buf[0],
buf[0]);
if (buf[0] & 1)
state->status = DVB_CA_EN50221_POLL_CAM_PRESENT |
DVB_CA_EN50221_POLL_CAM_READY;
else
state->status = 0;
}
}
/* CI irq handler */
int netup_ci_slot_status(struct cx23885_dev *dev, u32 pci_status)
{
struct cx23885_tsport *port = NULL;
struct netup_ci_state *state = NULL;
ci_dbg_print("%s:\n", __func__);
if (0 == (pci_status & (PCI_MSK_GPIO0 | PCI_MSK_GPIO1)))
return 0;
if (pci_status & PCI_MSK_GPIO0) {
port = &dev->ts1;
state = port->port_priv;
schedule_work(&state->work);
ci_dbg_print("%s: Wakeup CI0\n", __func__);
}
if (pci_status & PCI_MSK_GPIO1) {
port = &dev->ts2;
state = port->port_priv;
schedule_work(&state->work);
ci_dbg_print("%s: Wakeup CI1\n", __func__);
}
return 1;
}
int netup_poll_ci_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open)
{
struct netup_ci_state *state = en50221->data;
if (0 != slot)
return -EINVAL;
netup_ci_set_irq(en50221, open ? (NETUP_IRQ_DETAM | ci_irq_flags())
: NETUP_IRQ_DETAM);
return state->status;
}
int netup_ci_init(struct cx23885_tsport *port)
{
struct netup_ci_state *state;
u8 cimax_init[34] = {
0x00, /* module A control*/
0x00, /* auto select mask high A */
0x00, /* auto select mask low A */
0x00, /* auto select pattern high A */
0x00, /* auto select pattern low A */
0x44, /* memory access time A */
0x00, /* invert input A */
0x00, /* RFU */
0x00, /* RFU */
0x00, /* module B control*/
0x00, /* auto select mask high B */
0x00, /* auto select mask low B */
0x00, /* auto select pattern high B */
0x00, /* auto select pattern low B */
0x44, /* memory access time B */
0x00, /* invert input B */
0x00, /* RFU */
0x00, /* RFU */
0x00, /* auto select mask high Ext */
0x00, /* auto select mask low Ext */
0x00, /* auto select pattern high Ext */
0x00, /* auto select pattern low Ext */
0x00, /* RFU */
0x02, /* destination - module A */
0x01, /* power on (use it like store place) */
0x00, /* RFU */
0x00, /* int status read only */
ci_irq_flags() | NETUP_IRQ_DETAM, /* DETAM, IRQAM unmasked */
0x05, /* EXTINT=active-high, INT=push-pull */
0x00, /* USCG1 */
0x04, /* ack active low */
0x00, /* LOCK = 0 */
0x33, /* serial mode, rising in, rising out, MSB first*/
0x31, /* synchronization */
};
int ret;
ci_dbg_print("%s\n", __func__);
state = kzalloc(sizeof(struct netup_ci_state), GFP_KERNEL);
if (!state) {
ci_dbg_print("%s: Unable create CI structure!\n", __func__);
ret = -ENOMEM;
goto err;
}
port->port_priv = state;
switch (port->nr) {
case 1:
state->ci_i2c_addr = 0x40;
break;
case 2:
state->ci_i2c_addr = 0x41;
break;
}
state->i2c_adap = &port->dev->i2c_bus[0].i2c_adap;
state->ca.owner = THIS_MODULE;
state->ca.read_attribute_mem = netup_ci_read_attribute_mem;
state->ca.write_attribute_mem = netup_ci_write_attribute_mem;
state->ca.read_cam_control = netup_ci_read_cam_ctl;
state->ca.write_cam_control = netup_ci_write_cam_ctl;
state->ca.slot_reset = netup_ci_slot_reset;
state->ca.slot_shutdown = netup_ci_slot_shutdown;
state->ca.slot_ts_enable = netup_ci_slot_ts_ctl;
state->ca.poll_slot_status = netup_poll_ci_slot_status;
state->ca.data = state;
state->priv = port;
state->current_irq_mode = ci_irq_flags() | NETUP_IRQ_DETAM;
ret = netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0, &cimax_init[0], 34);
/* lock registers */
ret |= netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0x1f, &cimax_init[0x18], 1);
/* power on slots */
ret |= netup_write_i2c(state->i2c_adap, state->ci_i2c_addr,
0x18, &cimax_init[0x18], 1);
if (0 != ret)
goto err;
ret = dvb_ca_en50221_init(&port->frontends.adapter,
&state->ca,
/* flags */ 0,
/* n_slots */ 1);
if (0 != ret)
goto err;
INIT_WORK(&state->work, netup_read_ci_status);
schedule_work(&state->work);
ci_dbg_print("%s: CI initialized!\n", __func__);
return 0;
err:
ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret);
kfree(state);
return ret;
}
void netup_ci_exit(struct cx23885_tsport *port)
{
struct netup_ci_state *state;
if (NULL == port)
return;
state = (struct netup_ci_state *)port->port_priv;
if (NULL == state)
return;
if (NULL == state->ca.data)
return;
dvb_ca_en50221_release(&state->ca);
kfree(state);
}
| gpl-2.0 |
zhengsjembest/linux_am335x | linux-3.2.0-psp04.06.00.08.sdk/drivers/spi/spi-omap-100k.c | 8387 | 15967 | /*
* OMAP7xx SPI 100k controller driver
* Author: Fabrice Crohas <fcrohas@gmail.com>
* from original omap1_mcspi driver
*
* Copyright (C) 2005, 2006 Nokia Corporation
* Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
 * Juha Yrjölä <juha.yrjola@nokia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <plat/clock.h>
#define OMAP1_SPI100K_MAX_FREQ 48000000
#define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12)
#define SPI_SETUP1 0x00
#define SPI_SETUP2 0x02
#define SPI_CTRL 0x04
#define SPI_STATUS 0x06
#define SPI_TX_LSB 0x08
#define SPI_TX_MSB 0x0a
#define SPI_RX_LSB 0x0c
#define SPI_RX_MSB 0x0e
#define SPI_SETUP1_INT_READ_ENABLE (1UL << 5)
#define SPI_SETUP1_INT_WRITE_ENABLE (1UL << 4)
#define SPI_SETUP1_CLOCK_DIVISOR(x) ((x) << 1)
#define SPI_SETUP1_CLOCK_ENABLE (1UL << 0)
#define SPI_SETUP2_ACTIVE_EDGE_FALLING (0UL << 0)
#define SPI_SETUP2_ACTIVE_EDGE_RISING (1UL << 0)
#define SPI_SETUP2_NEGATIVE_LEVEL (0UL << 5)
#define SPI_SETUP2_POSITIVE_LEVEL (1UL << 5)
#define SPI_SETUP2_LEVEL_TRIGGER (0UL << 10)
#define SPI_SETUP2_EDGE_TRIGGER (1UL << 10)
#define SPI_CTRL_SEN(x) ((x) << 7)
#define SPI_CTRL_WORD_SIZE(x) (((x) - 1) << 2)
#define SPI_CTRL_WR (1UL << 1)
#define SPI_CTRL_RD (1UL << 0)
#define SPI_STATUS_WE (1UL << 1)
#define SPI_STATUS_RD (1UL << 0)
#define WRITE 0
#define READ 1
/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
* cache operations; better heuristics consider wordsize and bitrate.
*/
#define DMA_MIN_BYTES 8
#define SPI_RUNNING 0
#define SPI_SHUTDOWN 1
struct omap1_spi100k {
struct work_struct work;
/* lock protects queue and registers */
spinlock_t lock;
struct list_head msg_queue;
struct spi_master *master;
struct clk *ick;
struct clk *fck;
/* Virtual base address of the controller */
void __iomem *base;
/* State of the SPI */
unsigned int state;
};
struct omap1_spi100k_cs {
void __iomem *base;
int word_len;
};
static struct workqueue_struct *omap1_spi100k_wq;
#define MOD_REG_BIT(val, mask, set) do { \
if (set) \
val |= mask; \
else \
val &= ~mask; \
} while (0)
static void spi100k_enable_clock(struct spi_master *master)
{
unsigned int val;
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
/* enable SPI */
val = readw(spi100k->base + SPI_SETUP1);
val |= SPI_SETUP1_CLOCK_ENABLE;
writew(val, spi100k->base + SPI_SETUP1);
}
static void spi100k_disable_clock(struct spi_master *master)
{
unsigned int val;
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
/* disable SPI */
val = readw(spi100k->base + SPI_SETUP1);
val &= ~SPI_SETUP1_CLOCK_ENABLE;
writew(val, spi100k->base + SPI_SETUP1);
}
static void spi100k_write_data(struct spi_master *master, int len, int data)
{
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
/* write 16-bit word, shifting 8-bit data if necessary */
if (len <= 8) {
data <<= 8;
len = 16;
}
spi100k_enable_clock(master);
writew( data , spi100k->base + SPI_TX_MSB);
writew(SPI_CTRL_SEN(0) |
SPI_CTRL_WORD_SIZE(len) |
SPI_CTRL_WR,
spi100k->base + SPI_CTRL);
/* Wait for bit ack send change */
while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE);
udelay(1000);
spi100k_disable_clock(master);
}
static int spi100k_read_data(struct spi_master *master, int len)
{
int dataH,dataL;
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
/* Always do at least 16 bits */
if (len <= 8)
len = 16;
spi100k_enable_clock(master);
writew(SPI_CTRL_SEN(0) |
SPI_CTRL_WORD_SIZE(len) |
SPI_CTRL_RD,
spi100k->base + SPI_CTRL);
while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD);
udelay(1000);
dataL = readw(spi100k->base + SPI_RX_LSB);
dataH = readw(spi100k->base + SPI_RX_MSB);
spi100k_disable_clock(master);
return dataL;
}
static void spi100k_open(struct spi_master *master)
{
/* get control of SPI */
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
writew(SPI_SETUP1_INT_READ_ENABLE |
SPI_SETUP1_INT_WRITE_ENABLE |
SPI_SETUP1_CLOCK_DIVISOR(0), spi100k->base + SPI_SETUP1);
/* configure clock and interrupts */
writew(SPI_SETUP2_ACTIVE_EDGE_FALLING |
SPI_SETUP2_NEGATIVE_LEVEL |
SPI_SETUP2_LEVEL_TRIGGER, spi100k->base + SPI_SETUP2);
}
static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable)
{
if (enable)
writew(0x05fc, spi100k->base + SPI_CTRL);
else
writew(0x05fd, spi100k->base + SPI_CTRL);
}
static unsigned
omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
struct omap1_spi100k *spi100k;
struct omap1_spi100k_cs *cs = spi->controller_state;
unsigned int count, c;
int word_len;
spi100k = spi_master_get_devdata(spi->master);
count = xfer->len;
c = count;
word_len = cs->word_len;
if (word_len <= 8) {
u8 *rx;
const u8 *tx;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
c-=1;
if (xfer->tx_buf != NULL)
spi100k_write_data(spi->master, word_len, *tx++);
if (xfer->rx_buf != NULL)
*rx++ = spi100k_read_data(spi->master, word_len);
} while(c);
} else if (word_len <= 16) {
u16 *rx;
const u16 *tx;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
c-=2;
if (xfer->tx_buf != NULL)
spi100k_write_data(spi->master,word_len, *tx++);
if (xfer->rx_buf != NULL)
*rx++ = spi100k_read_data(spi->master,word_len);
} while(c);
} else if (word_len <= 32) {
u32 *rx;
const u32 *tx;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
c-=4;
if (xfer->tx_buf != NULL)
spi100k_write_data(spi->master, word_len, *tx++);
if (xfer->rx_buf != NULL)
*rx++ = spi100k_read_data(spi->master, word_len);
} while(c);
}
return count - c;
}
/* called only when no transfer is active to this device */
static int omap1_spi100k_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct omap1_spi100k *spi100k = spi_master_get_devdata(spi->master);
struct omap1_spi100k_cs *cs = spi->controller_state;
u8 word_len = spi->bits_per_word;
if (t != NULL && t->bits_per_word)
word_len = t->bits_per_word;
if (!word_len)
word_len = 8;
if (spi->bits_per_word > 32)
return -EINVAL;
cs->word_len = word_len;
/* SPI init before transfer */
writew(0x3e , spi100k->base + SPI_SETUP1);
writew(0x00 , spi100k->base + SPI_STATUS);
writew(0x3e , spi100k->base + SPI_CTRL);
return 0;
}
/* the spi->mode bits understood by this driver: */
#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
static int omap1_spi100k_setup(struct spi_device *spi)
{
int ret;
struct omap1_spi100k *spi100k;
struct omap1_spi100k_cs *cs = spi->controller_state;
if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
spi->bits_per_word);
return -EINVAL;
}
spi100k = spi_master_get_devdata(spi->master);
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->base = spi100k->base + spi->chip_select * 0x14;
spi->controller_state = cs;
}
spi100k_open(spi->master);
clk_enable(spi100k->ick);
clk_enable(spi100k->fck);
ret = omap1_spi100k_setup_transfer(spi, NULL);
clk_disable(spi100k->ick);
clk_disable(spi100k->fck);
return ret;
}
static void omap1_spi100k_work(struct work_struct *work)
{
struct omap1_spi100k *spi100k;
int status = 0;
spi100k = container_of(work, struct omap1_spi100k, work);
spin_lock_irq(&spi100k->lock);
clk_enable(spi100k->ick);
clk_enable(spi100k->fck);
/* We only enable one channel at a time -- the one whose message is
* at the head of the queue -- although this controller would gladly
* arbitrate among multiple channels. This corresponds to "single
* channel" master mode. As a side effect, we need to manage the
* chipselect with the FORCE bit ... CS != channel enable.
*/
while (!list_empty(&spi100k->msg_queue)) {
struct spi_message *m;
struct spi_device *spi;
struct spi_transfer *t = NULL;
int cs_active = 0;
struct omap1_spi100k_cs *cs;
int par_override = 0;
m = container_of(spi100k->msg_queue.next, struct spi_message,
queue);
list_del_init(&m->queue);
spin_unlock_irq(&spi100k->lock);
spi = m->spi;
cs = spi->controller_state;
list_for_each_entry(t, &m->transfers, transfer_list) {
if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
status = -EINVAL;
break;
}
if (par_override || t->speed_hz || t->bits_per_word) {
par_override = 1;
status = omap1_spi100k_setup_transfer(spi, t);
if (status < 0)
break;
if (!t->speed_hz && !t->bits_per_word)
par_override = 0;
}
if (!cs_active) {
omap1_spi100k_force_cs(spi100k, 1);
cs_active = 1;
}
if (t->len) {
unsigned count;
count = omap1_spi100k_txrx_pio(spi, t);
m->actual_length += count;
if (count != t->len) {
status = -EIO;
break;
}
}
if (t->delay_usecs)
udelay(t->delay_usecs);
/* ignore the "leave it on after last xfer" hint */
if (t->cs_change) {
omap1_spi100k_force_cs(spi100k, 0);
cs_active = 0;
}
}
/* Restore defaults if they were overridden */
if (par_override) {
par_override = 0;
status = omap1_spi100k_setup_transfer(spi, NULL);
}
if (cs_active)
omap1_spi100k_force_cs(spi100k, 0);
m->status = status;
m->complete(m->context);
spin_lock_irq(&spi100k->lock);
}
clk_disable(spi100k->ick);
clk_disable(spi100k->fck);
spin_unlock_irq(&spi100k->lock);
if (status < 0)
printk(KERN_WARNING "spi transfer failed with %d\n", status);
}
static int omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m)
{
struct omap1_spi100k *spi100k;
unsigned long flags;
struct spi_transfer *t;
m->actual_length = 0;
m->status = -EINPROGRESS;
spi100k = spi_master_get_devdata(spi->master);
/* Don't accept new work if we're shutting down */
if (spi100k->state == SPI_SHUTDOWN)
return -ESHUTDOWN;
/* reject invalid messages and transfers */
if (list_empty(&m->transfers) || !m->complete)
return -EINVAL;
list_for_each_entry(t, &m->transfers, transfer_list) {
const void *tx_buf = t->tx_buf;
void *rx_buf = t->rx_buf;
unsigned len = t->len;
if (t->speed_hz > OMAP1_SPI100K_MAX_FREQ
|| (len && !(rx_buf || tx_buf))
|| (t->bits_per_word &&
( t->bits_per_word < 4
|| t->bits_per_word > 32))) {
dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
t->speed_hz,
len,
tx_buf ? "tx" : "",
rx_buf ? "rx" : "",
t->bits_per_word);
return -EINVAL;
}
if (t->speed_hz && t->speed_hz < OMAP1_SPI100K_MAX_FREQ/(1<<16)) {
dev_dbg(&spi->dev, "%d Hz max exceeds %d\n",
t->speed_hz,
OMAP1_SPI100K_MAX_FREQ/(1<<16));
return -EINVAL;
}
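/*
 * For reference (assuming a 16-bit clock divisor field): the slowest
 * supported rate is OMAP1_SPI100K_MAX_FREQ / (1 << 16), i.e.
 * 48000000 / 65536 ~= 732 Hz, hence the lower bound checked above.
 */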
}
spin_lock_irqsave(&spi100k->lock, flags);
list_add_tail(&m->queue, &spi100k->msg_queue);
queue_work(omap1_spi100k_wq, &spi100k->work);
spin_unlock_irqrestore(&spi100k->lock, flags);
return 0;
}
static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k)
{
return 0;
}
static int __devinit omap1_spi100k_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct omap1_spi100k *spi100k;
int status = 0;
if (!pdev->id)
return -EINVAL;
master = spi_alloc_master(&pdev->dev, sizeof *spi100k);
if (master == NULL) {
dev_dbg(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
}
if (pdev->id != -1)
master->bus_num = pdev->id;
master->setup = omap1_spi100k_setup;
master->transfer = omap1_spi100k_transfer;
master->cleanup = NULL;
master->num_chipselect = 2;
master->mode_bits = MODEBITS;
dev_set_drvdata(&pdev->dev, master);
spi100k = spi_master_get_devdata(master);
spi100k->master = master;
/*
* The memory region base address is taken as the platform_data.
* You should allocate this with ioremap() before initializing
* the SPI.
*/
spi100k->base = (void __iomem *) pdev->dev.platform_data;
INIT_WORK(&spi100k->work, omap1_spi100k_work);
spin_lock_init(&spi100k->lock);
INIT_LIST_HEAD(&spi100k->msg_queue);
spi100k->ick = clk_get(&pdev->dev, "ick");
if (IS_ERR(spi100k->ick)) {
dev_dbg(&pdev->dev, "can't get spi100k_ick\n");
status = PTR_ERR(spi100k->ick);
goto err1;
}
spi100k->fck = clk_get(&pdev->dev, "fck");
if (IS_ERR(spi100k->fck)) {
dev_dbg(&pdev->dev, "can't get spi100k_fck\n");
status = PTR_ERR(spi100k->fck);
goto err2;
}
if (omap1_spi100k_reset(spi100k) < 0)
goto err3;
status = spi_register_master(master);
if (status < 0)
goto err3;
spi100k->state = SPI_RUNNING;
return status;
err3:
clk_put(spi100k->fck);
err2:
clk_put(spi100k->ick);
err1:
spi_master_put(master);
return status;
}
static int __exit omap1_spi100k_remove(struct platform_device *pdev)
{
struct spi_master *master;
struct omap1_spi100k *spi100k;
struct resource *r;
unsigned limit = 500;
unsigned long flags;
int status = 0;
master = dev_get_drvdata(&pdev->dev);
spi100k = spi_master_get_devdata(master);
spin_lock_irqsave(&spi100k->lock, flags);
spi100k->state = SPI_SHUTDOWN;
while (!list_empty(&spi100k->msg_queue) && limit--) {
spin_unlock_irqrestore(&spi100k->lock, flags);
msleep(10);
spin_lock_irqsave(&spi100k->lock, flags);
}
if (!list_empty(&spi100k->msg_queue))
status = -EBUSY;
spin_unlock_irqrestore(&spi100k->lock, flags);
if (status != 0)
return status;
clk_put(spi100k->fck);
clk_put(spi100k->ick);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi_unregister_master(master);
return 0;
}
static struct platform_driver omap1_spi100k_driver = {
.driver = {
.name = "omap1_spi100k",
.owner = THIS_MODULE,
},
.remove = __exit_p(omap1_spi100k_remove),
};
static int __init omap1_spi100k_init(void)
{
omap1_spi100k_wq = create_singlethread_workqueue(
omap1_spi100k_driver.driver.name);
if (omap1_spi100k_wq == NULL)
return -1;
return platform_driver_probe(&omap1_spi100k_driver, omap1_spi100k_probe);
}
static void __exit omap1_spi100k_exit(void)
{
platform_driver_unregister(&omap1_spi100k_driver);
destroy_workqueue(omap1_spi100k_wq);
}
module_init(omap1_spi100k_init);
module_exit(omap1_spi100k_exit);
MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver");
MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
garwynn/L720_MDL_Kernel | drivers/input/input-compat.c | 8899 | 3372 | /*
* 32bit compatibility wrappers for the input subsystem.
*
* Very heavily based on evdev.c - Copyright (c) 1999-2002 Vojtech Pavlik
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <linux/export.h>
#include <asm/uaccess.h>
#include "input-compat.h"
#ifdef CONFIG_COMPAT
int input_event_from_user(const char __user *buffer,
struct input_event *event)
{
if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
struct input_event_compat compat_event;
if (copy_from_user(&compat_event, buffer,
sizeof(struct input_event_compat)))
return -EFAULT;
event->time.tv_sec = compat_event.time.tv_sec;
event->time.tv_usec = compat_event.time.tv_usec;
event->type = compat_event.type;
event->code = compat_event.code;
event->value = compat_event.value;
} else {
if (copy_from_user(event, buffer, sizeof(struct input_event)))
return -EFAULT;
}
return 0;
}
int input_event_to_user(char __user *buffer,
const struct input_event *event)
{
if (INPUT_COMPAT_TEST && !COMPAT_USE_64BIT_TIME) {
struct input_event_compat compat_event;
compat_event.time.tv_sec = event->time.tv_sec;
compat_event.time.tv_usec = event->time.tv_usec;
compat_event.type = event->type;
compat_event.code = event->code;
compat_event.value = event->value;
if (copy_to_user(buffer, &compat_event,
sizeof(struct input_event_compat)))
return -EFAULT;
} else {
if (copy_to_user(buffer, event, sizeof(struct input_event)))
return -EFAULT;
}
return 0;
}
int input_ff_effect_from_user(const char __user *buffer, size_t size,
struct ff_effect *effect)
{
if (INPUT_COMPAT_TEST) {
struct ff_effect_compat *compat_effect;
if (size != sizeof(struct ff_effect_compat))
return -EINVAL;
/*
* It so happens that the pointer which needs to be changed
* is the last field in the structure, so we can retrieve the
* whole thing and replace just the pointer.
*/
compat_effect = (struct ff_effect_compat *)effect;
if (copy_from_user(compat_effect, buffer,
sizeof(struct ff_effect_compat)))
return -EFAULT;
if (compat_effect->type == FF_PERIODIC &&
compat_effect->u.periodic.waveform == FF_CUSTOM)
effect->u.periodic.custom_data =
compat_ptr(compat_effect->u.periodic.custom_data);
} else {
if (size != sizeof(struct ff_effect))
return -EINVAL;
if (copy_from_user(effect, buffer, sizeof(struct ff_effect)))
return -EFAULT;
}
return 0;
}
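/*
 * For reference: the cast above works only because ff_effect_compat
 * mirrors ff_effect field-for-field up to the trailing custom_data
 * pointer (compat_uptr_t vs. native pointer), so the smaller compat
 * image can be copied into the native buffer and just that one pointer
 * fixed up afterwards.
 */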
#else
int input_event_from_user(const char __user *buffer,
struct input_event *event)
{
if (copy_from_user(event, buffer, sizeof(struct input_event)))
return -EFAULT;
return 0;
}
int input_event_to_user(char __user *buffer,
const struct input_event *event)
{
if (copy_to_user(buffer, event, sizeof(struct input_event)))
return -EFAULT;
return 0;
}
int input_ff_effect_from_user(const char __user *buffer, size_t size,
struct ff_effect *effect)
{
if (size != sizeof(struct ff_effect))
return -EINVAL;
if (copy_from_user(effect, buffer, sizeof(struct ff_effect)))
return -EFAULT;
return 0;
}
#endif /* CONFIG_COMPAT */
EXPORT_SYMBOL_GPL(input_event_from_user);
EXPORT_SYMBOL_GPL(input_event_to_user);
EXPORT_SYMBOL_GPL(input_ff_effect_from_user);
| gpl-2.0 |
MaxiCM/android_kernel_google_msm | Documentation/ptp/testptp.c | 10435 | 8946 | /*
* PTP 1588 clock support - User space test program
*
* Copyright (C) 2010 OMICRON electronics GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/timex.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <linux/ptp_clock.h>
#define DEVICE "/dev/ptp0"
#ifndef ADJ_SETOFFSET
#define ADJ_SETOFFSET 0x0100
#endif
#ifndef CLOCK_INVALID
#define CLOCK_INVALID -1
#endif
/* When glibc offers the syscall, this will go away. */
#include <sys/syscall.h>
static int clock_adjtime(clockid_t id, struct timex *tx)
{
return syscall(__NR_clock_adjtime, id, tx);
}
static clockid_t get_clockid(int fd)
{
#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)
return FD_TO_CLOCKID(fd);
}
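/*
 * For reference: FD_TO_CLOCKID() mirrors the kernel's dynamic
 * posix-clock encoding; the fd, bit-inverted, occupies the upper bits
 * with CLOCKFD (3) in the low three bits, so e.g. fd 3 becomes
 * ((~3) << 3) | 3.
 */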
static void handle_alarm(int s)
{
printf("received signal %d\n", s);
}
static int install_handler(int signum, void (*handler)(int))
{
struct sigaction action;
sigset_t mask;
/* Unblock the signal. */
sigemptyset(&mask);
sigaddset(&mask, signum);
sigprocmask(SIG_UNBLOCK, &mask, NULL);
/* Install the signal handler. */
action.sa_handler = handler;
action.sa_flags = 0;
sigemptyset(&action.sa_mask);
sigaction(signum, &action, NULL);
return 0;
}
static long ppb_to_scaled_ppm(int ppb)
{
/*
* The 'freq' field in the 'struct timex' is in parts per
* million, but with a 16 bit binary fractional field.
* Instead of calculating either one of
*
* scaled_ppm = (ppb / 1000) << 16 [1]
* scaled_ppm = (ppb << 16) / 1000 [2]
*
* we simply use double precision math, in order to avoid the
* truncation in [1] and the possible overflow in [2].
*/
return (long) (ppb * 65.536);
}
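/*
 * Worked example: +1000 ppb (1 ppm) maps onto the kernel's 16.16
 * fixed-point 'freq' field as
 *
 *   ppb_to_scaled_ppm(1000) == (long)(1000 * 65.536) == 65536 == 1 << 16
 */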
static void usage(char *progname)
{
fprintf(stderr,
"usage: %s [options]\n"
" -a val request a one-shot alarm after 'val' seconds\n"
" -A val request a periodic alarm every 'val' seconds\n"
" -c query the ptp clock's capabilities\n"
" -d name device to open\n"
" -e val read 'val' external time stamp events\n"
" -f val adjust the ptp clock frequency by 'val' ppb\n"
" -g get the ptp clock time\n"
" -h prints this message\n"
" -p val enable output with a period of 'val' nanoseconds\n"
" -P val enable or disable (val=1|0) the system clock PPS\n"
" -s set the ptp clock time from the system time\n"
" -S set the system time from the ptp clock time\n"
" -t val shift the ptp clock time by 'val' seconds\n",
progname);
}
int main(int argc, char *argv[])
{
struct ptp_clock_caps caps;
struct ptp_extts_event event;
struct ptp_extts_request extts_request;
struct ptp_perout_request perout_request;
struct timespec ts;
struct timex tx;
static timer_t timerid;
struct itimerspec timeout;
struct sigevent sigevent;
char *progname;
int c, cnt, fd;
char *device = DEVICE;
clockid_t clkid;
int adjfreq = 0x7fffffff;
int adjtime = 0;
int capabilities = 0;
int extts = 0;
int gettime = 0;
int oneshot = 0;
int periodic = 0;
int perout = -1;
int pps = -1;
int settime = 0;
progname = strrchr(argv[0], '/');
progname = progname ? 1+progname : argv[0];
while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghp:P:sSt:v"))) {
switch (c) {
case 'a':
oneshot = atoi(optarg);
break;
case 'A':
periodic = atoi(optarg);
break;
case 'c':
capabilities = 1;
break;
case 'd':
device = optarg;
break;
case 'e':
extts = atoi(optarg);
break;
case 'f':
adjfreq = atoi(optarg);
break;
case 'g':
gettime = 1;
break;
case 'p':
perout = atoi(optarg);
break;
case 'P':
pps = atoi(optarg);
break;
case 's':
settime = 1;
break;
case 'S':
settime = 2;
break;
case 't':
adjtime = atoi(optarg);
break;
case 'h':
usage(progname);
return 0;
case '?':
default:
usage(progname);
return -1;
}
}
fd = open(device, O_RDWR);
if (fd < 0) {
fprintf(stderr, "opening %s: %s\n", device, strerror(errno));
return -1;
}
clkid = get_clockid(fd);
if (CLOCK_INVALID == clkid) {
fprintf(stderr, "failed to read clock id\n");
return -1;
}
if (capabilities) {
if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
perror("PTP_CLOCK_GETCAPS");
} else {
printf("capabilities:\n"
" %d maximum frequency adjustment (ppb)\n"
" %d programmable alarms\n"
" %d external time stamp channels\n"
" %d programmable periodic signals\n"
" %d pulse per second\n",
caps.max_adj,
caps.n_alarm,
caps.n_ext_ts,
caps.n_per_out,
caps.pps);
}
}
if (0x7fffffff != adjfreq) {
memset(&tx, 0, sizeof(tx));
tx.modes = ADJ_FREQUENCY;
tx.freq = ppb_to_scaled_ppm(adjfreq);
if (clock_adjtime(clkid, &tx)) {
perror("clock_adjtime");
} else {
puts("frequency adjustment okay");
}
}
if (adjtime) {
memset(&tx, 0, sizeof(tx));
tx.modes = ADJ_SETOFFSET;
tx.time.tv_sec = adjtime;
tx.time.tv_usec = 0;
if (clock_adjtime(clkid, &tx) < 0) {
perror("clock_adjtime");
} else {
puts("time shift okay");
}
}
if (gettime) {
if (clock_gettime(clkid, &ts)) {
perror("clock_gettime");
} else {
printf("clock time: %ld.%09ld or %s",
ts.tv_sec, ts.tv_nsec, ctime(&ts.tv_sec));
}
}
if (settime == 1) {
clock_gettime(CLOCK_REALTIME, &ts);
if (clock_settime(clkid, &ts)) {
perror("clock_settime");
} else {
puts("set time okay");
}
}
if (settime == 2) {
clock_gettime(clkid, &ts);
if (clock_settime(CLOCK_REALTIME, &ts)) {
perror("clock_settime");
} else {
puts("set time okay");
}
}
if (extts) {
memset(&extts_request, 0, sizeof(extts_request));
extts_request.index = 0;
extts_request.flags = PTP_ENABLE_FEATURE;
if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
perror("PTP_EXTTS_REQUEST");
extts = 0;
} else {
puts("external time stamp request okay");
}
for (; extts; extts--) {
cnt = read(fd, &event, sizeof(event));
if (cnt != sizeof(event)) {
perror("read");
break;
}
printf("event index %u at %lld.%09u\n", event.index,
event.t.sec, event.t.nsec);
fflush(stdout);
}
/* Disable the feature again. */
extts_request.flags = 0;
if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
perror("PTP_EXTTS_REQUEST");
}
}
if (oneshot) {
install_handler(SIGALRM, handle_alarm);
/* Create a timer. */
sigevent.sigev_notify = SIGEV_SIGNAL;
sigevent.sigev_signo = SIGALRM;
if (timer_create(clkid, &sigevent, &timerid)) {
perror("timer_create");
return -1;
}
/* Start the timer. */
memset(&timeout, 0, sizeof(timeout));
timeout.it_value.tv_sec = oneshot;
if (timer_settime(timerid, 0, &timeout, NULL)) {
perror("timer_settime");
return -1;
}
pause();
timer_delete(timerid);
}
if (periodic) {
install_handler(SIGALRM, handle_alarm);
/* Create a timer. */
sigevent.sigev_notify = SIGEV_SIGNAL;
sigevent.sigev_signo = SIGALRM;
if (timer_create(clkid, &sigevent, &timerid)) {
perror("timer_create");
return -1;
}
/* Start the timer. */
memset(&timeout, 0, sizeof(timeout));
timeout.it_interval.tv_sec = periodic;
timeout.it_value.tv_sec = periodic;
if (timer_settime(timerid, 0, &timeout, NULL)) {
perror("timer_settime");
return -1;
}
while (1) {
pause();
}
timer_delete(timerid);
}
if (perout >= 0) {
if (clock_gettime(clkid, &ts)) {
perror("clock_gettime");
return -1;
}
memset(&perout_request, 0, sizeof(perout_request));
perout_request.index = 0;
perout_request.start.sec = ts.tv_sec + 2;
perout_request.start.nsec = 0;
perout_request.period.sec = 0;
perout_request.period.nsec = perout;
if (ioctl(fd, PTP_PEROUT_REQUEST, &perout_request)) {
perror("PTP_PEROUT_REQUEST");
} else {
puts("periodic output request okay");
}
}
if (pps != -1) {
int enable = pps ? 1 : 0;
if (ioctl(fd, PTP_ENABLE_PPS, enable)) {
perror("PTP_ENABLE_PPS");
} else {
puts("pps for system time request okay");
}
}
close(fd);
return 0;
}
| gpl-2.0 |
richardtrip/Iconia_a500_3.2 | drivers/block/paride/aten.c | 15555 | 3343 | /*
aten.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
Under the terms of the GNU General Public License.
aten.c is a low-level protocol driver for the ATEN EH-100
parallel port adapter. The EH-100 supports 4-bit and 8-bit
modes only. There is also an EH-132 which supports EPP mode
transfers. The EH-132 is not yet supported.
*/
/* Changes:
1.01 GRG 1998.05.05 init_proto, release_proto
*/
#define ATEN_VERSION "1.01"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <asm/io.h>
#include "paride.h"
#define j44(a,b) ((((a>>4)&0x0f)|(b&0xf0))^0x88)
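/*
 * For reference: j44() joins two 4-bit reads into one byte -- the low
 * nibble from a>>4, the high nibble from b -- and the final ^0x88
 * presumably undoes inverted status bits; e.g.
 * j44(0x50, 0xa0) == ((0x05 | 0xa0) ^ 0x88) == 0x2d.
 */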
/* cont = 0 - access the IDE register file
cont = 1 - access the IDE command set
*/
static int cont_map[2] = { 0x08, 0x20 };
static void aten_write_regr( PIA *pi, int cont, int regr, int val)
{ int r;
r = regr + cont_map[cont] + 0x80;
w0(r); w2(0xe); w2(6); w0(val); w2(7); w2(6); w2(0xc);
}
static int aten_read_regr( PIA *pi, int cont, int regr )
{ int a, b, r;
r = regr + cont_map[cont] + 0x40;
switch (pi->mode) {
case 0: w0(r); w2(0xe); w2(6);
w2(7); w2(6); w2(0);
a = r1(); w0(0x10); b = r1(); w2(0xc);
return j44(a,b);
case 1: r |= 0x10;
w0(r); w2(0xe); w2(6); w0(0xff);
w2(0x27); w2(0x26); w2(0x20);
a = r0();
w2(0x26); w2(0xc);
return a;
}
return -1;
}
static void aten_read_block( PIA *pi, char * buf, int count )
{ int k, a, b, c, d;
switch (pi->mode) {
case 0: w0(0x48); w2(0xe); w2(6);
for (k=0;k<count/2;k++) {
w2(7); w2(6); w2(2);
a = r1(); w0(0x58); b = r1();
w2(0); d = r1(); w0(0x48); c = r1();
buf[2*k] = j44(c,d);
buf[2*k+1] = j44(a,b);
}
w2(0xc);
break;
case 1: w0(0x58); w2(0xe); w2(6);
for (k=0;k<count/2;k++) {
w2(0x27); w2(0x26); w2(0x22);
a = r0(); w2(0x20); b = r0();
buf[2*k] = b; buf[2*k+1] = a;
}
w2(0x26); w2(0xc);
break;
}
}
static void aten_write_block( PIA *pi, char * buf, int count )
{ int k;
w0(0x88); w2(0xe); w2(6);
for (k=0;k<count/2;k++) {
w0(buf[2*k+1]); w2(0xe); w2(6);
w0(buf[2*k]); w2(7); w2(6);
}
w2(0xc);
}
static void aten_connect ( PIA *pi )
{ pi->saved_r0 = r0();
pi->saved_r2 = r2();
w2(0xc);
}
static void aten_disconnect ( PIA *pi )
{ w0(pi->saved_r0);
w2(pi->saved_r2);
}
static void aten_log_adapter( PIA *pi, char * scratch, int verbose )
{ char *mode_string[2] = {"4-bit","8-bit"};
printk("%s: aten %s, ATEN EH-100 at 0x%x, ",
pi->device,ATEN_VERSION,pi->port);
printk("mode %d (%s), delay %d\n",pi->mode,
mode_string[pi->mode],pi->delay);
}
static struct pi_protocol aten = {
.owner = THIS_MODULE,
.name = "aten",
.max_mode = 2,
.epp_first = 2,
.default_delay = 1,
.max_units = 1,
.write_regr = aten_write_regr,
.read_regr = aten_read_regr,
.write_block = aten_write_block,
.read_block = aten_read_block,
.connect = aten_connect,
.disconnect = aten_disconnect,
.log_adapter = aten_log_adapter,
};
static int __init aten_init(void)
{
return paride_register(&aten);
}
static void __exit aten_exit(void)
{
paride_unregister( &aten );
}
MODULE_LICENSE("GPL");
module_init(aten_init)
module_exit(aten_exit)
| gpl-2.0 |
PAC-lge/lge-kernel-star | drivers/gpio/wm831x-gpio.c | 452 | 7647 | /*
* wm831x-gpio.c -- gpiolib support for Wolfson WM831x PMICs
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
#include <linux/mfd/wm831x/gpio.h>
#include <linux/mfd/wm831x/irq.h>
struct wm831x_gpio {
struct wm831x *wm831x;
struct gpio_chip gpio_chip;
};
static inline struct wm831x_gpio *to_wm831x_gpio(struct gpio_chip *chip)
{
return container_of(chip, struct wm831x_gpio, gpio_chip);
}
static int wm831x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
int val = WM831X_GPN_DIR;
if (wm831x->has_gpio_ena)
val |= WM831X_GPN_TRI;
return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset,
WM831X_GPN_DIR | WM831X_GPN_TRI |
WM831X_GPN_FN_MASK, val);
}
static int wm831x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
int ret;
ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
if (ret < 0)
return ret;
if (ret & 1 << offset)
return 1;
else
return 0;
}
static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
value << offset);
}
static int wm831x_gpio_direction_out(struct gpio_chip *chip,
unsigned offset, int value)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
int val = 0;
int ret;
if (wm831x->has_gpio_ena)
val |= WM831X_GPN_TRI;
ret = wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset,
WM831X_GPN_DIR | WM831X_GPN_TRI |
WM831X_GPN_FN_MASK, val);
if (ret < 0)
return ret;
/* Can only set GPIO state once it's in output mode */
wm831x_gpio_set(chip, offset, value);
return 0;
}
static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
if (!wm831x->irq_base)
return -EINVAL;
return wm831x->irq_base + WM831X_IRQ_GPIO_1 + offset;
}
static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
unsigned debounce)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
int reg = WM831X_GPIO1_CONTROL + offset;
int ret, fn;
ret = wm831x_reg_read(wm831x, reg);
if (ret < 0)
return ret;
switch (ret & WM831X_GPN_FN_MASK) {
case 0:
case 1:
break;
default:
/* Not in GPIO mode */
return -EBUSY;
}
if (debounce >= 32 && debounce <= 64)
fn = 0;
else if (debounce >= 4000 && debounce <= 8000)
fn = 1;
else
return -EINVAL;
return wm831x_set_bits(wm831x, reg, WM831X_GPN_FN_MASK, fn);
}
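/*
 * Illustrative mapping (hypothetical call; values taken from the ranges
 * checked above): gpio_set_debounce(chip->base + offset, 5000) reaches
 * this callback with debounce = 5000 us and selects fn = 1 (the
 * 4000-8000 us hardware debounce); debounce = 50 would select fn = 0
 * (the 32-64 us range).
 */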
#ifdef CONFIG_DEBUG_FS
static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
int i, tristated;
for (i = 0; i < chip->ngpio; i++) {
int gpio = i + chip->base;
int reg;
const char *label, *pull, *powerdomain;
/* We report the GPIO even if it's not requested since
* we're also reporting things like alternate
* functions which apply even when the GPIO is not in
* use as a GPIO.
*/
label = gpiochip_is_requested(chip, i);
if (!label)
label = "Unrequested";
seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio, label);
reg = wm831x_reg_read(wm831x, WM831X_GPIO1_CONTROL + i);
if (reg < 0) {
dev_err(wm831x->dev,
"GPIO control %d read failed: %d\n",
gpio, reg);
seq_printf(s, "\n");
continue;
}
switch (reg & WM831X_GPN_PULL_MASK) {
case WM831X_GPIO_PULL_NONE:
pull = "nopull";
break;
case WM831X_GPIO_PULL_DOWN:
pull = "pulldown";
break;
case WM831X_GPIO_PULL_UP:
pull = "pullup";
default:
pull = "INVALID PULL";
break;
}
switch (i + 1) {
case 1 ... 3:
case 7 ... 9:
if (reg & WM831X_GPN_PWR_DOM)
powerdomain = "VPMIC";
else
powerdomain = "DBVDD";
break;
case 4 ... 6:
case 10 ... 12:
if (reg & WM831X_GPN_PWR_DOM)
powerdomain = "SYSVDD";
else
powerdomain = "DBVDD";
break;
case 13 ... 16:
powerdomain = "TPVDD";
break;
default:
BUG();
break;
}
tristated = reg & WM831X_GPN_TRI;
if (wm831x->has_gpio_ena)
tristated = !tristated;
seq_printf(s, " %s %s %s %s%s\n"
" %s%s (0x%4x)\n",
reg & WM831X_GPN_DIR ? "in" : "out",
wm831x_gpio_get(chip, i) ? "high" : "low",
pull,
powerdomain,
reg & WM831X_GPN_POL ? "" : " inverted",
reg & WM831X_GPN_OD ? "open-drain" : "CMOS",
tristated ? " tristated" : "",
reg);
}
}
#else
#define wm831x_gpio_dbg_show NULL
#endif
static struct gpio_chip template_chip = {
.label = "wm831x",
.owner = THIS_MODULE,
.direction_input = wm831x_gpio_direction_in,
.get = wm831x_gpio_get,
.direction_output = wm831x_gpio_direction_out,
.set = wm831x_gpio_set,
.to_irq = wm831x_gpio_to_irq,
.set_debounce = wm831x_gpio_set_debounce,
.dbg_show = wm831x_gpio_dbg_show,
.can_sleep = 1,
};
static int __devinit wm831x_gpio_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_pdata *pdata = wm831x->dev->platform_data;
struct wm831x_gpio *wm831x_gpio;
int ret;
wm831x_gpio = kzalloc(sizeof(*wm831x_gpio), GFP_KERNEL);
if (wm831x_gpio == NULL)
return -ENOMEM;
wm831x_gpio->wm831x = wm831x;
wm831x_gpio->gpio_chip = template_chip;
wm831x_gpio->gpio_chip.ngpio = wm831x->num_gpio;
wm831x_gpio->gpio_chip.dev = &pdev->dev;
if (pdata && pdata->gpio_base)
wm831x_gpio->gpio_chip.base = pdata->gpio_base;
else
wm831x_gpio->gpio_chip.base = -1;
ret = gpiochip_add(&wm831x_gpio->gpio_chip);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
ret);
goto err;
}
platform_set_drvdata(pdev, wm831x_gpio);
return ret;
err:
kfree(wm831x_gpio);
return ret;
}
static int __devexit wm831x_gpio_remove(struct platform_device *pdev)
{
struct wm831x_gpio *wm831x_gpio = platform_get_drvdata(pdev);
int ret;
ret = gpiochip_remove(&wm831x_gpio->gpio_chip);
if (ret == 0)
kfree(wm831x_gpio);
return ret;
}
static struct platform_driver wm831x_gpio_driver = {
.driver.name = "wm831x-gpio",
.driver.owner = THIS_MODULE,
.probe = wm831x_gpio_probe,
.remove = __devexit_p(wm831x_gpio_remove),
};
static int __init wm831x_gpio_init(void)
{
return platform_driver_register(&wm831x_gpio_driver);
}
subsys_initcall(wm831x_gpio_init);
static void __exit wm831x_gpio_exit(void)
{
platform_driver_unregister(&wm831x_gpio_driver);
}
module_exit(wm831x_gpio_exit);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("GPIO interface for WM831x PMICs");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-gpio");
| gpl-2.0 |
brinlyaus/um-test-thing | arch/arm/mach-at91/at91sam9261.c | 1732 | 9141 | /*
* arch/arm/mach-at91/at91sam9261.c
*
* Copyright (C) 2005 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/module.h>
#include <asm/proc-fns.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/system_misc.h>
#include <mach/cpu.h>
#include <mach/at91sam9261.h>
#include <mach/at91_pmc.h>
#include "at91_aic.h"
#include "at91_rstc.h"
#include "soc.h"
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
/* --------------------------------------------------------------------
* Clocks
* -------------------------------------------------------------------- */
/*
* The peripheral clocks.
*/
static struct clk pioA_clk = {
.name = "pioA_clk",
.pmc_mask = 1 << AT91SAM9261_ID_PIOA,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk pioB_clk = {
.name = "pioB_clk",
.pmc_mask = 1 << AT91SAM9261_ID_PIOB,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk pioC_clk = {
.name = "pioC_clk",
.pmc_mask = 1 << AT91SAM9261_ID_PIOC,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk usart0_clk = {
.name = "usart0_clk",
.pmc_mask = 1 << AT91SAM9261_ID_US0,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk usart1_clk = {
.name = "usart1_clk",
.pmc_mask = 1 << AT91SAM9261_ID_US1,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk usart2_clk = {
.name = "usart2_clk",
.pmc_mask = 1 << AT91SAM9261_ID_US2,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk mmc_clk = {
.name = "mci_clk",
.pmc_mask = 1 << AT91SAM9261_ID_MCI,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk udc_clk = {
.name = "udc_clk",
.pmc_mask = 1 << AT91SAM9261_ID_UDP,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk twi_clk = {
.name = "twi_clk",
.pmc_mask = 1 << AT91SAM9261_ID_TWI,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk spi0_clk = {
.name = "spi0_clk",
.pmc_mask = 1 << AT91SAM9261_ID_SPI0,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk spi1_clk = {
.name = "spi1_clk",
.pmc_mask = 1 << AT91SAM9261_ID_SPI1,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk ssc0_clk = {
.name = "ssc0_clk",
.pmc_mask = 1 << AT91SAM9261_ID_SSC0,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk ssc1_clk = {
.name = "ssc1_clk",
.pmc_mask = 1 << AT91SAM9261_ID_SSC1,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk ssc2_clk = {
.name = "ssc2_clk",
.pmc_mask = 1 << AT91SAM9261_ID_SSC2,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk tc0_clk = {
.name = "tc0_clk",
.pmc_mask = 1 << AT91SAM9261_ID_TC0,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk tc1_clk = {
.name = "tc1_clk",
.pmc_mask = 1 << AT91SAM9261_ID_TC1,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk tc2_clk = {
.name = "tc2_clk",
.pmc_mask = 1 << AT91SAM9261_ID_TC2,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk ohci_clk = {
.name = "ohci_clk",
.pmc_mask = 1 << AT91SAM9261_ID_UHP,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk lcdc_clk = {
.name = "lcdc_clk",
.pmc_mask = 1 << AT91SAM9261_ID_LCDC,
.type = CLK_TYPE_PERIPHERAL,
};
/* HClocks */
static struct clk hck0 = {
.name = "hck0",
.pmc_mask = AT91_PMC_HCK0,
.type = CLK_TYPE_SYSTEM,
.id = 0,
};
static struct clk hck1 = {
.name = "hck1",
.pmc_mask = AT91_PMC_HCK1,
.type = CLK_TYPE_SYSTEM,
.id = 1,
};
static struct clk *periph_clocks[] __initdata = {
&pioA_clk,
&pioB_clk,
&pioC_clk,
&usart0_clk,
&usart1_clk,
&usart2_clk,
&mmc_clk,
&udc_clk,
&twi_clk,
&spi0_clk,
&spi1_clk,
&ssc0_clk,
&ssc1_clk,
&ssc2_clk,
&tc0_clk,
&tc1_clk,
&tc2_clk,
&ohci_clk,
&lcdc_clk,
// irq0 .. irq2
};
static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("hclk", "at91sam9261-lcdfb.0", &hck1),
CLKDEV_CON_DEV_ID("hclk", "at91sam9g10-lcdfb.0", &hck1),
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.0", &ssc0_clk),
CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.1", &ssc1_clk),
CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.2", &ssc2_clk),
CLKDEV_CON_DEV_ID("pclk", "fffbc000.ssc", &ssc0_clk),
CLKDEV_CON_DEV_ID("pclk", "fffc0000.ssc", &ssc1_clk),
CLKDEV_CON_DEV_ID("pclk", "fffc4000.ssc", &ssc2_clk),
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &hck0),
CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9261.0", &twi_clk),
CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g10.0", &twi_clk),
CLKDEV_CON_ID("pioA", &pioA_clk),
CLKDEV_CON_ID("pioB", &pioB_clk),
CLKDEV_CON_ID("pioC", &pioC_clk),
};
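/*
 * Example lookup (hypothetical consumer): the SSC driver instance named
 * "at91rm9200_ssc.0" fetches its peripheral clock with
 *	struct clk *clk = clk_get(&pdev->dev, "pclk");
 * which clkdev resolves through the matching
 * CLKDEV_CON_DEV_ID("pclk", "at91rm9200_ssc.0", &ssc0_clk) entry above.
 */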
static struct clk_lookup usart_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
};
/*
* The four programmable clocks.
* You must configure pin multiplexing to bring these signals out.
*/
static struct clk pck0 = {
.name = "pck0",
.pmc_mask = AT91_PMC_PCK0,
.type = CLK_TYPE_PROGRAMMABLE,
.id = 0,
};
static struct clk pck1 = {
.name = "pck1",
.pmc_mask = AT91_PMC_PCK1,
.type = CLK_TYPE_PROGRAMMABLE,
.id = 1,
};
static struct clk pck2 = {
.name = "pck2",
.pmc_mask = AT91_PMC_PCK2,
.type = CLK_TYPE_PROGRAMMABLE,
.id = 2,
};
static struct clk pck3 = {
.name = "pck3",
.pmc_mask = AT91_PMC_PCK3,
.type = CLK_TYPE_PROGRAMMABLE,
.id = 3,
};
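/*
 * Illustrative board-level use (hypothetical board code; assumes the
 * PCK0 pin has already been muxed to its peripheral function as the
 * comment above requires):
 *	struct clk *pck0 = clk_get(NULL, "pck0");
 *	clk_set_rate(pck0, 12000000);
 *	clk_enable(pck0);
 */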
static void __init at91sam9261_register_clocks(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
clkdev_add_table(periph_clocks_lookups,
ARRAY_SIZE(periph_clocks_lookups));
clkdev_add_table(usart_clocks_lookups,
ARRAY_SIZE(usart_clocks_lookups));
clk_register(&pck0);
clk_register(&pck1);
clk_register(&pck2);
clk_register(&pck3);
clk_register(&hck0);
clk_register(&hck1);
}
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
static struct at91_gpio_bank at91sam9261_gpio[] __initdata = {
{
.id = AT91SAM9261_ID_PIOA,
.regbase = AT91SAM9261_BASE_PIOA,
}, {
.id = AT91SAM9261_ID_PIOB,
.regbase = AT91SAM9261_BASE_PIOB,
}, {
.id = AT91SAM9261_ID_PIOC,
.regbase = AT91SAM9261_BASE_PIOC,
}
};
/* --------------------------------------------------------------------
* AT91SAM9261 processor initialization
* -------------------------------------------------------------------- */
static void __init at91sam9261_map_io(void)
{
if (cpu_is_at91sam9g10())
at91_init_sram(0, AT91SAM9G10_SRAM_BASE, AT91SAM9G10_SRAM_SIZE);
else
at91_init_sram(0, AT91SAM9261_SRAM_BASE, AT91SAM9261_SRAM_SIZE);
}
static void __init at91sam9261_ioremap_registers(void)
{
at91_ioremap_shdwc(AT91SAM9261_BASE_SHDWC);
at91_ioremap_rstc(AT91SAM9261_BASE_RSTC);
at91_ioremap_ramc(0, AT91SAM9261_BASE_SDRAMC, 512);
at91sam926x_ioremap_pit(AT91SAM9261_BASE_PIT);
at91sam9_ioremap_smc(0, AT91SAM9261_BASE_SMC);
at91_ioremap_matrix(AT91SAM9261_BASE_MATRIX);
}
static void __init at91sam9261_initialize(void)
{
arm_pm_idle = at91sam9_idle;
arm_pm_restart = at91sam9_alt_restart;
at91_extern_irq = (1 << AT91SAM9261_ID_IRQ0) | (1 << AT91SAM9261_ID_IRQ1)
| (1 << AT91SAM9261_ID_IRQ2);
at91_sysirq_mask_rtt(AT91SAM9261_BASE_RTT);
/* Register GPIO subsystem */
at91_gpio_init(at91sam9261_gpio, 3);
}
/* --------------------------------------------------------------------
* Interrupt initialization
* -------------------------------------------------------------------- */
/*
* The default interrupt priority levels (0 = lowest, 7 = highest).
*/
static unsigned int at91sam9261_default_irq_priority[NR_AIC_IRQS] __initdata = {
7, /* Advanced Interrupt Controller */
7, /* System Peripherals */
1, /* Parallel IO Controller A */
1, /* Parallel IO Controller B */
1, /* Parallel IO Controller C */
0,
5, /* USART 0 */
5, /* USART 1 */
5, /* USART 2 */
0, /* Multimedia Card Interface */
2, /* USB Device Port */
6, /* Two-Wire Interface */
5, /* Serial Peripheral Interface 0 */
5, /* Serial Peripheral Interface 1 */
4, /* Serial Synchronous Controller 0 */
4, /* Serial Synchronous Controller 1 */
4, /* Serial Synchronous Controller 2 */
0, /* Timer Counter 0 */
0, /* Timer Counter 1 */
0, /* Timer Counter 2 */
2, /* USB Host port */
3, /* LCD Controller */
0,
0,
0,
0,
0,
0,
0,
0, /* Advanced Interrupt Controller */
0, /* Advanced Interrupt Controller */
0, /* Advanced Interrupt Controller */
};
AT91_SOC_START(at91sam9261)
.map_io = at91sam9261_map_io,
.default_irq_priority = at91sam9261_default_irq_priority,
.ioremap_registers = at91sam9261_ioremap_registers,
.register_clocks = at91sam9261_register_clocks,
.init = at91sam9261_initialize,
AT91_SOC_END
| gpl-2.0 |
karltsou/maxtouch-v3.x | drivers/mmc/host/pxamci.c | 2244 | 22119 | /*
* linux/drivers/mmc/host/pxa.c - PXA MMCI driver
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This hardware is really sick:
* - No way to clear interrupts.
* - Have to turn off the clock whenever we touch the device.
* - Doesn't tell you how many data blocks were transferred.
* Yuck!
*
* 1 and 3 byte data transfers not supported
* max block length up to 1023
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
#include <mach/dma.h>
#include <linux/platform_data/mmc-pxamci.h>
#include "pxamci.h"
#define DRIVER_NAME "pxa2xx-mci"
#define NR_SG 1
#define CLKRT_OFF (~0)
#define mmc_has_26MHz() (cpu_is_pxa300() || cpu_is_pxa310() \
|| cpu_is_pxa935())
struct pxamci_host {
struct mmc_host *mmc;
spinlock_t lock;
struct resource *res;
void __iomem *base;
struct clk *clk;
unsigned long clkrate;
int irq;
int dma;
unsigned int clkrt;
unsigned int cmdat;
unsigned int imask;
unsigned int power_mode;
struct pxamci_platform_data *pdata;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
dma_addr_t sg_dma;
struct pxa_dma_desc *sg_cpu;
unsigned int dma_len;
unsigned int dma_dir;
unsigned int dma_drcmrrx;
unsigned int dma_drcmrtx;
struct regulator *vcc;
};
static inline void pxamci_init_ocr(struct pxamci_host *host)
{
#ifdef CONFIG_REGULATOR
host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
if (IS_ERR(host->vcc))
host->vcc = NULL;
else {
host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
if (host->pdata && host->pdata->ocr_mask)
dev_warn(mmc_dev(host->mmc),
"ocr_mask/setpower will not be used\n");
}
#endif
if (host->vcc == NULL) {
/* fall-back to platform data */
host->mmc->ocr_avail = host->pdata ?
host->pdata->ocr_mask :
MMC_VDD_32_33 | MMC_VDD_33_34;
}
}
static inline int pxamci_set_power(struct pxamci_host *host,
unsigned char power_mode,
unsigned int vdd)
{
int on;
if (host->vcc) {
int ret;
if (power_mode == MMC_POWER_UP) {
ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
if (ret)
return ret;
} else if (power_mode == MMC_POWER_OFF) {
ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
if (ret)
return ret;
}
}
if (!host->vcc && host->pdata &&
gpio_is_valid(host->pdata->gpio_power)) {
on = ((1 << vdd) & host->pdata->ocr_mask);
gpio_set_value(host->pdata->gpio_power,
!!on ^ host->pdata->gpio_power_invert);
}
if (!host->vcc && host->pdata && host->pdata->setpower)
host->pdata->setpower(mmc_dev(host->mmc), vdd);
return 0;
}
static void pxamci_stop_clock(struct pxamci_host *host)
{
if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
unsigned long timeout = 10000;
unsigned int v;
writel(STOP_CLOCK, host->base + MMC_STRPCL);
do {
v = readl(host->base + MMC_STAT);
if (!(v & STAT_CLK_EN))
break;
udelay(1);
} while (timeout--);
if (v & STAT_CLK_EN)
dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
}
}
static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
{
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
host->imask &= ~mask;
writel(host->imask, host->base + MMC_I_MASK);
spin_unlock_irqrestore(&host->lock, flags);
}
static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
{
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
host->imask |= mask;
writel(host->imask, host->base + MMC_I_MASK);
spin_unlock_irqrestore(&host->lock, flags);
}
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
unsigned int nob = data->blocks;
unsigned long long clks;
unsigned int timeout;
bool dalgn = false;
u32 dcmd;
int i;
host->data = data;
if (data->flags & MMC_DATA_STREAM)
nob = 0xffff;
writel(nob, host->base + MMC_NOB);
writel(data->blksz, host->base + MMC_BLKLEN);
clks = (unsigned long long)data->timeout_ns * host->clkrate;
do_div(clks, 1000000000UL);
timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
writel((timeout + 255) / 256, host->base + MMC_RDTO);
if (data->flags & MMC_DATA_READ) {
host->dma_dir = DMA_FROM_DEVICE;
dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
DRCMR(host->dma_drcmrtx) = 0;
DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
} else {
host->dma_dir = DMA_TO_DEVICE;
dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
DRCMR(host->dma_drcmrrx) = 0;
DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
}
dcmd |= DCMD_BURST32 | DCMD_WIDTH1;
host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
host->dma_dir);
for (i = 0; i < host->dma_len; i++) {
unsigned int length = sg_dma_len(&data->sg[i]);
host->sg_cpu[i].dcmd = dcmd | length;
if (length & 31 && !(data->flags & MMC_DATA_READ))
host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
/* Not aligned to 8-byte boundary? */
if (sg_dma_address(&data->sg[i]) & 0x7)
dalgn = 1;
if (data->flags & MMC_DATA_READ) {
host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
} else {
host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
}
host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
sizeof(struct pxa_dma_desc);
}
host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
wmb();
/*
* The PXA27x DMA controller encounters overhead when working with
* unaligned (to 8-byte boundaries) data, so switch on byte alignment
* mode only if we have unaligned data.
*/
if (dalgn)
DALGN |= (1 << host->dma);
else
DALGN &= ~(1 << host->dma);
DDADR(host->dma) = host->sg_dma;
/*
* workaround for erratum #91:
* only start DMA now if we are doing a read,
* otherwise we wait until CMD/RESP has finished
* before starting DMA.
*/
if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
DCSR(host->dma) = DCSR_RUN;
}
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
WARN_ON(host->cmd != NULL);
host->cmd = cmd;
if (cmd->flags & MMC_RSP_BUSY)
cmdat |= CMDAT_BUSY;
#define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
switch (RSP_TYPE(mmc_resp_type(cmd))) {
case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
cmdat |= CMDAT_RESP_SHORT;
break;
case RSP_TYPE(MMC_RSP_R3):
cmdat |= CMDAT_RESP_R3;
break;
case RSP_TYPE(MMC_RSP_R2):
cmdat |= CMDAT_RESP_R2;
break;
default:
break;
}
writel(cmd->opcode, host->base + MMC_CMD);
writel(cmd->arg >> 16, host->base + MMC_ARGH);
writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
writel(cmdat, host->base + MMC_CMDAT);
writel(host->clkrt, host->base + MMC_CLKRT);
writel(START_CLOCK, host->base + MMC_STRPCL);
pxamci_enable_irq(host, END_CMD_RES);
}
static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
mmc_request_done(host->mmc, mrq);
}
static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
struct mmc_command *cmd = host->cmd;
int i;
u32 v;
if (!cmd)
return 0;
host->cmd = NULL;
/*
* Did I mention this is Sick. We always need to
* discard the upper 8 bits of the first 16-bit word.
*/
v = readl(host->base + MMC_RES) & 0xffff;
for (i = 0; i < 4; i++) {
u32 w1 = readl(host->base + MMC_RES) & 0xffff;
u32 w2 = readl(host->base + MMC_RES) & 0xffff;
cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
v = w2;
}
if (stat & STAT_TIME_OUT_RESPONSE) {
cmd->error = -ETIMEDOUT;
} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
/*
* workaround for erratum #42:
* Intel PXA27x Family Processor Specification Update Rev 001
* A bogus CRC error can appear if the msb of a 136 bit
* response is a one.
*/
if (cpu_is_pxa27x() &&
(cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000))
pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
else
cmd->error = -EILSEQ;
}
pxamci_disable_irq(host, END_CMD_RES);
if (host->data && !cmd->error) {
pxamci_enable_irq(host, DATA_TRAN_DONE);
/*
* workaround for erratum #91, if doing write
* enable DMA late
*/
if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
DCSR(host->dma) = DCSR_RUN;
} else {
pxamci_finish_request(host, host->mrq);
}
return 1;
}
static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
struct mmc_data *data = host->data;
if (!data)
return 0;
DCSR(host->dma) = 0;
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
host->dma_dir);
if (stat & STAT_READ_TIME_OUT)
data->error = -ETIMEDOUT;
else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
data->error = -EILSEQ;
/*
* There appears to be a hardware design bug here. There seems to
* be no way to find out how much data was transferred to the card.
* This means that if there was an error on any block, we mark all
* data blocks as being in error.
*/
if (!data->error)
data->bytes_xfered = data->blocks * data->blksz;
else
data->bytes_xfered = 0;
pxamci_disable_irq(host, DATA_TRAN_DONE);
host->data = NULL;
if (host->mrq->stop) {
pxamci_stop_clock(host);
pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
} else {
pxamci_finish_request(host, host->mrq);
}
return 1;
}
static irqreturn_t pxamci_irq(int irq, void *devid)
{
struct pxamci_host *host = devid;
unsigned int ireg;
int handled = 0;
ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);
if (ireg) {
unsigned stat = readl(host->base + MMC_STAT);
pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
if (ireg & END_CMD_RES)
handled |= pxamci_cmd_done(host, stat);
if (ireg & DATA_TRAN_DONE)
handled |= pxamci_data_done(host, stat);
if (ireg & SDIO_INT) {
mmc_signal_sdio_irq(host->mmc);
handled = 1;
}
}
return IRQ_RETVAL(handled);
}
static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct pxamci_host *host = mmc_priv(mmc);
unsigned int cmdat;
WARN_ON(host->mrq != NULL);
host->mrq = mrq;
pxamci_stop_clock(host);
cmdat = host->cmdat;
host->cmdat &= ~CMDAT_INIT;
if (mrq->data) {
pxamci_setup_data(host, mrq->data);
cmdat &= ~CMDAT_BUSY;
cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
if (mrq->data->flags & MMC_DATA_WRITE)
cmdat |= CMDAT_WRITE;
if (mrq->data->flags & MMC_DATA_STREAM)
cmdat |= CMDAT_STREAM;
}
pxamci_start_cmd(host, mrq->cmd, cmdat);
}
static int pxamci_get_ro(struct mmc_host *mmc)
{
struct pxamci_host *host = mmc_priv(mmc);
if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
if (host->pdata->gpio_card_ro_invert)
return !gpio_get_value(host->pdata->gpio_card_ro);
else
return gpio_get_value(host->pdata->gpio_card_ro);
}
if (host->pdata && host->pdata->get_ro)
return !!host->pdata->get_ro(mmc_dev(mmc));
/*
* Board doesn't support read only detection; let the mmc core
* decide what to do.
*/
return -ENOSYS;
}
static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct pxamci_host *host = mmc_priv(mmc);
if (ios->clock) {
unsigned long rate = host->clkrate;
unsigned int clk = rate / ios->clock;
if (host->clkrt == CLKRT_OFF)
clk_enable(host->clk);
if (ios->clock == 26000000) {
/* to support 26MHz */
host->clkrt = 7;
} else {
/* to handle (19.5MHz, 26MHz) */
if (!clk)
clk = 1;
/*
* clk might result in a lower divisor than we
* desire. check for that condition and adjust
* as appropriate.
*/
if (rate / clk > ios->clock)
clk <<= 1;
host->clkrt = fls(clk) - 1;
}
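/*
 * Worked example (hypothetical rates): with clkrate = 19.5 MHz and
 * ios->clock = 10 MHz, clk starts as 1; since 19500000 / 1 exceeds
 * the requested 10 MHz, clk is doubled to 2 and clkrt = fls(2) - 1
 * = 1, i.e. the controller divides the clock by 2 down to 9.75 MHz.
 */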
/*
* we write clkrt on the next command
*/
} else {
pxamci_stop_clock(host);
if (host->clkrt != CLKRT_OFF) {
host->clkrt = CLKRT_OFF;
clk_disable(host->clk);
}
}
if (host->power_mode != ios->power_mode) {
int ret;
host->power_mode = ios->power_mode;
ret = pxamci_set_power(host, ios->power_mode, ios->vdd);
if (ret) {
dev_err(mmc_dev(mmc), "unable to set power\n");
/*
* The .set_ios() function in the mmc_host_ops
* struct returns void, and failing to set the
* power should be rare, so we print an error and
* return here.
*/
return;
}
if (ios->power_mode == MMC_POWER_ON)
host->cmdat |= CMDAT_INIT;
}
if (ios->bus_width == MMC_BUS_WIDTH_4)
host->cmdat |= CMDAT_SD_4DAT;
else
host->cmdat &= ~CMDAT_SD_4DAT;
dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n",
host->clkrt, host->cmdat);
}
static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
{
struct pxamci_host *pxa_host = mmc_priv(host);
if (enable)
pxamci_enable_irq(pxa_host, SDIO_INT);
else
pxamci_disable_irq(pxa_host, SDIO_INT);
}
static const struct mmc_host_ops pxamci_ops = {
.request = pxamci_request,
.get_ro = pxamci_get_ro,
.set_ios = pxamci_set_ios,
.enable_sdio_irq = pxamci_enable_sdio_irq,
};
static void pxamci_dma_irq(int dma, void *devid)
{
struct pxamci_host *host = devid;
int dcsr = DCSR(dma);
DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;
if (dcsr & DCSR_ENDINTR) {
writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
} else {
pr_err("%s: DMA error on channel %d (DCSR=%#x)\n",
mmc_hostname(host->mmc), dma, dcsr);
host->data->error = -EIO;
pxamci_data_done(host, 0);
}
}
static irqreturn_t pxamci_detect_irq(int irq, void *devid)
{
struct pxamci_host *host = mmc_priv(devid);
mmc_detect_change(devid, msecs_to_jiffies(host->pdata->detect_delay_ms));
return IRQ_HANDLED;
}
#ifdef CONFIG_OF
static const struct of_device_id pxa_mmc_dt_ids[] = {
{ .compatible = "marvell,pxa-mmc" },
{ }
};
MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids);
static int pxamci_of_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct pxamci_platform_data *pdata;
u32 tmp;
if (!np)
return 0;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->gpio_card_detect =
of_get_named_gpio(np, "cd-gpios", 0);
pdata->gpio_card_ro =
of_get_named_gpio(np, "wp-gpios", 0);
/* pxa-mmc specific */
pdata->gpio_power =
of_get_named_gpio(np, "pxa-mmc,gpio-power", 0);
if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0)
pdata->detect_delay_ms = tmp;
pdev->dev.platform_data = pdata;
return 0;
}
#else
static int pxamci_of_init(struct platform_device *pdev)
{
return 0;
}
#endif
static int pxamci_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct pxamci_host *host = NULL;
struct resource *r, *dmarx, *dmatx;
int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
ret = pxamci_of_init(pdev);
if (ret)
return ret;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!r || irq < 0)
return -ENXIO;
r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
if (!r)
return -EBUSY;
mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
goto out;
}
mmc->ops = &pxamci_ops;
/*
* We can do SG-DMA, but we don't because we never know how much
* data we successfully wrote to the card.
*/
mmc->max_segs = NR_SG;
/*
* Our hardware DMA can handle a maximum of one page per SG entry.
*/
mmc->max_seg_size = PAGE_SIZE;
/*
* Block length register is only 10 bits before PXA27x.
*/
mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;
/*
* Block count register is 16 bits.
*/
mmc->max_blk_count = 65535;
host = mmc_priv(mmc);
host->mmc = mmc;
host->dma = -1;
host->pdata = pdev->dev.platform_data;
host->clkrt = CLKRT_OFF;
host->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
host->clk = NULL;
goto out;
}
host->clkrate = clk_get_rate(host->clk);
/*
* Calculate minimum clock rate, rounding up.
*/
mmc->f_min = (host->clkrate + 63) / 64;
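/* e.g. a (hypothetical) 19.5 MHz clkrate gives
 * f_min = (19500000 + 63) / 64 = 304688 Hz. */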
mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate;
pxamci_init_ocr(host);
mmc->caps = 0;
host->cmdat = 0;
if (!cpu_is_pxa25x()) {
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
host->cmdat |= CMDAT_SDIO_INT_EN;
if (mmc_has_26MHz())
mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
MMC_CAP_SD_HIGHSPEED;
}
host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
if (!host->sg_cpu) {
ret = -ENOMEM;
goto out;
}
spin_lock_init(&host->lock);
host->res = r;
host->irq = irq;
host->imask = MMC_I_MASK_ALL;
host->base = ioremap(r->start, SZ_4K);
if (!host->base) {
ret = -ENOMEM;
goto out;
}
/*
* Ensure that the host controller is shut down, and setup
* with our defaults.
*/
pxamci_stop_clock(host);
writel(0, host->base + MMC_SPI);
writel(64, host->base + MMC_RESTO);
writel(host->imask, host->base + MMC_I_MASK);
host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
pxamci_dma_irq, host);
if (host->dma < 0) {
ret = -EBUSY;
goto out;
}
ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
if (ret)
goto out;
platform_set_drvdata(pdev, mmc);
dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!dmarx) {
ret = -ENXIO;
goto out;
}
host->dma_drcmrrx = dmarx->start;
dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (!dmatx) {
ret = -ENXIO;
goto out;
}
host->dma_drcmrtx = dmatx->start;
if (host->pdata) {
gpio_cd = host->pdata->gpio_card_detect;
gpio_ro = host->pdata->gpio_card_ro;
gpio_power = host->pdata->gpio_power;
}
if (gpio_is_valid(gpio_power)) {
ret = gpio_request(gpio_power, "mmc card power");
if (ret) {
dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
goto out;
}
gpio_direction_output(gpio_power,
host->pdata->gpio_power_invert);
}
if (gpio_is_valid(gpio_ro)) {
ret = gpio_request(gpio_ro, "mmc card read only");
if (ret) {
dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
goto err_gpio_ro;
}
gpio_direction_input(gpio_ro);
}
if (gpio_is_valid(gpio_cd)) {
ret = gpio_request(gpio_cd, "mmc card detect");
if (ret) {
dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
goto err_gpio_cd;
}
gpio_direction_input(gpio_cd);
ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"mmc card detect", mmc);
if (ret) {
dev_err(&pdev->dev, "failed to request card detect IRQ\n");
goto err_request_irq;
}
}
if (host->pdata && host->pdata->init)
host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);
if (gpio_is_valid(gpio_power) && host->pdata->setpower)
dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n");
if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n");
mmc_add_host(mmc);
return 0;
err_request_irq:
gpio_free(gpio_cd);
err_gpio_cd:
gpio_free(gpio_ro);
err_gpio_ro:
gpio_free(gpio_power);
out:
if (host) {
if (host->dma >= 0)
pxa_free_dma(host->dma);
if (host->base)
iounmap(host->base);
if (host->sg_cpu)
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
if (host->clk)
clk_put(host->clk);
}
if (mmc)
mmc_free_host(mmc);
release_resource(r);
return ret;
}
static int pxamci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
int gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
platform_set_drvdata(pdev, NULL);
if (mmc) {
struct pxamci_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
if (host->pdata) {
gpio_cd = host->pdata->gpio_card_detect;
gpio_ro = host->pdata->gpio_card_ro;
gpio_power = host->pdata->gpio_power;
}
if (gpio_is_valid(gpio_cd)) {
free_irq(gpio_to_irq(gpio_cd), mmc);
gpio_free(gpio_cd);
}
if (gpio_is_valid(gpio_ro))
gpio_free(gpio_ro);
if (gpio_is_valid(gpio_power))
gpio_free(gpio_power);
if (host->vcc)
regulator_put(host->vcc);
if (host->pdata && host->pdata->exit)
host->pdata->exit(&pdev->dev, mmc);
pxamci_stop_clock(host);
writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
host->base + MMC_I_MASK);
DRCMR(host->dma_drcmrrx) = 0;
DRCMR(host->dma_drcmrtx) = 0;
free_irq(host->irq, host);
pxa_free_dma(host->dma);
iounmap(host->base);
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
clk_put(host->clk);
release_resource(host->res);
mmc_free_host(mmc);
}
return 0;
}
#ifdef CONFIG_PM
static int pxamci_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
int ret = 0;
if (mmc)
ret = mmc_suspend_host(mmc);
return ret;
}
static int pxamci_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
int ret = 0;
if (mmc)
ret = mmc_resume_host(mmc);
return ret;
}
static const struct dev_pm_ops pxamci_pm_ops = {
.suspend = pxamci_suspend,
.resume = pxamci_resume,
};
#endif
static struct platform_driver pxamci_driver = {
.probe = pxamci_probe,
.remove = pxamci_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(pxa_mmc_dt_ids),
#ifdef CONFIG_PM
.pm = &pxamci_pm_ops,
#endif
},
};
module_platform_driver(pxamci_driver);
MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-mci");
| gpl-2.0 |
ztemt/A476_V1B_5.1_kernel | drivers/input/keyboard/twl4030_keypad.c | 2244 | 12303 | /*
* twl4030_keypad.c - driver for 8x8 keypad controller in twl4030 chips
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Copyright (C) 2008 Nokia Corporation
*
* Code re-written for 2430SDP by:
* Syed Mohammed Khasim <x0khasim@ti.com>
*
* Initial Code:
* Manjunatha G K <manjugk@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/i2c/twl.h>
#include <linux/slab.h>
/*
* The TWL4030 family chips include a keypad controller that supports
* up to an 8x8 switch matrix. The controller can issue system wakeup
* events, since it uses only the always-on 32KiHz oscillator, and has
* an internal state machine that decodes pressed keys, including
* multi-key combinations.
*
* This driver lets boards define what keycodes they wish to report for
* which scancodes, as part of the "struct twl4030_keypad_data" used in
* the probe() routine.
*
* See the TPS65950 documentation; that's the general availability
* version of the TWL5030 second generation part.
*/
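/*
 * Illustrative board wiring (hypothetical names and keycodes; the
 * fields match those consumed by twl4030_kp_probe() below):
 *	static const uint32_t board_keymap[] = {
 *		KEY(0, 0, KEY_A), KEY(0, 1, KEY_B),
 *		KEY(1, 0, KEY_C), KEY(1, 1, KEY_D),
 *	};
 *	static struct matrix_keymap_data board_map_data = {
 *		.keymap		= board_keymap,
 *		.keymap_size	= ARRAY_SIZE(board_keymap),
 *	};
 *	static struct twl4030_keypad_data board_kp_data = {
 *		.keymap_data	= &board_map_data,
 *		.rows		= 2,
 *		.cols		= 2,
 *		.rep		= 1,
 *	};
 */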
#define TWL4030_MAX_ROWS 8 /* TWL4030 hard limit */
#define TWL4030_MAX_COLS 8
/*
* Note that we add space for an extra column so that we can handle
* row lines connected to the gnd (see twl4030_col_xlate()).
*/
#define TWL4030_ROW_SHIFT 4
#define TWL4030_KEYMAP_SIZE (TWL4030_MAX_ROWS << TWL4030_ROW_SHIFT)
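/*
 * Scan codes below are MATRIX_SCAN_CODE(row, col, TWL4030_ROW_SHIFT),
 * i.e. (row << 4) + col; e.g. row 2, column 3 yields scan code
 * 0x23 (35), well within TWL4030_KEYMAP_SIZE (8 << 4 = 128).
 */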
struct twl4030_keypad {
unsigned short keymap[TWL4030_KEYMAP_SIZE];
u16 kp_state[TWL4030_MAX_ROWS];
unsigned n_rows;
unsigned n_cols;
unsigned irq;
struct device *dbg_dev;
struct input_dev *input;
};
/*----------------------------------------------------------------------*/
/* arbitrary prescaler value 0..7 */
#define PTV_PRESCALER 4
/* Register Offsets */
#define KEYP_CTRL 0x00
#define KEYP_DEB 0x01
#define KEYP_LONG_KEY 0x02
#define KEYP_LK_PTV 0x03
#define KEYP_TIMEOUT_L 0x04
#define KEYP_TIMEOUT_H 0x05
#define KEYP_KBC 0x06
#define KEYP_KBR 0x07
#define KEYP_SMS 0x08
#define KEYP_FULL_CODE_7_0 0x09 /* row 0 column status */
#define KEYP_FULL_CODE_15_8 0x0a /* ... row 1 ... */
#define KEYP_FULL_CODE_23_16 0x0b
#define KEYP_FULL_CODE_31_24 0x0c
#define KEYP_FULL_CODE_39_32 0x0d
#define KEYP_FULL_CODE_47_40 0x0e
#define KEYP_FULL_CODE_55_48 0x0f
#define KEYP_FULL_CODE_63_56 0x10
#define KEYP_ISR1 0x11
#define KEYP_IMR1 0x12
#define KEYP_ISR2 0x13
#define KEYP_IMR2 0x14
#define KEYP_SIR 0x15
#define KEYP_EDR 0x16 /* edge triggers */
#define KEYP_SIH_CTRL 0x17
/* KEYP_CTRL_REG Fields */
#define KEYP_CTRL_SOFT_NRST BIT(0)
#define KEYP_CTRL_SOFTMODEN BIT(1)
#define KEYP_CTRL_LK_EN BIT(2)
#define KEYP_CTRL_TOE_EN BIT(3)
#define KEYP_CTRL_TOLE_EN BIT(4)
#define KEYP_CTRL_RP_EN BIT(5)
#define KEYP_CTRL_KBD_ON BIT(6)
/* KEYP_DEB, KEYP_LONG_KEY, KEYP_TIMEOUT_x*/
#define KEYP_PERIOD_US(t, prescale) ((t) / (31 << (prescale + 1)) - 1)
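/*
 * Worked example (values used by twl4030_kp_program() below): with
 * PTV_PRESCALER = 4 one tick is 31 << 5 = 992 us, so the 20 ms
 * debounce writes 20000 / 992 - 1 = 19 to KEYP_DEB and the 200 ms
 * timeout writes 200000 / 992 - 1 = 200 (0x00c8, split across
 * KEYP_TIMEOUT_L/H).
 */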
/* KEYP_LK_PTV_REG Fields */
#define KEYP_LK_PTV_PTV_SHIFT 5
/* KEYP_{IMR,ISR,SIR} Fields */
#define KEYP_IMR1_MIS BIT(3)
#define KEYP_IMR1_TO BIT(2)
#define KEYP_IMR1_LK BIT(1)
#define KEYP_IMR1_KP BIT(0)
/* KEYP_EDR Fields */
#define KEYP_EDR_KP_FALLING 0x01
#define KEYP_EDR_KP_RISING 0x02
#define KEYP_EDR_KP_BOTH 0x03
#define KEYP_EDR_LK_FALLING 0x04
#define KEYP_EDR_LK_RISING 0x08
#define KEYP_EDR_TO_FALLING 0x10
#define KEYP_EDR_TO_RISING 0x20
#define KEYP_EDR_MIS_FALLING 0x40
#define KEYP_EDR_MIS_RISING 0x80
/*----------------------------------------------------------------------*/
static int twl4030_kpread(struct twl4030_keypad *kp,
u8 *data, u32 reg, u8 num_bytes)
{
int ret = twl_i2c_read(TWL4030_MODULE_KEYPAD, data, reg, num_bytes);
if (ret < 0)
dev_warn(kp->dbg_dev,
"Couldn't read TWL4030: %X - ret %d[%x]\n",
reg, ret, ret);
return ret;
}
static int twl4030_kpwrite_u8(struct twl4030_keypad *kp, u8 data, u32 reg)
{
int ret = twl_i2c_write_u8(TWL4030_MODULE_KEYPAD, data, reg);
if (ret < 0)
dev_warn(kp->dbg_dev,
"Could not write TWL4030: %X - ret %d[%x]\n",
reg, ret, ret);
return ret;
}
static inline u16 twl4030_col_xlate(struct twl4030_keypad *kp, u8 col)
{
/* If all bits in a row are active for all columns then
* we have that row line connected to gnd. Mark this
* key as pressed at matrix position n_cols (i.e.
* one higher than the size of the matrix).
*/
if (col == 0xFF)
return 1 << kp->n_cols;
else
return col & ((1 << kp->n_cols) - 1);
}
static int twl4030_read_kp_matrix_state(struct twl4030_keypad *kp, u16 *state)
{
u8 new_state[TWL4030_MAX_ROWS];
int row;
int ret = twl4030_kpread(kp, new_state,
KEYP_FULL_CODE_7_0, kp->n_rows);
if (ret >= 0)
for (row = 0; row < kp->n_rows; row++)
state[row] = twl4030_col_xlate(kp, new_state[row]);
return ret;
}
static bool twl4030_is_in_ghost_state(struct twl4030_keypad *kp, u16 *key_state)
{
int i;
u16 check = 0;
for (i = 0; i < kp->n_rows; i++) {
u16 col = key_state[i];
if ((col & check) && hweight16(col) > 1)
return true;
check |= col;
}
return false;
}
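/*
 * Illustrative ghost case: with keys (0,0), (0,1) and (1,0) held,
 * the matrix also reads (1,1) as pressed because current flows
 * through the three closed switches. Rows 0 and 1 then share column
 * bits and each has more than one bit set, so the scan is rejected.
 */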
static void twl4030_kp_scan(struct twl4030_keypad *kp, bool release_all)
{
struct input_dev *input = kp->input;
u16 new_state[TWL4030_MAX_ROWS];
int col, row;
if (release_all)
memset(new_state, 0, sizeof(new_state));
else {
/* check for any changes */
int ret = twl4030_read_kp_matrix_state(kp, new_state);
if (ret < 0) /* panic ... */
return;
if (twl4030_is_in_ghost_state(kp, new_state))
return;
}
/* check for changes and print those */
for (row = 0; row < kp->n_rows; row++) {
int changed = new_state[row] ^ kp->kp_state[row];
if (!changed)
continue;
/* Extra column handles "all gnd" rows */
for (col = 0; col < kp->n_cols + 1; col++) {
int code;
if (!(changed & (1 << col)))
continue;
dev_dbg(kp->dbg_dev, "key [%d:%d] %s\n", row, col,
(new_state[row] & (1 << col)) ?
"press" : "release");
code = MATRIX_SCAN_CODE(row, col, TWL4030_ROW_SHIFT);
input_event(input, EV_MSC, MSC_SCAN, code);
input_report_key(input, kp->keymap[code],
new_state[row] & (1 << col));
}
kp->kp_state[row] = new_state[row];
}
input_sync(input);
}
/*
* Keypad interrupt handler
*/
static irqreturn_t do_kp_irq(int irq, void *_kp)
{
struct twl4030_keypad *kp = _kp;
u8 reg;
int ret;
/* Read & Clear TWL4030 pending interrupt */
ret = twl4030_kpread(kp, &reg, KEYP_ISR1, 1);
/* Release all keys if I2C has gone bad or
* the KEYP has gone to idle state */
if (ret >= 0 && (reg & KEYP_IMR1_KP))
twl4030_kp_scan(kp, false);
else
twl4030_kp_scan(kp, true);
return IRQ_HANDLED;
}
static int twl4030_kp_program(struct twl4030_keypad *kp)
{
u8 reg;
int i;
/* Enable controller, with hardware decoding but not autorepeat */
reg = KEYP_CTRL_SOFT_NRST | KEYP_CTRL_SOFTMODEN
| KEYP_CTRL_TOE_EN | KEYP_CTRL_KBD_ON;
if (twl4030_kpwrite_u8(kp, reg, KEYP_CTRL) < 0)
return -EIO;
/* NOTE: we could use sih_setup() here to package keypad
* event sources as four different IRQs ... but we don't.
*/
/* Enable TO rising and KP rising and falling edge detection */
reg = KEYP_EDR_KP_BOTH | KEYP_EDR_TO_RISING;
if (twl4030_kpwrite_u8(kp, reg, KEYP_EDR) < 0)
return -EIO;
/* Set PTV prescaler Field */
reg = (PTV_PRESCALER << KEYP_LK_PTV_PTV_SHIFT);
if (twl4030_kpwrite_u8(kp, reg, KEYP_LK_PTV) < 0)
return -EIO;
/* Set key debounce time to 20 ms */
i = KEYP_PERIOD_US(20000, PTV_PRESCALER);
if (twl4030_kpwrite_u8(kp, i, KEYP_DEB) < 0)
return -EIO;
/* Set timeout period to 200 ms */
i = KEYP_PERIOD_US(200000, PTV_PRESCALER);
if (twl4030_kpwrite_u8(kp, (i & 0xFF), KEYP_TIMEOUT_L) < 0)
return -EIO;
if (twl4030_kpwrite_u8(kp, (i >> 8), KEYP_TIMEOUT_H) < 0)
return -EIO;
/*
* Enable Clear-on-Read; disable remembering events that fire
* after the IRQ but before our handler acks (reads) them.
*/
reg = TWL4030_SIH_CTRL_COR_MASK | TWL4030_SIH_CTRL_PENDDIS_MASK;
if (twl4030_kpwrite_u8(kp, reg, KEYP_SIH_CTRL) < 0)
return -EIO;
/* initialize key state; irqs update it from here on */
if (twl4030_read_kp_matrix_state(kp, kp->kp_state) < 0)
return -EIO;
return 0;
}
/*
* Registers keypad device with input subsystem
* and configures TWL4030 keypad registers
*/
static int twl4030_kp_probe(struct platform_device *pdev)
{
struct twl4030_keypad_data *pdata = pdev->dev.platform_data;
const struct matrix_keymap_data *keymap_data;
struct twl4030_keypad *kp;
struct input_dev *input;
u8 reg;
int error;
if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data ||
pdata->rows > TWL4030_MAX_ROWS || pdata->cols > TWL4030_MAX_COLS) {
dev_err(&pdev->dev, "Invalid platform_data\n");
return -EINVAL;
}
keymap_data = pdata->keymap_data;
kp = kzalloc(sizeof(*kp), GFP_KERNEL);
input = input_allocate_device();
if (!kp || !input) {
error = -ENOMEM;
goto err1;
}
/* Get the debug Device */
kp->dbg_dev = &pdev->dev;
kp->input = input;
kp->n_rows = pdata->rows;
kp->n_cols = pdata->cols;
kp->irq = platform_get_irq(pdev, 0);
/* setup input device */
input->name = "TWL4030 Keypad";
input->phys = "twl4030_keypad/input0";
input->dev.parent = &pdev->dev;
input->id.bustype = BUS_HOST;
input->id.vendor = 0x0001;
input->id.product = 0x0001;
input->id.version = 0x0003;
error = matrix_keypad_build_keymap(keymap_data, NULL,
TWL4030_MAX_ROWS,
1 << TWL4030_ROW_SHIFT,
kp->keymap, input);
if (error) {
dev_err(kp->dbg_dev, "Failed to build keymap\n");
goto err1;
}
input_set_capability(input, EV_MSC, MSC_SCAN);
/* Enable auto repeat feature of Linux input subsystem */
if (pdata->rep)
__set_bit(EV_REP, input->evbit);
error = input_register_device(input);
if (error) {
dev_err(kp->dbg_dev,
"Unable to register twl4030 keypad device\n");
goto err1;
}
error = twl4030_kp_program(kp);
if (error)
goto err2;
/*
* This ISR will always execute in kernel thread context because of
* the need to access the TWL4030 over the I2C bus.
*
* NOTE: we assume this host is wired to TWL4030 INT1, not INT2 ...
*/
error = request_threaded_irq(kp->irq, NULL, do_kp_irq,
0, pdev->name, kp);
if (error) {
dev_info(kp->dbg_dev, "request_irq failed for irq no=%d\n",
kp->irq);
goto err2;
}
/* Enable KP and TO interrupts now. */
reg = (u8) ~(KEYP_IMR1_KP | KEYP_IMR1_TO);
if (twl4030_kpwrite_u8(kp, reg, KEYP_IMR1)) {
error = -EIO;
goto err3;
}
platform_set_drvdata(pdev, kp);
return 0;
err3:
/* mask all events - we don't care about the result */
(void) twl4030_kpwrite_u8(kp, 0xff, KEYP_IMR1);
free_irq(kp->irq, kp);	/* dev_id must match request_threaded_irq() */
err2:
input_unregister_device(input);
input = NULL;
err1:
input_free_device(input);
kfree(kp);
return error;
}
static int twl4030_kp_remove(struct platform_device *pdev)
{
struct twl4030_keypad *kp = platform_get_drvdata(pdev);
free_irq(kp->irq, kp);
input_unregister_device(kp->input);
platform_set_drvdata(pdev, NULL);
kfree(kp);
return 0;
}
/*
* NOTE: twl4030 are multi-function devices connected via I2C.
* So this device is a child of an I2C parent, thus it needs to
* support unplug/replug (which most platform devices don't).
*/
static struct platform_driver twl4030_kp_driver = {
.probe = twl4030_kp_probe,
.remove = twl4030_kp_remove,
.driver = {
.name = "twl4030_keypad",
.owner = THIS_MODULE,
},
};
module_platform_driver(twl4030_kp_driver);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("TWL4030 Keypad Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:twl4030_keypad");
| gpl-2.0 |
varunchitre15/thunderzap_tomato | drivers/video/backlight/tps65217_bl.c | 2244 | 8126 | /*
* tps65217_bl.c
*
* TPS65217 backlight driver
*
* Copyright (C) 2012 Matthias Kaehlcke
* Author: Matthias Kaehlcke <matthias@kaehlcke.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/fb.h>
#include <linux/mfd/tps65217.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
struct tps65217_bl {
struct tps65217 *tps;
struct device *dev;
struct backlight_device *bl;
bool is_enabled;
};
static int tps65217_bl_enable(struct tps65217_bl *tps65217_bl)
{
int rc;
rc = tps65217_set_bits(tps65217_bl->tps, TPS65217_REG_WLEDCTRL1,
TPS65217_WLEDCTRL1_ISINK_ENABLE,
TPS65217_WLEDCTRL1_ISINK_ENABLE, TPS65217_PROTECT_NONE);
if (rc) {
dev_err(tps65217_bl->dev,
"failed to enable backlight: %d\n", rc);
return rc;
}
tps65217_bl->is_enabled = true;
dev_dbg(tps65217_bl->dev, "backlight enabled\n");
return 0;
}
static int tps65217_bl_disable(struct tps65217_bl *tps65217_bl)
{
int rc;
rc = tps65217_clear_bits(tps65217_bl->tps,
TPS65217_REG_WLEDCTRL1,
TPS65217_WLEDCTRL1_ISINK_ENABLE,
TPS65217_PROTECT_NONE);
if (rc) {
dev_err(tps65217_bl->dev,
"failed to disable backlight: %d\n", rc);
return rc;
}
tps65217_bl->is_enabled = false;
dev_dbg(tps65217_bl->dev, "backlight disabled\n");
return 0;
}
static int tps65217_bl_update_status(struct backlight_device *bl)
{
struct tps65217_bl *tps65217_bl = bl_get_data(bl);
int rc;
int brightness = bl->props.brightness;
if (bl->props.state & BL_CORE_SUSPENDED)
brightness = 0;
if ((bl->props.power != FB_BLANK_UNBLANK) ||
(bl->props.fb_blank != FB_BLANK_UNBLANK))
/* framebuffer in low power mode or blanking active */
brightness = 0;
if (brightness > 0) {
rc = tps65217_reg_write(tps65217_bl->tps,
TPS65217_REG_WLEDCTRL2,
brightness - 1,
TPS65217_PROTECT_NONE);
if (rc) {
dev_err(tps65217_bl->dev,
"failed to set brightness level: %d\n", rc);
return rc;
}
dev_dbg(tps65217_bl->dev, "brightness set to %d\n", brightness);
if (!tps65217_bl->is_enabled)
rc = tps65217_bl_enable(tps65217_bl);
} else {
rc = tps65217_bl_disable(tps65217_bl);
}
return rc;
}
static int tps65217_bl_get_brightness(struct backlight_device *bl)
{
return bl->props.brightness;
}
static const struct backlight_ops tps65217_bl_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = tps65217_bl_update_status,
.get_brightness = tps65217_bl_get_brightness
};
static int tps65217_bl_hw_init(struct tps65217_bl *tps65217_bl,
struct tps65217_bl_pdata *pdata)
{
int rc;
rc = tps65217_bl_disable(tps65217_bl);
if (rc)
return rc;
switch (pdata->isel) {
case TPS65217_BL_ISET1:
/* select ISET_1 current level */
rc = tps65217_clear_bits(tps65217_bl->tps,
TPS65217_REG_WLEDCTRL1,
TPS65217_WLEDCTRL1_ISEL,
TPS65217_PROTECT_NONE);
if (rc) {
dev_err(tps65217_bl->dev,
"failed to select ISET1 current level: %d)\n",
rc);
return rc;
}
dev_dbg(tps65217_bl->dev, "selected ISET1 current level\n");
break;
case TPS65217_BL_ISET2:
/* select ISET2 current level */
rc = tps65217_set_bits(tps65217_bl->tps, TPS65217_REG_WLEDCTRL1,
TPS65217_WLEDCTRL1_ISEL,
TPS65217_WLEDCTRL1_ISEL, TPS65217_PROTECT_NONE);
if (rc) {
dev_err(tps65217_bl->dev,
"failed to select ISET2 current level: %d\n",
rc);
return rc;
}
dev_dbg(tps65217_bl->dev, "selected ISET2 current level\n");
break;
default:
dev_err(tps65217_bl->dev,
"invalid value for current level: %d\n", pdata->isel);
return -EINVAL;
}
/* set PWM frequency */
rc = tps65217_set_bits(tps65217_bl->tps,
TPS65217_REG_WLEDCTRL1,
TPS65217_WLEDCTRL1_FDIM_MASK,
pdata->fdim,
TPS65217_PROTECT_NONE);
if (rc) {
dev_err(tps65217_bl->dev,
"failed to select PWM dimming frequency: %d\n",
rc);
return rc;
}
return 0;
}
#ifdef CONFIG_OF
static struct tps65217_bl_pdata *
tps65217_bl_parse_dt(struct platform_device *pdev)
{
struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
struct device_node *node = of_node_get(tps->dev->of_node);
struct tps65217_bl_pdata *pdata, *err;
u32 val;
node = of_find_node_by_name(node, "backlight");
if (!node)
return ERR_PTR(-ENODEV);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "failed to allocate platform data\n");
err = ERR_PTR(-ENOMEM);
goto err;
}
pdata->isel = TPS65217_BL_ISET1;
if (!of_property_read_u32(node, "isel", &val)) {
if (val < TPS65217_BL_ISET1 ||
val > TPS65217_BL_ISET2) {
dev_err(&pdev->dev,
"invalid 'isel' value in the device tree\n");
err = ERR_PTR(-EINVAL);
goto err;
}
pdata->isel = val;
}
pdata->fdim = TPS65217_BL_FDIM_200HZ;
if (!of_property_read_u32(node, "fdim", &val)) {
switch (val) {
case 100:
pdata->fdim = TPS65217_BL_FDIM_100HZ;
break;
case 200:
pdata->fdim = TPS65217_BL_FDIM_200HZ;
break;
case 500:
pdata->fdim = TPS65217_BL_FDIM_500HZ;
break;
case 1000:
pdata->fdim = TPS65217_BL_FDIM_1000HZ;
break;
default:
dev_err(&pdev->dev,
"invalid 'fdim' value in the device tree\n");
err = ERR_PTR(-EINVAL);
goto err;
}
}
if (!of_property_read_u32(node, "default-brightness", &val)) {
if (val > 100) {	/* val is unsigned; only the upper bound needs checking */
dev_err(&pdev->dev,
"invalid 'default-brightness' value in the device tree\n");
err = ERR_PTR(-EINVAL);
goto err;
}
pdata->dft_brightness = val;
}
of_node_put(node);
return pdata;
err:
of_node_put(node);
return err;
}
#else
static struct tps65217_bl_pdata *
tps65217_bl_parse_dt(struct platform_device *pdev)
{
return NULL;
}
#endif
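/*
 * Illustrative device-tree fragment (node and property names as parsed
 * above; the values are examples only):
 *	backlight {
 *		isel = <1>;
 *		fdim = <200>;
 *		default-brightness = <80>;
 *	};
 * isel = <1> selects ISET1 and fdim = <200> the 200 Hz PWM frequency.
 */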
static int tps65217_bl_probe(struct platform_device *pdev)
{
int rc;
struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
struct tps65217_bl *tps65217_bl;
struct tps65217_bl_pdata *pdata;
struct backlight_properties bl_props;
if (tps->dev->of_node) {
pdata = tps65217_bl_parse_dt(pdev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
} else {
if (!pdev->dev.platform_data) {
dev_err(&pdev->dev, "no platform data provided\n");
return -EINVAL;
}
pdata = pdev->dev.platform_data;
}
tps65217_bl = devm_kzalloc(&pdev->dev, sizeof(*tps65217_bl),
GFP_KERNEL);
if (tps65217_bl == NULL) {
dev_err(&pdev->dev, "allocation of struct tps65217_bl failed\n");
return -ENOMEM;
}
tps65217_bl->tps = tps;
tps65217_bl->dev = &pdev->dev;
tps65217_bl->is_enabled = false;
rc = tps65217_bl_hw_init(tps65217_bl, pdata);
if (rc)
return rc;
memset(&bl_props, 0, sizeof(struct backlight_properties));
bl_props.type = BACKLIGHT_RAW;
bl_props.max_brightness = 100;
tps65217_bl->bl = backlight_device_register(pdev->name,
tps65217_bl->dev, tps65217_bl,
&tps65217_bl_ops, &bl_props);
if (IS_ERR(tps65217_bl->bl)) {
dev_err(tps65217_bl->dev,
"registration of backlight device failed: %d\n", rc);
return PTR_ERR(tps65217_bl->bl);
}
tps65217_bl->bl->props.brightness = pdata->dft_brightness;
backlight_update_status(tps65217_bl->bl);
platform_set_drvdata(pdev, tps65217_bl);
return 0;
}
static int tps65217_bl_remove(struct platform_device *pdev)
{
struct tps65217_bl *tps65217_bl = platform_get_drvdata(pdev);
backlight_device_unregister(tps65217_bl->bl);
return 0;
}
static struct platform_driver tps65217_bl_driver = {
.probe = tps65217_bl_probe,
.remove = tps65217_bl_remove,
.driver = {
.owner = THIS_MODULE,
.name = "tps65217-bl",
},
};
module_platform_driver(tps65217_bl_driver);
MODULE_DESCRIPTION("TPS65217 Backlight driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Matthias Kaehlcke <matthias@kaehlcke.net>");
| gpl-2.0 |
OpenLD/linux-wetek-3.10.y | drivers/mmc/host/pxamci.c | 2244 | 22119 | /*
* linux/drivers/mmc/host/pxa.c - PXA MMCI driver
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This hardware is really sick:
* - No way to clear interrupts.
* - Have to turn off the clock whenever we touch the device.
* - Doesn't tell you how many data blocks were transferred.
* Yuck!
*
* 1 and 3 byte data transfers not supported
* max block length up to 1023
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
#include <mach/dma.h>
#include <linux/platform_data/mmc-pxamci.h>
#include "pxamci.h"
#define DRIVER_NAME "pxa2xx-mci"
#define NR_SG 1
#define CLKRT_OFF (~0)
#define mmc_has_26MHz() (cpu_is_pxa300() || cpu_is_pxa310() \
|| cpu_is_pxa935())
struct pxamci_host {
struct mmc_host *mmc;
spinlock_t lock;
struct resource *res;
void __iomem *base;
struct clk *clk;
unsigned long clkrate;
int irq;
int dma;
unsigned int clkrt;
unsigned int cmdat;
unsigned int imask;
unsigned int power_mode;
struct pxamci_platform_data *pdata;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
dma_addr_t sg_dma;
struct pxa_dma_desc *sg_cpu;
unsigned int dma_len;
unsigned int dma_dir;
unsigned int dma_drcmrrx;
unsigned int dma_drcmrtx;
struct regulator *vcc;
};
static inline void pxamci_init_ocr(struct pxamci_host *host)
{
#ifdef CONFIG_REGULATOR
host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
if (IS_ERR(host->vcc))
host->vcc = NULL;
else {
host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
if (host->pdata && host->pdata->ocr_mask)
dev_warn(mmc_dev(host->mmc),
"ocr_mask/setpower will not be used\n");
}
#endif
if (host->vcc == NULL) {
/* fall-back to platform data */
host->mmc->ocr_avail = host->pdata ?
host->pdata->ocr_mask :
MMC_VDD_32_33 | MMC_VDD_33_34;
}
}
static inline int pxamci_set_power(struct pxamci_host *host,
unsigned char power_mode,
unsigned int vdd)
{
int on;
if (host->vcc) {
int ret;
if (power_mode == MMC_POWER_UP) {
ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
if (ret)
return ret;
} else if (power_mode == MMC_POWER_OFF) {
ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
if (ret)
return ret;
}
}
if (!host->vcc && host->pdata &&
gpio_is_valid(host->pdata->gpio_power)) {
on = ((1 << vdd) & host->pdata->ocr_mask);
gpio_set_value(host->pdata->gpio_power,
!!on ^ host->pdata->gpio_power_invert);
}
if (!host->vcc && host->pdata && host->pdata->setpower)
host->pdata->setpower(mmc_dev(host->mmc), vdd);
return 0;
}
static void pxamci_stop_clock(struct pxamci_host *host)
{
if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
unsigned long timeout = 10000;
unsigned int v;
writel(STOP_CLOCK, host->base + MMC_STRPCL);
do {
v = readl(host->base + MMC_STAT);
if (!(v & STAT_CLK_EN))
break;
udelay(1);
} while (timeout--);
if (v & STAT_CLK_EN)
dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
}
}
static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
{
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
host->imask &= ~mask;
writel(host->imask, host->base + MMC_I_MASK);
spin_unlock_irqrestore(&host->lock, flags);
}
static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
{
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
host->imask |= mask;
writel(host->imask, host->base + MMC_I_MASK);
spin_unlock_irqrestore(&host->lock, flags);
}
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
unsigned int nob = data->blocks;
unsigned long long clks;
unsigned int timeout;
bool dalgn = false;
u32 dcmd;
int i;
host->data = data;
if (data->flags & MMC_DATA_STREAM)
nob = 0xffff;
writel(nob, host->base + MMC_NOB);
writel(data->blksz, host->base + MMC_BLKLEN);
clks = (unsigned long long)data->timeout_ns * host->clkrate;
do_div(clks, 1000000000UL);
timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
writel((timeout + 255) / 256, host->base + MMC_RDTO);
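/*
 * Worked example (values assumed, not from a real board): with
 * clkrate = 19.5 MHz and data->timeout_ns = 100 ms,
 * clks = 19500000 * 0.1 = 1950000 cycles; MMC_RDTO is programmed in
 * units of 256 clocks here, hence the (timeout + 255) / 256 round-up.
 */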
if (data->flags & MMC_DATA_READ) {
host->dma_dir = DMA_FROM_DEVICE;
dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
DRCMR(host->dma_drcmrtx) = 0;
DRCMR(host->dma_drcmrrx) = host->dma | DRCMR_MAPVLD;
} else {
host->dma_dir = DMA_TO_DEVICE;
dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
DRCMR(host->dma_drcmrrx) = 0;
DRCMR(host->dma_drcmrtx) = host->dma | DRCMR_MAPVLD;
}
dcmd |= DCMD_BURST32 | DCMD_WIDTH1;
host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
host->dma_dir);
for (i = 0; i < host->dma_len; i++) {
unsigned int length = sg_dma_len(&data->sg[i]);
host->sg_cpu[i].dcmd = dcmd | length;
if (length & 31 && !(data->flags & MMC_DATA_READ))
host->sg_cpu[i].dcmd |= DCMD_ENDIRQEN;
/* Not aligned to 8-byte boundary? */
if (sg_dma_address(&data->sg[i]) & 0x7)
dalgn = 1;
if (data->flags & MMC_DATA_READ) {
host->sg_cpu[i].dsadr = host->res->start + MMC_RXFIFO;
host->sg_cpu[i].dtadr = sg_dma_address(&data->sg[i]);
} else {
host->sg_cpu[i].dsadr = sg_dma_address(&data->sg[i]);
host->sg_cpu[i].dtadr = host->res->start + MMC_TXFIFO;
}
host->sg_cpu[i].ddadr = host->sg_dma + (i + 1) *
sizeof(struct pxa_dma_desc);
}
host->sg_cpu[host->dma_len - 1].ddadr = DDADR_STOP;
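/*
 * Resulting descriptor chain (illustrative, for a two-entry read
 * scatterlist): sg_cpu[0] moves data from the RX FIFO to sg[0]'s bus
 * address and links to sg_cpu[1] through ddadr; sg_cpu[1].ddadr is
 * set to DDADR_STOP so the channel halts after the last segment.
 */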
wmb();
/*
* The PXA27x DMA controller encounters overhead when working with
* unaligned (to 8-byte boundaries) data, so switch on byte alignment
* mode only if we have unaligned data.
*/
if (dalgn)
DALGN |= (1 << host->dma);
else
DALGN &= ~(1 << host->dma);
DDADR(host->dma) = host->sg_dma;
/*
* workaround for erratum #91:
* only start DMA now if we are doing a read,
* otherwise we wait until CMD/RESP has finished
* before starting DMA.
*/
if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
DCSR(host->dma) = DCSR_RUN;
}
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
WARN_ON(host->cmd != NULL);
host->cmd = cmd;
if (cmd->flags & MMC_RSP_BUSY)
cmdat |= CMDAT_BUSY;
#define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
switch (RSP_TYPE(mmc_resp_type(cmd))) {
case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
cmdat |= CMDAT_RESP_SHORT;
break;
case RSP_TYPE(MMC_RSP_R3):
cmdat |= CMDAT_RESP_R3;
break;
case RSP_TYPE(MMC_RSP_R2):
cmdat |= CMDAT_RESP_R2;
break;
default:
break;
}
writel(cmd->opcode, host->base + MMC_CMD);
writel(cmd->arg >> 16, host->base + MMC_ARGH);
writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
writel(cmdat, host->base + MMC_CMDAT);
writel(host->clkrt, host->base + MMC_CLKRT);
writel(START_CLOCK, host->base + MMC_STRPCL);
pxamci_enable_irq(host, END_CMD_RES);
}
static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
mmc_request_done(host->mmc, mrq);
}
static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
struct mmc_command *cmd = host->cmd;
int i;
u32 v;
if (!cmd)
return 0;
host->cmd = NULL;
/*
* Did I mention this hardware is sick? We always need to
* discard the upper 8 bits of the first 16-bit word.
*/
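/*
 * Illustrative layout (inferred from the loop below): each read of
 * MMC_RES yields 16 bits; resp[i] is assembled as
 * (v & 0xff) << 24 | w1 << 8 | w2 >> 8, so the low byte of the
 * previous read supplies the top byte of each response word and the
 * low byte of w2 is carried into the next word.
 */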
v = readl(host->base + MMC_RES) & 0xffff;
for (i = 0; i < 4; i++) {
u32 w1 = readl(host->base + MMC_RES) & 0xffff;
u32 w2 = readl(host->base + MMC_RES) & 0xffff;
cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
v = w2;
}
if (stat & STAT_TIME_OUT_RESPONSE) {
cmd->error = -ETIMEDOUT;
} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
/*
* workaround for erratum #42:
* Intel PXA27x Family Processor Specification Update Rev 001
* A bogus CRC error can appear if the msb of a 136 bit
* response is a one.
*/
if (cpu_is_pxa27x() &&
(cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000))
pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
else
cmd->error = -EILSEQ;
}
pxamci_disable_irq(host, END_CMD_RES);
if (host->data && !cmd->error) {
pxamci_enable_irq(host, DATA_TRAN_DONE);
/*
* workaround for erratum #91, if doing write
* enable DMA late
*/
if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
DCSR(host->dma) = DCSR_RUN;
} else {
pxamci_finish_request(host, host->mrq);
}
return 1;
}
static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
struct mmc_data *data = host->data;
if (!data)
return 0;
DCSR(host->dma) = 0;
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
host->dma_dir);
if (stat & STAT_READ_TIME_OUT)
data->error = -ETIMEDOUT;
else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
data->error = -EILSEQ;
/*
* There appears to be a hardware design bug here. There seems to
* be no way to find out how much data was transferred to the card.
* This means that if there was an error on any block, we mark all
* data blocks as being in error.
*/
if (!data->error)
data->bytes_xfered = data->blocks * data->blksz;
else
data->bytes_xfered = 0;
pxamci_disable_irq(host, DATA_TRAN_DONE);
host->data = NULL;
if (host->mrq->stop) {
pxamci_stop_clock(host);
pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
} else {
pxamci_finish_request(host, host->mrq);
}
return 1;
}
static irqreturn_t pxamci_irq(int irq, void *devid)
{
struct pxamci_host *host = devid;
unsigned int ireg;
int handled = 0;
ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);
if (ireg) {
unsigned stat = readl(host->base + MMC_STAT);
pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
if (ireg & END_CMD_RES)
handled |= pxamci_cmd_done(host, stat);
if (ireg & DATA_TRAN_DONE)
handled |= pxamci_data_done(host, stat);
if (ireg & SDIO_INT) {
mmc_signal_sdio_irq(host->mmc);
handled = 1;
}
}
return IRQ_RETVAL(handled);
}
static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct pxamci_host *host = mmc_priv(mmc);
unsigned int cmdat;
WARN_ON(host->mrq != NULL);
host->mrq = mrq;
pxamci_stop_clock(host);
cmdat = host->cmdat;
host->cmdat &= ~CMDAT_INIT;
if (mrq->data) {
pxamci_setup_data(host, mrq->data);
cmdat &= ~CMDAT_BUSY;
cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
if (mrq->data->flags & MMC_DATA_WRITE)
cmdat |= CMDAT_WRITE;
if (mrq->data->flags & MMC_DATA_STREAM)
cmdat |= CMDAT_STREAM;
}
pxamci_start_cmd(host, mrq->cmd, cmdat);
}
static int pxamci_get_ro(struct mmc_host *mmc)
{
struct pxamci_host *host = mmc_priv(mmc);
if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
if (host->pdata->gpio_card_ro_invert)
return !gpio_get_value(host->pdata->gpio_card_ro);
else
return gpio_get_value(host->pdata->gpio_card_ro);
}
if (host->pdata && host->pdata->get_ro)
return !!host->pdata->get_ro(mmc_dev(mmc));
/*
* Board doesn't support read only detection; let the mmc core
* decide what to do.
*/
return -ENOSYS;
}
static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct pxamci_host *host = mmc_priv(mmc);
if (ios->clock) {
unsigned long rate = host->clkrate;
unsigned int clk = rate / ios->clock;
if (host->clkrt == CLKRT_OFF)
clk_enable(host->clk);
if (ios->clock == 26000000) {
/* to support 26MHz */
host->clkrt = 7;
} else {
/* to handle (19.5MHz, 26MHz) */
if (!clk)
clk = 1;
/*
* clk might result in a lower divisor than we
* desire. check for that condition and adjust
* as appropriate.
*/
if (rate / clk > ios->clock)
clk <<= 1;
host->clkrt = fls(clk) - 1;
}
/*
* we write clkrt on the next command
*/
} else {
pxamci_stop_clock(host);
if (host->clkrt != CLKRT_OFF) {
host->clkrt = CLKRT_OFF;
clk_disable(host->clk);
}
}
if (host->power_mode != ios->power_mode) {
int ret;
host->power_mode = ios->power_mode;
ret = pxamci_set_power(host, ios->power_mode, ios->vdd);
if (ret) {
dev_err(mmc_dev(mmc), "unable to set power\n");
/*
* The .set_ios() function in the mmc_host_ops
* struct returns void, and failing to set the
* power should be rare so we print an error and
* return here.
*/
return;
}
if (ios->power_mode == MMC_POWER_ON)
host->cmdat |= CMDAT_INIT;
}
if (ios->bus_width == MMC_BUS_WIDTH_4)
host->cmdat |= CMDAT_SD_4DAT;
else
host->cmdat &= ~CMDAT_SD_4DAT;
dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n",
host->clkrt, host->cmdat);
}
static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
{
struct pxamci_host *pxa_host = mmc_priv(host);
if (enable)
pxamci_enable_irq(pxa_host, SDIO_INT);
else
pxamci_disable_irq(pxa_host, SDIO_INT);
}
static const struct mmc_host_ops pxamci_ops = {
.request = pxamci_request,
.get_ro = pxamci_get_ro,
.set_ios = pxamci_set_ios,
.enable_sdio_irq = pxamci_enable_sdio_irq,
};
static void pxamci_dma_irq(int dma, void *devid)
{
struct pxamci_host *host = devid;
int dcsr = DCSR(dma);
DCSR(dma) = dcsr & ~DCSR_STOPIRQEN;
if (dcsr & DCSR_ENDINTR) {
writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
} else {
pr_err("%s: DMA error on channel %d (DCSR=%#x)\n",
mmc_hostname(host->mmc), dma, dcsr);
host->data->error = -EIO;
pxamci_data_done(host, 0);
}
}
static irqreturn_t pxamci_detect_irq(int irq, void *devid)
{
struct pxamci_host *host = mmc_priv(devid);
mmc_detect_change(devid, msecs_to_jiffies(host->pdata->detect_delay_ms));
return IRQ_HANDLED;
}
#ifdef CONFIG_OF
static const struct of_device_id pxa_mmc_dt_ids[] = {
{ .compatible = "marvell,pxa-mmc" },
{ }
};
MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids);
static int pxamci_of_init(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct pxamci_platform_data *pdata;
u32 tmp;
if (!np)
return 0;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
pdata->gpio_card_detect =
of_get_named_gpio(np, "cd-gpios", 0);
pdata->gpio_card_ro =
of_get_named_gpio(np, "wp-gpios", 0);
/* pxa-mmc specific */
pdata->gpio_power =
of_get_named_gpio(np, "pxa-mmc,gpio-power", 0);
if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0)
pdata->detect_delay_ms = tmp;
pdev->dev.platform_data = pdata;
return 0;
}
#else
static int pxamci_of_init(struct platform_device *pdev)
{
return 0;
}
#endif
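/*
 * Illustrative device tree node for this binding (property names are
 * taken from pxamci_of_init() above; the GPIO phandle and pin numbers
 * are assumptions, not from a real board):
 *
 *	mmc0: mmc@41100000 {
 *		compatible = "marvell,pxa-mmc";
 *		cd-gpios = <&gpio 23 0>;
 *		wp-gpios = <&gpio 24 0>;
 *		pxa-mmc,gpio-power = <&gpio 22 0>;
 *		pxa-mmc,detect-delay-ms = <200>;
 *	};
 */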
static int pxamci_probe(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct pxamci_host *host = NULL;
struct resource *r, *dmarx, *dmatx;
int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
ret = pxamci_of_init(pdev);
if (ret)
return ret;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!r || irq < 0)
return -ENXIO;
r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
if (!r)
return -EBUSY;
mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
goto out;
}
mmc->ops = &pxamci_ops;
/*
* We can do SG-DMA, but we don't because we never know how much
* data we successfully wrote to the card.
*/
mmc->max_segs = NR_SG;
/*
* Our hardware DMA can handle a maximum of one page per SG entry.
*/
mmc->max_seg_size = PAGE_SIZE;
/*
* Block length register is only 10 bits before PXA27x.
*/
mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;
/*
* Block count register is 16 bits.
*/
mmc->max_blk_count = 65535;
host = mmc_priv(mmc);
host->mmc = mmc;
host->dma = -1;
host->pdata = pdev->dev.platform_data;
host->clkrt = CLKRT_OFF;
host->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
host->clk = NULL;
goto out;
}
host->clkrate = clk_get_rate(host->clk);
/*
* Calculate minimum clock rate, rounding up.
*/
mmc->f_min = (host->clkrate + 63) / 64;
mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate;
pxamci_init_ocr(host);
mmc->caps = 0;
host->cmdat = 0;
if (!cpu_is_pxa25x()) {
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
host->cmdat |= CMDAT_SDIO_INT_EN;
if (mmc_has_26MHz())
mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
MMC_CAP_SD_HIGHSPEED;
}
host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL);
if (!host->sg_cpu) {
ret = -ENOMEM;
goto out;
}
spin_lock_init(&host->lock);
host->res = r;
host->irq = irq;
host->imask = MMC_I_MASK_ALL;
host->base = ioremap(r->start, SZ_4K);
if (!host->base) {
ret = -ENOMEM;
goto out;
}
/*
* Ensure that the host controller is shut down, and setup
* with our defaults.
*/
pxamci_stop_clock(host);
writel(0, host->base + MMC_SPI);
writel(64, host->base + MMC_RESTO);
writel(host->imask, host->base + MMC_I_MASK);
host->dma = pxa_request_dma(DRIVER_NAME, DMA_PRIO_LOW,
pxamci_dma_irq, host);
if (host->dma < 0) {
ret = -EBUSY;
goto out;
}
ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
if (ret)
goto out;
platform_set_drvdata(pdev, mmc);
dmarx = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!dmarx) {
ret = -ENXIO;
goto out;
}
host->dma_drcmrrx = dmarx->start;
dmatx = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (!dmatx) {
ret = -ENXIO;
goto out;
}
host->dma_drcmrtx = dmatx->start;
if (host->pdata) {
gpio_cd = host->pdata->gpio_card_detect;
gpio_ro = host->pdata->gpio_card_ro;
gpio_power = host->pdata->gpio_power;
}
if (gpio_is_valid(gpio_power)) {
ret = gpio_request(gpio_power, "mmc card power");
if (ret) {
dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
goto out;
}
gpio_direction_output(gpio_power,
host->pdata->gpio_power_invert);
}
if (gpio_is_valid(gpio_ro)) {
ret = gpio_request(gpio_ro, "mmc card read only");
if (ret) {
dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
goto err_gpio_ro;
}
gpio_direction_input(gpio_ro);
}
if (gpio_is_valid(gpio_cd)) {
ret = gpio_request(gpio_cd, "mmc card detect");
if (ret) {
dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
goto err_gpio_cd;
}
gpio_direction_input(gpio_cd);
ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"mmc card detect", mmc);
if (ret) {
dev_err(&pdev->dev, "failed to request card detect IRQ\n");
goto err_request_irq;
}
}
if (host->pdata && host->pdata->init)
host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);
if (gpio_is_valid(gpio_power) && host->pdata->setpower)
dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n");
if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n");
mmc_add_host(mmc);
return 0;
err_request_irq:
gpio_free(gpio_cd);
err_gpio_cd:
gpio_free(gpio_ro);
err_gpio_ro:
gpio_free(gpio_power);
out:
if (host) {
if (host->dma >= 0)
pxa_free_dma(host->dma);
if (host->base)
iounmap(host->base);
if (host->sg_cpu)
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
if (host->clk)
clk_put(host->clk);
}
if (mmc)
mmc_free_host(mmc);
release_resource(r);
return ret;
}
static int pxamci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc = platform_get_drvdata(pdev);
int gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
platform_set_drvdata(pdev, NULL);
if (mmc) {
struct pxamci_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
if (host->pdata) {
gpio_cd = host->pdata->gpio_card_detect;
gpio_ro = host->pdata->gpio_card_ro;
gpio_power = host->pdata->gpio_power;
}
if (gpio_is_valid(gpio_cd)) {
free_irq(gpio_to_irq(gpio_cd), mmc);
gpio_free(gpio_cd);
}
if (gpio_is_valid(gpio_ro))
gpio_free(gpio_ro);
if (gpio_is_valid(gpio_power))
gpio_free(gpio_power);
if (host->vcc)
regulator_put(host->vcc);
if (host->pdata && host->pdata->exit)
host->pdata->exit(&pdev->dev, mmc);
pxamci_stop_clock(host);
writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
host->base + MMC_I_MASK);
DRCMR(host->dma_drcmrrx) = 0;
DRCMR(host->dma_drcmrtx) = 0;
free_irq(host->irq, host);
pxa_free_dma(host->dma);
iounmap(host->base);
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
clk_put(host->clk);
release_resource(host->res);
mmc_free_host(mmc);
}
return 0;
}
#ifdef CONFIG_PM
static int pxamci_suspend(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
int ret = 0;
if (mmc)
ret = mmc_suspend_host(mmc);
return ret;
}
static int pxamci_resume(struct device *dev)
{
struct mmc_host *mmc = dev_get_drvdata(dev);
int ret = 0;
if (mmc)
ret = mmc_resume_host(mmc);
return ret;
}
static const struct dev_pm_ops pxamci_pm_ops = {
.suspend = pxamci_suspend,
.resume = pxamci_resume,
};
#endif
static struct platform_driver pxamci_driver = {
.probe = pxamci_probe,
.remove = pxamci_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(pxa_mmc_dt_ids),
#ifdef CONFIG_PM
.pm = &pxamci_pm_ops,
#endif
},
};
module_platform_driver(pxamci_driver);
MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-mci");
| gpl-2.0 |
TeamWin/android_kernel_oppo_r7f | drivers/net/irda/ep7211-sir.c | 2500 | 1565 | /*
* IR port driver for the Cirrus Logic CLPS711X processors
*
* Copyright 2001, Blue Mug Inc. All rights reserved.
* Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <mach/hardware.h>
#include "sir-dev.h"
static int clps711x_dongle_open(struct sir_dev *dev)
{
unsigned int syscon;
/* Turn on the SIR encoder. */
syscon = clps_readl(SYSCON1);
syscon |= SYSCON1_SIREN;
clps_writel(syscon, SYSCON1);
return 0;
}
static int clps711x_dongle_close(struct sir_dev *dev)
{
unsigned int syscon;
/* Turn off the SIR encoder. */
syscon = clps_readl(SYSCON1);
syscon &= ~SYSCON1_SIREN;
clps_writel(syscon, SYSCON1);
return 0;
}
static struct dongle_driver clps711x_dongle = {
.owner = THIS_MODULE,
.driver_name = "EP7211 IR driver",
.type = IRDA_EP7211_DONGLE,
.open = clps711x_dongle_open,
.close = clps711x_dongle_close,
};
static int clps711x_sir_probe(struct platform_device *pdev)
{
return irda_register_dongle(&clps711x_dongle);
}
static int clps711x_sir_remove(struct platform_device *pdev)
{
return irda_unregister_dongle(&clps711x_dongle);
}
static struct platform_driver clps711x_sir_driver = {
.driver = {
.name = "sir-clps711x",
.owner = THIS_MODULE,
},
.probe = clps711x_sir_probe,
.remove = clps711x_sir_remove,
};
module_platform_driver(clps711x_sir_driver);
MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
MODULE_DESCRIPTION("EP7211 IR dongle driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */
| gpl-2.0 |
Coldwindofnowhere/android_kernel_samsung_aries | drivers/gpu/drm/mga/mga_dma.c | 2756 | 29837 | /* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/**
* \file mga_dma.c
* DMA support for MGA G200 / G400.
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Jeff Hartmann <jhartmann@valinux.com>
* \author Keith Whitwell <keith@tungstengraphics.com>
* \author Gareth Hughes <gareth@valinux.com>
*/
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "mga_drm.h"
#include "mga_drv.h"
#define MGA_DEFAULT_USEC_TIMEOUT 10000
#define MGA_FREELIST_DEBUG 0
#define MINIMAL_CLEANUP 0
#define FULL_CLEANUP 1
static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
/* ================================================================
* Engine control
*/
int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
{
u32 status = 0;
int i;
DRM_DEBUG("\n");
for (i = 0; i < dev_priv->usec_timeout; i++) {
status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
if (status == MGA_ENDPRDMASTS) {
MGA_WRITE8(MGA_CRTC_INDEX, 0);
return 0;
}
DRM_UDELAY(1);
}
#if MGA_DMA_DEBUG
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x\n", status);
#endif
return -EBUSY;
}
static int mga_do_dma_reset(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
DRM_DEBUG("\n");
/* The primary DMA stream should look like new right about now.
*/
primary->tail = 0;
primary->space = primary->size;
primary->last_flush = 0;
sarea_priv->last_wrap = 0;
/* FIXME: Reset counters, buffer ages etc...
*/
/* FIXME: What else do we need to reinitialize? WARP stuff?
*/
return 0;
}
/* ================================================================
* Primary DMA stream
*/
void mga_do_dma_flush(drm_mga_private_t *dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
u32 head, tail;
u32 status = 0;
int i;
DMA_LOCALS;
DRM_DEBUG("\n");
/* We need to wait so that we can do a safe flush */
for (i = 0; i < dev_priv->usec_timeout; i++) {
status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
if (status == MGA_ENDPRDMASTS)
break;
DRM_UDELAY(1);
}
if (primary->tail == primary->last_flush) {
DRM_DEBUG(" bailing out...\n");
return;
}
tail = primary->tail + dev_priv->primary->offset;
/* We need to pad the stream between flushes, as the card
* actually (partially?) reads the first of these commands.
* See page 4-16 in the G400 manual, middle of the page or so.
*/
BEGIN_DMA(1);
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
ADVANCE_DMA();
primary->last_flush = primary->tail;
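/*
 * Ring-buffer accounting (illustrative): when the hardware read
 * pointer ('head') is at or behind the software write pointer
 * ('tail'), the free space is whatever remains up to the end of the
 * buffer; once head has wrapped past tail, it is the gap between them.
 */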
head = MGA_READ(MGA_PRIMADDRESS);
if (head <= tail)
primary->space = primary->size - primary->tail;
else
primary->space = head - tail;
DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
DRM_DEBUG(" tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset));
DRM_DEBUG(" space = 0x%06x\n", primary->space);
mga_flush_write_combine();
MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
DRM_DEBUG("done.\n");
}
void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
u32 head, tail;
DMA_LOCALS;
DRM_DEBUG("\n");
BEGIN_DMA_WRAP();
DMA_BLOCK(MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000,
MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
ADVANCE_DMA();
tail = primary->tail + dev_priv->primary->offset;
primary->tail = 0;
primary->last_flush = 0;
primary->last_wrap++;
head = MGA_READ(MGA_PRIMADDRESS);
if (head == dev_priv->primary->offset)
primary->space = primary->size;
else
primary->space = head - dev_priv->primary->offset;
DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
DRM_DEBUG(" tail = 0x%06x\n", primary->tail);
DRM_DEBUG(" wrap = %d\n", primary->last_wrap);
DRM_DEBUG(" space = 0x%06x\n", primary->space);
mga_flush_write_combine();
MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
set_bit(0, &primary->wrapped);
DRM_DEBUG("done.\n");
}
void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
u32 head = dev_priv->primary->offset;
DRM_DEBUG("\n");
sarea_priv->last_wrap++;
DRM_DEBUG(" wrap = %d\n", sarea_priv->last_wrap);
mga_flush_write_combine();
MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);
clear_bit(0, &primary->wrapped);
DRM_DEBUG("done.\n");
}
/* ================================================================
* Freelist management
*/
#define MGA_BUFFER_USED (~0)
#define MGA_BUFFER_FREE 0
#if MGA_FREELIST_DEBUG
static void mga_freelist_print(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
DRM_INFO("\n");
DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
dev_priv->sarea_priv->last_dispatch,
(unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
dev_priv->primary->offset));
DRM_INFO("current freelist:\n");
for (entry = dev_priv->head->next; entry; entry = entry->next) {
DRM_INFO(" %p idx=%2d age=0x%x 0x%06lx\n",
entry, entry->buf->idx, entry->age.head,
(unsigned long)(entry->age.head - dev_priv->primary->offset));
}
DRM_INFO("\n");
}
#endif
static int mga_freelist_init(struct drm_device *dev, drm_mga_private_t *dev_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_freelist_t *entry;
int i;
DRM_DEBUG("count=%d\n", dma->buf_count);
dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
if (dev_priv->head == NULL)
return -ENOMEM;
SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
entry = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
if (entry == NULL)
return -ENOMEM;
entry->next = dev_priv->head->next;
entry->prev = dev_priv->head;
SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
entry->buf = buf;
if (dev_priv->head->next != NULL)
dev_priv->head->next->prev = entry;
if (entry->next == NULL)
dev_priv->tail = entry;
buf_priv->list_entry = entry;
buf_priv->discard = 0;
buf_priv->dispatched = 0;
dev_priv->head->next = entry;
}
return 0;
}
static void mga_freelist_cleanup(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
drm_mga_freelist_t *next;
DRM_DEBUG("\n");
entry = dev_priv->head;
while (entry) {
next = entry->next;
kfree(entry);
entry = next;
}
dev_priv->head = dev_priv->tail = NULL;
}
#if 0
/* FIXME: Still needed?
*/
static void mga_freelist_reset(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
int i;
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
}
}
#endif
static struct drm_buf *mga_freelist_get(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *next;
drm_mga_freelist_t *prev;
drm_mga_freelist_t *tail = dev_priv->tail;
u32 head, wrap;
DRM_DEBUG("\n");
head = MGA_READ(MGA_PRIMADDRESS);
wrap = dev_priv->sarea_priv->last_wrap;
DRM_DEBUG(" tail=0x%06lx %d\n",
tail->age.head ?
(unsigned long)(tail->age.head - dev_priv->primary->offset) : 0,
tail->age.wrap);
DRM_DEBUG(" head=0x%06lx %d\n",
(unsigned long)(head - dev_priv->primary->offset), wrap);
if (TEST_AGE(&tail->age, head, wrap)) {
prev = dev_priv->tail->prev;
next = dev_priv->tail;
prev->next = NULL;
next->prev = next->next = NULL;
dev_priv->tail = prev;
SET_AGE(&next->age, MGA_BUFFER_USED, 0);
return next->buf;
}
DRM_DEBUG("returning NULL!\n");
return NULL;
}
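/*
 * Note on the age test above (a sketch of the intent, not a definition
 * of TEST_AGE() itself): each buffer is stamped with the primary DMA
 * (head, wrap) position at which its commands end, and the tail buffer
 * is handed out again only once the hardware's current position has
 * advanced past that stamp, i.e. the card has consumed everything that
 * referenced it.
 */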
int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
drm_mga_freelist_t *head, *entry, *prev;
DRM_DEBUG("age=0x%06lx wrap=%d\n",
(unsigned long)(buf_priv->list_entry->age.head -
dev_priv->primary->offset),
buf_priv->list_entry->age.wrap);
entry = buf_priv->list_entry;
head = dev_priv->head;
if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
prev = dev_priv->tail;
prev->next = entry;
entry->prev = prev;
entry->next = NULL;
} else {
prev = head->next;
head->next = entry;
prev->prev = entry;
entry->prev = head;
entry->next = prev;
}
return 0;
}
/* ================================================================
* DMA initialization, cleanup
*/
int mga_driver_load(struct drm_device *dev, unsigned long flags)
{
drm_mga_private_t *dev_priv;
int ret;
dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL);
if (!dev_priv)
return -ENOMEM;
dev->dev_private = (void *)dev_priv;
dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
dev_priv->chipset = flags;
dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
dev->counters += 3;
dev->types[6] = _DRM_STAT_IRQ;
dev->types[7] = _DRM_STAT_PRIMARY;
dev->types[8] = _DRM_STAT_SECONDARY;
ret = drm_vblank_init(dev, 1);
if (ret) {
(void) mga_driver_unload(dev);
return ret;
}
return 0;
}
#if __OS_HAS_AGP
/**
* Bootstrap the driver for AGP DMA.
*
* \todo
* Investigate whether there is any benefit to storing the WARP microcode in
* AGP memory. If not, the microcode may as well always be put in PCI
* memory.
*
* \todo
* This routine needs to set dma_bs->agp_mode to the mode actually configured
* in the hardware. Looking just at the Linux AGP driver code, I don't see
* an easy way to determine this.
*
* \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
*/
static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t *dma_bs)
{
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
unsigned int warp_size = MGA_WARP_UCODE_SIZE;
int err;
unsigned offset;
const unsigned secondary_size = dma_bs->secondary_bin_count
* dma_bs->secondary_bin_size;
const unsigned agp_size = (dma_bs->agp_size << 20);
struct drm_buf_desc req;
struct drm_agp_mode mode;
struct drm_agp_info info;
struct drm_agp_buffer agp_req;
struct drm_agp_binding bind_req;
/* Acquire AGP. */
err = drm_agp_acquire(dev);
if (err) {
DRM_ERROR("Unable to acquire AGP: %d\n", err);
return err;
}
err = drm_agp_info(dev, &info);
if (err) {
DRM_ERROR("Unable to get AGP info: %d\n", err);
return err;
}
mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
err = drm_agp_enable(dev, mode);
if (err) {
DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
return err;
}
/* In addition to the usual AGP mode configuration, the G200 AGP cards
* need to have the AGP mode "manually" set.
*/
if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
if (mode.mode & 0x02)
MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
else
MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
}
/* Allocate and bind AGP memory. */
agp_req.size = agp_size;
agp_req.type = 0;
err = drm_agp_alloc(dev, &agp_req);
if (err) {
dev_priv->agp_size = 0;
DRM_ERROR("Unable to allocate %uMB AGP memory\n",
dma_bs->agp_size);
return err;
}
dev_priv->agp_size = agp_size;
dev_priv->agp_handle = agp_req.handle;
bind_req.handle = agp_req.handle;
bind_req.offset = 0;
err = drm_agp_bind(dev, &bind_req);
if (err) {
DRM_ERROR("Unable to bind AGP memory: %d\n", err);
return err;
}
/* Make drm_addbufs happy by not trying to create a mapping for less
* than a page.
*/
if (warp_size < PAGE_SIZE)
warp_size = PAGE_SIZE;
offset = 0;
err = drm_addmap(dev, offset, warp_size,
_DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
if (err) {
DRM_ERROR("Unable to map WARP microcode: %d\n", err);
return err;
}
offset += warp_size;
err = drm_addmap(dev, offset, dma_bs->primary_size,
_DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
if (err) {
DRM_ERROR("Unable to map primary DMA region: %d\n", err);
return err;
}
offset += dma_bs->primary_size;
err = drm_addmap(dev, offset, secondary_size,
_DRM_AGP, 0, &dev->agp_buffer_map);
if (err) {
DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
return err;
}
(void)memset(&req, 0, sizeof(req));
req.count = dma_bs->secondary_bin_count;
req.size = dma_bs->secondary_bin_size;
req.flags = _DRM_AGP_BUFFER;
req.agp_start = offset;
err = drm_addbufs_agp(dev, &req);
if (err) {
DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
return err;
}
{
struct drm_map_list *_entry;
unsigned long agp_token = 0;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == dev->agp_buffer_map)
agp_token = _entry->user_token;
}
if (!agp_token)
return -EFAULT;
dev->agp_buffer_token = agp_token;
}
offset += secondary_size;
err = drm_addmap(dev, offset, agp_size - offset,
_DRM_AGP, 0, &dev_priv->agp_textures);
if (err) {
DRM_ERROR("Unable to map AGP texture region %d\n", err);
return err;
}
drm_core_ioremap(dev_priv->warp, dev);
drm_core_ioremap(dev_priv->primary, dev);
drm_core_ioremap(dev->agp_buffer_map, dev);
if (!dev_priv->warp->handle ||
!dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
dev_priv->warp->handle, dev_priv->primary->handle,
dev->agp_buffer_map->handle);
return -ENOMEM;
}
dev_priv->dma_access = MGA_PAGPXFER;
dev_priv->wagp_enable = MGA_WAGP_ENABLE;
DRM_INFO("Initialized card for AGP DMA.\n");
return 0;
}
#else
static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t *dma_bs)
{
return -EINVAL;
}
#endif
/**
* Bootstrap the driver for PCI DMA.
*
* \todo
* The algorithm for decreasing the size of the primary DMA buffer could be
* better. The size should be rounded up to the nearest page size, then
* decrease the request size by a single page each pass through the loop.
*
* \todo
* Determine whether the maximum address passed to drm_pci_alloc is correct.
* The same goes for drm_addbufs_pci.
*
* \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
*/
static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t *dma_bs)
{
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
unsigned int warp_size = MGA_WARP_UCODE_SIZE;
unsigned int primary_size;
unsigned int bin_count;
int err;
struct drm_buf_desc req;
if (dev->dma == NULL) {
DRM_ERROR("dev->dma is NULL\n");
return -EFAULT;
}
/* Make drm_addbufs happy by not trying to create a mapping for less
* than a page.
*/
if (warp_size < PAGE_SIZE)
warp_size = PAGE_SIZE;
/* The proper alignment is 0x100 for this mapping */
err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
_DRM_READ_ONLY, &dev_priv->warp);
if (err != 0) {
DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
err);
return err;
}
/* Other than the bottom two bits being used to encode other
* information, there don't appear to be any restrictions on the
* alignment of the primary or secondary DMA buffers.
*/
for (primary_size = dma_bs->primary_size; primary_size != 0;
primary_size >>= 1) {
/* The proper alignment for this mapping is 0x04 */
err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
_DRM_READ_ONLY, &dev_priv->primary);
if (!err)
break;
}
if (err != 0) {
DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
return -ENOMEM;
}
if (dev_priv->primary->size != dma_bs->primary_size) {
DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
dma_bs->primary_size,
(unsigned)dev_priv->primary->size);
dma_bs->primary_size = dev_priv->primary->size;
}
for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
bin_count--) {
(void)memset(&req, 0, sizeof(req));
req.count = bin_count;
req.size = dma_bs->secondary_bin_size;
err = drm_addbufs_pci(dev, &req);
if (!err)
break;
}
if (bin_count == 0) {
DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
return err;
}
if (bin_count != dma_bs->secondary_bin_count) {
DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
"to %u.\n", dma_bs->secondary_bin_count, bin_count);
dma_bs->secondary_bin_count = bin_count;
}
dev_priv->dma_access = 0;
dev_priv->wagp_enable = 0;
dma_bs->agp_mode = 0;
DRM_INFO("Initialized card for PCI DMA.\n");
return 0;
}
static int mga_do_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t *dma_bs)
{
const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev);
int err;
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
dev_priv->used_new_dma_init = 1;
/* The first steps are the same for both PCI and AGP based DMA. Map
* the card's MMIO registers and map a status page.
*/
err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
_DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
if (err) {
DRM_ERROR("Unable to map MMIO region: %d\n", err);
return err;
}
err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
_DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
&dev_priv->status);
if (err) {
DRM_ERROR("Unable to map status region: %d\n", err);
return err;
}
/* The DMA initialization procedure is slightly different for PCI and
* AGP cards. AGP cards just allocate a large block of AGP memory and
* carve off portions of it for internal uses. The remaining memory
* is returned to user-mode to be used for AGP textures.
*/
if (is_agp)
err = mga_do_agp_dma_bootstrap(dev, dma_bs);
/* If we attempted to initialize the card for AGP DMA but failed,
* clean up any mess that may have been created.
*/
if (err)
mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
/* Not only do we want to try to initialize PCI cards for PCI DMA,
* but we also try to initialize AGP cards that could not be
* initialized for AGP DMA. This covers the case where we have an AGP
* card in a system with an unsupported AGP chipset. In that case the
* card will be detected as AGP, but we won't be able to allocate any
* AGP memory, etc.
*/
if (!is_agp || err)
err = mga_do_pci_dma_bootstrap(dev, dma_bs);
return err;
}
int mga_dma_bootstrap(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_mga_dma_bootstrap_t *bootstrap = data;
int err;
static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
const drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
err = mga_do_dma_bootstrap(dev, bootstrap);
if (err) {
mga_do_cleanup_dma(dev, FULL_CLEANUP);
return err;
}
if (dev_priv->agp_textures != NULL) {
bootstrap->texture_handle = dev_priv->agp_textures->offset;
bootstrap->texture_size = dev_priv->agp_textures->size;
} else {
bootstrap->texture_handle = 0;
bootstrap->texture_size = 0;
}
bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];
return err;
}
static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
{
drm_mga_private_t *dev_priv;
int ret;
DRM_DEBUG("\n");
dev_priv = dev->dev_private;
if (init->sgram)
dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
else
dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
dev_priv->maccess = init->maccess;
dev_priv->fb_cpp = init->fb_cpp;
dev_priv->front_offset = init->front_offset;
dev_priv->front_pitch = init->front_pitch;
dev_priv->back_offset = init->back_offset;
dev_priv->back_pitch = init->back_pitch;
dev_priv->depth_cpp = init->depth_cpp;
dev_priv->depth_offset = init->depth_offset;
dev_priv->depth_pitch = init->depth_pitch;
/* FIXME: Need to support AGP textures...
*/
dev_priv->texture_offset = init->texture_offset[0];
dev_priv->texture_size = init->texture_size[0];
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("failed to find sarea!\n");
return -EINVAL;
}
if (!dev_priv->used_new_dma_init) {
dev_priv->dma_access = MGA_PAGPXFER;
dev_priv->wagp_enable = MGA_WAGP_ENABLE;
dev_priv->status = drm_core_findmap(dev, init->status_offset);
if (!dev_priv->status) {
DRM_ERROR("failed to find status page!\n");
return -EINVAL;
}
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio) {
DRM_ERROR("failed to find mmio region!\n");
return -EINVAL;
}
dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
if (!dev_priv->warp) {
DRM_ERROR("failed to find warp microcode region!\n");
return -EINVAL;
}
dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
if (!dev_priv->primary) {
DRM_ERROR("failed to find primary dma region!\n");
return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map =
drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
DRM_ERROR("failed to find dma buffer region!\n");
return -EINVAL;
}
drm_core_ioremap(dev_priv->warp, dev);
drm_core_ioremap(dev_priv->primary, dev);
drm_core_ioremap(dev->agp_buffer_map, dev);
}
dev_priv->sarea_priv =
(drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
init->sarea_priv_offset);
if (!dev_priv->warp->handle ||
!dev_priv->primary->handle ||
((dev_priv->dma_access != 0) &&
((dev->agp_buffer_map == NULL) ||
(dev->agp_buffer_map->handle == NULL)))) {
DRM_ERROR("failed to ioremap agp regions!\n");
return -ENOMEM;
}
ret = mga_warp_install_microcode(dev_priv);
if (ret < 0) {
DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
return ret;
}
ret = mga_warp_init(dev_priv);
if (ret < 0) {
DRM_ERROR("failed to init WARP engine!: %d\n", ret);
return ret;
}
dev_priv->prim.status = (u32 *) dev_priv->status->handle;
mga_do_wait_for_idle(dev_priv);
/* Init the primary DMA registers.
*/
MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
#if 0
MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */
MGA_PRIMPTREN1); /* DWGSYNC */
#endif
dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
+ dev_priv->primary->size);
dev_priv->prim.size = dev_priv->primary->size;
dev_priv->prim.tail = 0;
dev_priv->prim.space = dev_priv->prim.size;
dev_priv->prim.wrapped = 0;
dev_priv->prim.last_flush = 0;
dev_priv->prim.last_wrap = 0;
dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;
dev_priv->prim.status[0] = dev_priv->primary->offset;
dev_priv->prim.status[1] = 0;
dev_priv->sarea_priv->last_wrap = 0;
dev_priv->sarea_priv->last_frame.head = 0;
dev_priv->sarea_priv->last_frame.wrap = 0;
if (mga_freelist_init(dev, dev_priv) < 0) {
DRM_ERROR("could not initialize freelist\n");
return -ENOMEM;
}
return 0;
}
static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
{
int err = 0;
DRM_DEBUG("\n");
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (dev->irq_enabled)
drm_irq_uninstall(dev);
if (dev->dev_private) {
drm_mga_private_t *dev_priv = dev->dev_private;
if ((dev_priv->warp != NULL)
&& (dev_priv->warp->type != _DRM_CONSISTENT))
drm_core_ioremapfree(dev_priv->warp, dev);
if ((dev_priv->primary != NULL)
&& (dev_priv->primary->type != _DRM_CONSISTENT))
drm_core_ioremapfree(dev_priv->primary, dev);
if (dev->agp_buffer_map != NULL)
drm_core_ioremapfree(dev->agp_buffer_map, dev);
if (dev_priv->used_new_dma_init) {
#if __OS_HAS_AGP
if (dev_priv->agp_handle != 0) {
struct drm_agp_binding unbind_req;
struct drm_agp_buffer free_req;
unbind_req.handle = dev_priv->agp_handle;
drm_agp_unbind(dev, &unbind_req);
free_req.handle = dev_priv->agp_handle;
drm_agp_free(dev, &free_req);
dev_priv->agp_textures = NULL;
dev_priv->agp_size = 0;
dev_priv->agp_handle = 0;
}
if ((dev->agp != NULL) && dev->agp->acquired)
err = drm_agp_release(dev);
#endif
}
dev_priv->warp = NULL;
dev_priv->primary = NULL;
dev_priv->sarea = NULL;
dev_priv->sarea_priv = NULL;
dev->agp_buffer_map = NULL;
if (full_cleanup) {
dev_priv->mmio = NULL;
dev_priv->status = NULL;
dev_priv->used_new_dma_init = 0;
}
memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
dev_priv->warp_pipe = 0;
memset(dev_priv->warp_pipe_phys, 0,
sizeof(dev_priv->warp_pipe_phys));
if (dev_priv->head != NULL)
mga_freelist_cleanup(dev);
}
return err;
}
int mga_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_mga_init_t *init = data;
int err;
LOCK_TEST_WITH_RETURN(dev, file_priv);
switch (init->func) {
case MGA_INIT_DMA:
err = mga_do_init_dma(dev, init);
if (err)
(void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
return err;
case MGA_CLEANUP_DMA:
return mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
return -EINVAL;
}
/* ================================================================
* Primary DMA stream management
*/
int mga_dma_flush(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
struct drm_lock *lock = data;
LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_DEBUG("%s%s%s\n",
(lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
(lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
(lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
WRAP_WAIT_WITH_RETURN(dev_priv);
if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL))
mga_do_dma_flush(dev_priv);
if (lock->flags & _DRM_LOCK_QUIESCENT) {
#if MGA_DMA_DEBUG
int ret = mga_do_wait_for_idle(dev_priv);
if (ret < 0)
DRM_INFO("-EBUSY\n");
return ret;
#else
return mga_do_wait_for_idle(dev_priv);
#endif
} else {
return 0;
}
}
int mga_dma_reset(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
LOCK_TEST_WITH_RETURN(dev, file_priv);
return mga_do_dma_reset(dev_priv);
}
/* ================================================================
* DMA buffer management
*/
static int mga_dma_get_buffers(struct drm_device *dev,
struct drm_file *file_priv, struct drm_dma *d)
{
struct drm_buf *buf;
int i;
for (i = d->granted_count; i < d->request_count; i++) {
buf = mga_freelist_get(dev);
if (!buf)
return -EAGAIN;
buf->file_priv = file_priv;
if (DRM_COPY_TO_USER(&d->request_indices[i],
&buf->idx, sizeof(buf->idx)))
return -EFAULT;
if (DRM_COPY_TO_USER(&d->request_sizes[i],
&buf->total, sizeof(buf->total)))
return -EFAULT;
d->granted_count++;
}
return 0;
}
int mga_dma_buffers(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
struct drm_dma *d = data;
int ret = 0;
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Please don't send us buffers.
*/
if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
DRM_CURRENTPID, d->send_count);
return -EINVAL;
}
/* We'll send you buffers.
*/
if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
DRM_CURRENTPID, d->request_count, dma->buf_count);
return -EINVAL;
}
WRAP_TEST_WITH_RETURN(dev_priv);
d->granted_count = 0;
if (d->request_count)
ret = mga_dma_get_buffers(dev, file_priv, d);
return ret;
}
/**
* Called just before the module is unloaded.
*/
int mga_driver_unload(struct drm_device *dev)
{
kfree(dev->dev_private);
dev->dev_private = NULL;
return 0;
}
/**
* Called when the last opener of the device is closed.
*/
void mga_driver_lastclose(struct drm_device *dev)
{
mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
int mga_driver_dma_quiescent(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
return mga_do_wait_for_idle(dev_priv);
}
| gpl-2.0 |
Slim80/Fulgor_Kernel_Lollipop | lib/nlattr.c | 3780 | 12556 | /*
* NETLINK Netlink attributes
*
* Authors: Thomas Graf <tgraf@suug.ch>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/netlink.h>
static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
[NLA_U8] = sizeof(u8),
[NLA_U16] = sizeof(u16),
[NLA_U32] = sizeof(u32),
[NLA_U64] = sizeof(u64),
[NLA_MSECS] = sizeof(u64),
[NLA_NESTED] = NLA_HDRLEN,
};
static int validate_nla(const struct nlattr *nla, int maxtype,
const struct nla_policy *policy)
{
const struct nla_policy *pt;
int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
if (type <= 0 || type > maxtype)
return 0;
pt = &policy[type];
BUG_ON(pt->type > NLA_TYPE_MAX);
switch (pt->type) {
case NLA_FLAG:
if (attrlen > 0)
return -ERANGE;
break;
case NLA_NUL_STRING:
if (pt->len)
minlen = min_t(int, attrlen, pt->len + 1);
else
minlen = attrlen;
if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
return -EINVAL;
/* fall through */
case NLA_STRING:
if (attrlen < 1)
return -ERANGE;
if (pt->len) {
char *buf = nla_data(nla);
if (buf[attrlen - 1] == '\0')
attrlen--;
if (attrlen > pt->len)
return -ERANGE;
}
break;
case NLA_BINARY:
if (pt->len && attrlen > pt->len)
return -ERANGE;
break;
case NLA_NESTED_COMPAT:
if (attrlen < pt->len)
return -ERANGE;
if (attrlen < NLA_ALIGN(pt->len))
break;
if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
return -ERANGE;
nla = nla_data(nla) + NLA_ALIGN(pt->len);
if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
return -ERANGE;
break;
case NLA_NESTED:
/* a nested attribute is allowed to be empty; if it's not,
* it must have a size of at least NLA_HDRLEN.
*/
if (attrlen == 0)
break;
default:
if (pt->len)
minlen = pt->len;
else if (pt->type != NLA_UNSPEC)
minlen = nla_attr_minlen[pt->type];
if (attrlen < minlen)
return -ERANGE;
}
return 0;
}
/**
* nla_validate - Validate a stream of attributes
* @head: head of attribute stream
* @len: length of attribute stream
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
*
* Validates all attributes in the specified attribute stream against the
* specified policy. Attributes with a type exceeding maxtype will be
* ignored. See documentation of struct nla_policy for more details.
*
* Returns 0 on success or a negative error code.
*/
int nla_validate(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy)
{
const struct nlattr *nla;
int rem, err;
nla_for_each_attr(nla, head, len, rem) {
err = validate_nla(nla, maxtype, policy);
if (err < 0)
goto errout;
}
err = 0;
errout:
return err;
}
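/*
 * Illustrative usage sketch (the EX_ATTR_* names and the policy below
 * are hypothetical, not part of this file):
 *
 *	enum { EX_ATTR_UNSPEC, EX_ATTR_NAME, EX_ATTR_MTU, __EX_ATTR_MAX };
 *	#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)
 *
 *	static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
 *		[EX_ATTR_NAME]	= { .type = NLA_NUL_STRING, .len = 15 },
 *		[EX_ATTR_MTU]	= { .type = NLA_U32 },
 *	};
 *
 *	err = nla_validate(nlmsg_attrdata(nlh, 0), nlmsg_attrlen(nlh, 0),
 *			   EX_ATTR_MAX, ex_policy);
 *
 * (nlmsg_attrdata()/nlmsg_attrlen() here assume a message with no
 * family-specific header, hence the hdrlen of 0.)
 */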
/**
* nla_policy_len - Determine the max. length of a policy
* @p: policy to use
* @n: number of policy entries
*
* Determines the max. length of the policy. It is currently used
* to allocate Netlink buffers roughly the size of the actual
* message.
*
* Returns the computed maximum total attribute length.
*/
int
nla_policy_len(const struct nla_policy *p, int n)
{
int i, len = 0;
for (i = 0; i < n; i++, p++) {
if (p->len)
len += nla_total_size(p->len);
else if (nla_attr_minlen[p->type])
len += nla_total_size(nla_attr_minlen[p->type]);
}
return len;
}
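/*
 * Worked example (hypothetical two-entry policy): an NLA_U32 entry
 * with no explicit .len contributes nla_total_size(4) =
 * NLA_ALIGN(NLA_HDRLEN + 4) = 8 bytes, and a string entry with
 * .len = 15 contributes nla_total_size(15) = NLA_ALIGN(4 + 15) = 20
 * bytes, so nla_policy_len() returns 28 for the pair.
 */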
/**
* nla_parse - Parse a stream of attributes into a tb buffer
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @head: head of attribute stream
* @len: length of attribute stream
* @policy: validation policy
*
* Parses a stream of attributes and stores a pointer to each attribute in
* the tb array accessible via the attribute type. Attributes with a type
* exceeding maxtype will be silently ignored for backwards compatibility
* reasons. policy may be set to NULL if no validation is required.
*
* Returns 0 on success or a negative error code.
*/
int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
int len, const struct nla_policy *policy)
{
const struct nlattr *nla;
int rem, err;
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
nla_for_each_attr(nla, head, len, rem) {
u16 type = nla_type(nla);
if (type > 0 && type <= maxtype) {
if (policy) {
err = validate_nla(nla, maxtype, policy);
if (err < 0)
goto errout;
}
tb[type] = (struct nlattr *)nla;
}
}
if (unlikely(rem > 0))
printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
"attributes.\n", rem);
err = 0;
errout:
return err;
}
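/*
 * Illustrative usage sketch, continuing the hypothetical EX_ATTR_*
 * policy from the nla_validate() sketch above:
 *
 *	struct nlattr *tb[EX_ATTR_MAX + 1];
 *
 *	err = nla_parse(tb, EX_ATTR_MAX, nlmsg_attrdata(nlh, 0),
 *			nlmsg_attrlen(nlh, 0), ex_policy);
 *	if (err < 0)
 *		return err;
 *	if (tb[EX_ATTR_MTU])
 *		mtu = nla_get_u32(tb[EX_ATTR_MTU]);
 */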
/**
* nla_find - Find a specific attribute in a stream of attributes
* @head: head of attribute stream
* @len: length of attribute stream
* @attrtype: type of attribute to look for
*
* Returns the first attribute in the stream matching the specified type.
*/
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
{
const struct nlattr *nla;
int rem;
nla_for_each_attr(nla, head, len, rem)
if (nla_type(nla) == attrtype)
return (struct nlattr *)nla;
return NULL;
}
/**
* nla_strlcpy - Copy string attribute payload into a sized buffer
* @dst: where to copy the string to
* @nla: attribute to copy the string from
* @dstsize: size of destination buffer
*
* Copies at most dstsize - 1 bytes into the destination buffer.
* The result is always a valid NUL-terminated string. Unlike
* strlcpy, the destination buffer is always padded out.
*
* Returns the length of the source buffer.
*/
size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
{
size_t srclen = nla_len(nla);
char *src = nla_data(nla);
if (srclen > 0 && src[srclen - 1] == '\0')
srclen--;
if (dstsize > 0) {
size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen;
memset(dst, 0, dstsize);
memcpy(dst, src, len);
}
return srclen;
}
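/*
 * Worked example: for an attribute carrying "eth0\0" (payload length
 * 5) and dstsize == 3, the trailing NUL is stripped (srclen == 4),
 * two bytes "et" plus a terminating NUL are stored in dst, and 4 is
 * returned.
 */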
/**
* nla_memcpy - Copy a netlink attribute into another memory area
* @dest: where to copy to
* @src: netlink attribute to copy from
* @count: size of the destination area
*
* Note: The number of bytes copied is limited by the length of
* the attribute's payload.
*
* Returns the number of bytes copied.
*/
int nla_memcpy(void *dest, const struct nlattr *src, int count)
{
int minlen = min_t(int, count, nla_len(src));
memcpy(dest, nla_data(src), minlen);
return minlen;
}
/**
* nla_memcmp - Compare an attribute with sized memory area
* @nla: netlink attribute
* @data: memory area
* @size: size of memory area
*/
int nla_memcmp(const struct nlattr *nla, const void *data,
size_t size)
{
int d = nla_len(nla) - size;
if (d == 0)
d = memcmp(nla_data(nla), data, size);
return d;
}
/**
* nla_strcmp - Compare a string attribute against a string
* @nla: netlink string attribute
* @str: another string
*/
int nla_strcmp(const struct nlattr *nla, const char *str)
{
int len = strlen(str) + 1;
int d = nla_len(nla) - len;
if (d == 0)
d = memcmp(nla_data(nla), str, len);
return d;
}
#ifdef CONFIG_NET
/**
* __nla_reserve - reserve room for attribute on the skb
* @skb: socket buffer to reserve room on
* @attrtype: attribute type
* @attrlen: length of attribute payload
*
* Adds a netlink attribute header to a socket buffer and reserves
* room for the payload but does not copy it.
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute header and payload.
*/
struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
struct nlattr *nla;
nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
nla->nla_type = attrtype;
nla->nla_len = nla_attr_size(attrlen);
memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));
return nla;
}
EXPORT_SYMBOL(__nla_reserve);
/**
* __nla_reserve_nohdr - reserve room for attribute without header
* @skb: socket buffer to reserve room on
* @attrlen: length of attribute payload
*
* Reserves room for attribute payload without a header.
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the payload.
*/
void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
void *start;
start = skb_put(skb, NLA_ALIGN(attrlen));
memset(start, 0, NLA_ALIGN(attrlen));
return start;
}
EXPORT_SYMBOL(__nla_reserve_nohdr);
/**
* nla_reserve - reserve room for attribute on the skb
* @skb: socket buffer to reserve room on
* @attrtype: attribute type
* @attrlen: length of attribute payload
*
* Adds a netlink attribute header to a socket buffer and reserves
* room for the payload but does not copy it.
*
* Returns NULL if the tailroom of the skb is insufficient to store
* the attribute header and payload.
*/
struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
{
if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
return NULL;
return __nla_reserve(skb, attrtype, attrlen);
}
EXPORT_SYMBOL(nla_reserve);
/**
* nla_reserve_nohdr - reserve room for attribute without header
* @skb: socket buffer to reserve room on
* @attrlen: length of attribute payload
*
* Reserves room for attribute payload without a header.
*
* Returns NULL if the tailroom of the skb is insufficient to store
* the attribute payload.
*/
void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
{
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return NULL;
return __nla_reserve_nohdr(skb, attrlen);
}
EXPORT_SYMBOL(nla_reserve_nohdr);
/**
* __nla_put - Add a netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute header and payload.
*/
void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
const void *data)
{
struct nlattr *nla;
nla = __nla_reserve(skb, attrtype, attrlen);
memcpy(nla_data(nla), data, attrlen);
}
EXPORT_SYMBOL(__nla_put);
/**
* __nla_put_nohdr - Add a netlink attribute without header
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* The caller is responsible to ensure that the skb provides enough
* tailroom for the attribute payload.
*/
void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
{
void *start;
start = __nla_reserve_nohdr(skb, attrlen);
memcpy(start, data, attrlen);
}
EXPORT_SYMBOL(__nla_put_nohdr);
/**
* nla_put - Add a netlink attribute to a socket buffer
* @skb: socket buffer to add attribute to
* @attrtype: attribute type
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute header and payload.
*/
int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
{
if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
return -EMSGSIZE;
__nla_put(skb, attrtype, attrlen, data);
return 0;
}
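/*
 * Illustrative sketch, not part of the original file: putting a fixed
 * size payload with error handling. MY_ATTR_VALUE is a hypothetical
 * attribute type.
 *
 *	u32 val = 42;
 *
 *	if (nla_put(skb, MY_ATTR_VALUE, sizeof(val), &val))
 *		goto nla_put_failure;
 */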
EXPORT_SYMBOL(nla_put);
/**
* nla_put_nohdr - Add a netlink attribute without header
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute payload.
*/
int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
{
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return -EMSGSIZE;
__nla_put_nohdr(skb, attrlen, data);
return 0;
}
EXPORT_SYMBOL(nla_put_nohdr);
/**
* nla_append - Add a netlink attribute without header or padding
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
* @data: head of attribute payload
*
* Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
* the attribute payload.
*/
int nla_append(struct sk_buff *skb, int attrlen, const void *data)
{
if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
return -EMSGSIZE;
memcpy(skb_put(skb, attrlen), data, attrlen);
return 0;
}
EXPORT_SYMBOL(nla_append);
#endif
EXPORT_SYMBOL(nla_validate);
EXPORT_SYMBOL(nla_policy_len);
EXPORT_SYMBOL(nla_parse);
EXPORT_SYMBOL(nla_find);
EXPORT_SYMBOL(nla_strlcpy);
EXPORT_SYMBOL(nla_memcpy);
EXPORT_SYMBOL(nla_memcmp);
EXPORT_SYMBOL(nla_strcmp);
| gpl-2.0 |
sjp333/android_kernel_htc_leo | net/lapb/lapb_out.c | 4292 | 5325 | /*
* LAPB release 002
*
* This code REQUIRES 2.1.15 or higher/ NET3.038
*
* This module:
* This module is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* History
* LAPB 001 Jonathan Naylor Started Coding
* LAPB 002 Jonathan Naylor New timer architecture.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/lapb.h>
/*
* This procedure is passed a buffer descriptor for an iframe. It builds
* the rest of the control part of the frame and then writes it out.
*/
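/*
 * Added annotation: in standard (modulo 8) mode the control byte built
 * below is laid out as
 *
 *	bit 0    - 0 (identifies an I frame)
 *	bits 1-3 - N(S), the send sequence number (lapb->vs)
 *	bit 4    - P/F, the poll/final bit (LAPB_SPF)
 *	bits 5-7 - N(R), the receive sequence number (lapb->vr)
 *
 * The LAPB_EXTENDED (modulo 128) branch spreads the same fields over
 * two bytes instead.
 */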
static void lapb_send_iframe(struct lapb_cb *lapb, struct sk_buff *skb, int poll_bit)
{
unsigned char *frame;
if (!skb)
return;
if (lapb->mode & LAPB_EXTENDED) {
frame = skb_push(skb, 2);
frame[0] = LAPB_I;
frame[0] |= lapb->vs << 1;
frame[1] = poll_bit ? LAPB_EPF : 0;
frame[1] |= lapb->vr << 1;
} else {
frame = skb_push(skb, 1);
*frame = LAPB_I;
*frame |= poll_bit ? LAPB_SPF : 0;
*frame |= lapb->vr << 5;
*frame |= lapb->vs << 1;
}
#if LAPB_DEBUG > 1
printk(KERN_DEBUG "lapb: (%p) S%d TX I(%d) S%d R%d\n",
lapb->dev, lapb->state, poll_bit, lapb->vs, lapb->vr);
#endif
lapb_transmit_buffer(lapb, skb, LAPB_COMMAND);
}
void lapb_kick(struct lapb_cb *lapb)
{
struct sk_buff *skb, *skbn;
unsigned short modulus, start, end;
modulus = (lapb->mode & LAPB_EXTENDED) ? LAPB_EMODULUS : LAPB_SMODULUS;
start = !skb_peek(&lapb->ack_queue) ? lapb->va : lapb->vs;
end = (lapb->va + lapb->window) % modulus;
if (!(lapb->condition & LAPB_PEER_RX_BUSY_CONDITION) &&
start != end && skb_peek(&lapb->write_queue)) {
lapb->vs = start;
/*
* Dequeue the frame and copy it.
*/
skb = skb_dequeue(&lapb->write_queue);
do {
if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
skb_queue_head(&lapb->write_queue, skb);
break;
}
if (skb->sk)
skb_set_owner_w(skbn, skb->sk);
/*
* Transmit the frame copy.
*/
lapb_send_iframe(lapb, skbn, LAPB_POLLOFF);
lapb->vs = (lapb->vs + 1) % modulus;
/*
* Requeue the original data frame.
*/
skb_queue_tail(&lapb->ack_queue, skb);
} while (lapb->vs != end && (skb = skb_dequeue(&lapb->write_queue)) != NULL);
lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
if (!lapb_t1timer_running(lapb))
lapb_start_t1timer(lapb);
}
}
void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type)
{
unsigned char *ptr;
ptr = skb_push(skb, 1);
if (lapb->mode & LAPB_MLP) {
if (lapb->mode & LAPB_DCE) {
if (type == LAPB_COMMAND)
*ptr = LAPB_ADDR_C;
if (type == LAPB_RESPONSE)
*ptr = LAPB_ADDR_D;
} else {
if (type == LAPB_COMMAND)
*ptr = LAPB_ADDR_D;
if (type == LAPB_RESPONSE)
*ptr = LAPB_ADDR_C;
}
} else {
if (lapb->mode & LAPB_DCE) {
if (type == LAPB_COMMAND)
*ptr = LAPB_ADDR_A;
if (type == LAPB_RESPONSE)
*ptr = LAPB_ADDR_B;
} else {
if (type == LAPB_COMMAND)
*ptr = LAPB_ADDR_B;
if (type == LAPB_RESPONSE)
*ptr = LAPB_ADDR_A;
}
}
#if LAPB_DEBUG > 2
printk(KERN_DEBUG "lapb: (%p) S%d TX %02X %02X %02X\n",
lapb->dev, lapb->state,
skb->data[0], skb->data[1], skb->data[2]);
#endif
if (!lapb_data_transmit(lapb, skb))
kfree_skb(skb);
}
void lapb_establish_data_link(struct lapb_cb *lapb)
{
lapb->condition = 0x00;
lapb->n2count = 0;
if (lapb->mode & LAPB_EXTENDED) {
#if LAPB_DEBUG > 1
printk(KERN_DEBUG "lapb: (%p) S%d TX SABME(1)\n",
lapb->dev, lapb->state);
#endif
lapb_send_control(lapb, LAPB_SABME, LAPB_POLLON, LAPB_COMMAND);
} else {
#if LAPB_DEBUG > 1
printk(KERN_DEBUG "lapb: (%p) S%d TX SABM(1)\n",
lapb->dev, lapb->state);
#endif
lapb_send_control(lapb, LAPB_SABM, LAPB_POLLON, LAPB_COMMAND);
}
lapb_start_t1timer(lapb);
lapb_stop_t2timer(lapb);
}
void lapb_enquiry_response(struct lapb_cb *lapb)
{
#if LAPB_DEBUG > 1
printk(KERN_DEBUG "lapb: (%p) S%d TX RR(1) R%d\n",
lapb->dev, lapb->state, lapb->vr);
#endif
lapb_send_control(lapb, LAPB_RR, LAPB_POLLON, LAPB_RESPONSE);
lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
}
void lapb_timeout_response(struct lapb_cb *lapb)
{
#if LAPB_DEBUG > 1
printk(KERN_DEBUG "lapb: (%p) S%d TX RR(0) R%d\n",
lapb->dev, lapb->state, lapb->vr);
#endif
lapb_send_control(lapb, LAPB_RR, LAPB_POLLOFF, LAPB_RESPONSE);
lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
}
void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short nr)
{
if (lapb->vs == nr) {
lapb_frames_acked(lapb, nr);
lapb_stop_t1timer(lapb);
lapb->n2count = 0;
} else if (lapb->va != nr) {
lapb_frames_acked(lapb, nr);
lapb_start_t1timer(lapb);
}
}
void lapb_check_need_response(struct lapb_cb *lapb, int type, int pf)
{
if (type == LAPB_COMMAND && pf)
lapb_enquiry_response(lapb);
}
| gpl-2.0 |
muntahar/GT-S5660-KERNEL | arch/arm/mach-mv78xx0/addr-map.c | 4804 | 3725 | /*
* arch/arm/mach-mv78xx0/addr-map.c
*
* Address map functions for Marvell MV78xx0 SoCs
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/io.h>
#include "common.h"
/*
* Generic Address Decode Windows bit settings
*/
#define TARGET_DDR 0
#define TARGET_DEV_BUS 1
#define TARGET_PCIE0 4
#define TARGET_PCIE1 8
#define TARGET_PCIE(i) ((i) ? TARGET_PCIE1 : TARGET_PCIE0)
#define ATTR_DEV_SPI_ROM 0x1f
#define ATTR_DEV_BOOT 0x2f
#define ATTR_DEV_CS3 0x37
#define ATTR_DEV_CS2 0x3b
#define ATTR_DEV_CS1 0x3d
#define ATTR_DEV_CS0 0x3e
#define ATTR_PCIE_IO(l) (0xf0 & ~(0x10 << (l)))
#define ATTR_PCIE_MEM(l) (0xf8 & ~(0x10 << (l)))
/*
* Helpers to get DDR bank info
*/
#define DDR_BASE_CS_OFF(n) (0x0000 + ((n) << 3))
#define DDR_SIZE_CS_OFF(n) (0x0004 + ((n) << 3))
/*
* CPU Address Decode Windows registers
*/
#define WIN0_OFF(n) (BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4))
#define WIN8_OFF(n) (BRIDGE_VIRT_BASE + 0x0900 + (((n) - 8) << 4))
#define WIN_CTRL_OFF 0x0000
#define WIN_BASE_OFF 0x0004
#define WIN_REMAP_LO_OFF 0x0008
#define WIN_REMAP_HI_OFF 0x000c
struct mbus_dram_target_info mv78xx0_mbus_dram_info;
static void __init __iomem *win_cfg_base(int win)
{
/*
* Find the control register base address for this window.
*
* BRIDGE_VIRT_BASE points to the right (CPU0's or CPU1's)
* MBUS bridge depending on which CPU core we're running on,
* so we don't need to take that into account here.
*/
return (void __iomem *)((win < 8) ? WIN0_OFF(win) : WIN8_OFF(win));
}
static int __init cpu_win_can_remap(int win)
{
if (win < 8)
return 1;
return 0;
}
static void __init setup_cpu_win(int win, u32 base, u32 size,
u8 target, u8 attr, int remap)
{
void __iomem *addr = win_cfg_base(win);
u32 ctrl;
base &= 0xffff0000;
ctrl = ((size - 1) & 0xffff0000) | (attr << 8) | (target << 4) | 1;
writel(base, addr + WIN_BASE_OFF);
writel(ctrl, addr + WIN_CTRL_OFF);
if (cpu_win_can_remap(win)) {
if (remap < 0)
remap = base;
writel(remap & 0xffff0000, addr + WIN_REMAP_LO_OFF);
writel(0, addr + WIN_REMAP_HI_OFF);
}
}
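/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller mapping 16MB of PCIe 0.0 memory space at 0xe0000000 through
 * window 0, with no remapping:
 *
 *	setup_cpu_win(0, 0xe0000000, 0x01000000,
 *		      TARGET_PCIE(0), ATTR_PCIE_MEM(0), -1);
 *
 * This encodes target 4 and attribute 0xe8 into the window control
 * register built above.
 */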
void __init mv78xx0_setup_cpu_mbus(void)
{
void __iomem *addr;
int i;
int cs;
/*
* First, disable and clear windows.
*/
for (i = 0; i < 14; i++) {
addr = win_cfg_base(i);
writel(0, addr + WIN_BASE_OFF);
writel(0, addr + WIN_CTRL_OFF);
if (cpu_win_can_remap(i)) {
writel(0, addr + WIN_REMAP_LO_OFF);
writel(0, addr + WIN_REMAP_HI_OFF);
}
}
/*
* Setup MBUS dram target info.
*/
mv78xx0_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
if (mv78xx0_core_index() == 0)
addr = (void __iomem *)DDR_WINDOW_CPU0_BASE;
else
addr = (void __iomem *)DDR_WINDOW_CPU1_BASE;
for (i = 0, cs = 0; i < 4; i++) {
u32 base = readl(addr + DDR_BASE_CS_OFF(i));
u32 size = readl(addr + DDR_SIZE_CS_OFF(i));
/*
* Chip select enabled?
*/
if (size & 1) {
struct mbus_dram_window *w;
w = &mv78xx0_mbus_dram_info.cs[cs++];
w->cs_index = i;
w->mbus_attr = 0xf & ~(1 << i);
w->base = base & 0xffff0000;
w->size = (size | 0x0000ffff) + 1;
}
}
mv78xx0_mbus_dram_info.num_cs = cs;
}
void __init mv78xx0_setup_pcie_io_win(int window, u32 base, u32 size,
int maj, int min)
{
setup_cpu_win(window, base, size, TARGET_PCIE(maj),
ATTR_PCIE_IO(min), -1);
}
void __init mv78xx0_setup_pcie_mem_win(int window, u32 base, u32 size,
int maj, int min)
{
setup_cpu_win(window, base, size, TARGET_PCIE(maj),
ATTR_PCIE_MEM(min), -1);
}
| gpl-2.0 |
sub77/android_kernel_samsung_matissewifi | drivers/net/ethernet/dec/tulip/xircom_cb.c | 4804 | 29883 | /*
* xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards
*
* This software is (C) by the respective authors, and licensed under the GPL
* License.
*
* Written by Arjan van de Ven for Red Hat, Inc.
* Based on work by Jeff Garzik, Doug Ledford and Donald Becker
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*
* $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#ifdef CONFIG_NET_POLL_CONTROLLER
#include <asm/irq.h>
#endif
MODULE_DESCRIPTION("Xircom Cardbus ethernet driver");
MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>");
MODULE_LICENSE("GPL");
/* IO registers on the card, offsets */
#define CSR0 0x00
#define CSR1 0x08
#define CSR2 0x10
#define CSR3 0x18
#define CSR4 0x20
#define CSR5 0x28
#define CSR6 0x30
#define CSR7 0x38
#define CSR8 0x40
#define CSR9 0x48
#define CSR10 0x50
#define CSR11 0x58
#define CSR12 0x60
#define CSR13 0x68
#define CSR14 0x70
#define CSR15 0x78
#define CSR16 0x80
/* PCI registers */
#define PCI_POWERMGMT 0x40
/* Offsets of the buffers within the descriptor pages, in bytes */
#define NUMDESCRIPTORS 4
static int bufferoffsets[NUMDESCRIPTORS] = {128,2048,4096,6144};
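/*
 * Added annotation: each of the two 8192-byte DMA allocations is laid
 * out as 128 bytes of descriptor ring (four 16-byte descriptors plus
 * padding, cleared in setup_descriptors()) followed by the packet
 * buffers at the offsets above, leaving room for a full 1536-byte
 * frame per descriptor.
 */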
struct xircom_private {
/* Send and receive buffers, kernel-addressable and dma addressable forms */
__le32 *rx_buffer;
__le32 *tx_buffer;
dma_addr_t rx_dma_handle;
dma_addr_t tx_dma_handle;
struct sk_buff *tx_skb[4];
unsigned long io_port;
int open;
/* transmit_used is the rotating counter that indicates which transmit
descriptor has to be used next */
int transmit_used;
/* Spinlock to serialize register operations.
It must be held while manipulating the following registers:
CSR0, CSR6, CSR7, CSR9, CSR10, CSR15
*/
spinlock_t lock;
struct pci_dev *pdev;
struct net_device *dev;
};
/* Function prototypes */
static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void xircom_remove(struct pci_dev *pdev);
static irqreturn_t xircom_interrupt(int irq, void *dev_instance);
static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int xircom_open(struct net_device *dev);
static int xircom_close(struct net_device *dev);
static void xircom_up(struct xircom_private *card);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xircom_poll_controller(struct net_device *dev);
#endif
static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset);
static void investigate_write_descriptor(struct net_device *dev, struct xircom_private *card, int descnr, unsigned int bufferoffset);
static void read_mac_address(struct xircom_private *card);
static void transceiver_voodoo(struct xircom_private *card);
static void initialize_card(struct xircom_private *card);
static void trigger_transmit(struct xircom_private *card);
static void trigger_receive(struct xircom_private *card);
static void setup_descriptors(struct xircom_private *card);
static void remove_descriptors(struct xircom_private *card);
static int link_status_changed(struct xircom_private *card);
static void activate_receiver(struct xircom_private *card);
static void deactivate_receiver(struct xircom_private *card);
static void activate_transmitter(struct xircom_private *card);
static void deactivate_transmitter(struct xircom_private *card);
static void enable_transmit_interrupt(struct xircom_private *card);
static void enable_receive_interrupt(struct xircom_private *card);
static void enable_link_interrupt(struct xircom_private *card);
static void disable_all_interrupts(struct xircom_private *card);
static int link_status(struct xircom_private *card);
static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
{0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
{0,},
};
MODULE_DEVICE_TABLE(pci, xircom_pci_table);
static struct pci_driver xircom_ops = {
.name = "xircom_cb",
.id_table = xircom_pci_table,
.probe = xircom_probe,
.remove = xircom_remove,
.suspend =NULL,
.resume =NULL
};
#if defined DEBUG && DEBUG > 1
static void print_binary(unsigned int number)
{
int i,i2;
char buffer[64];
memset(buffer,0,64);
i2=0;
for (i=31;i>=0;i--) {
if (number & (1<<i))
buffer[i2++]='1';
else
buffer[i2++]='0';
if ((i&3)==0)
buffer[i2++]=' ';
}
pr_debug("%s\n",buffer);
}
#endif
static const struct net_device_ops netdev_ops = {
.ndo_open = xircom_open,
.ndo_stop = xircom_close,
.ndo_start_xmit = xircom_start_xmit,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = xircom_poll_controller,
#endif
};
/* xircom_probe is the code that gets called on device insertion.
it sets up the hardware and registers the device with the network layer.
TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the
first two packets that get sent, and pump hates that.
*/
static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct net_device *dev = NULL;
struct xircom_private *private;
unsigned long flags;
unsigned short tmp16;
/* First do the PCI initialisation */
if (pci_enable_device(pdev))
return -ENODEV;
/* disable all powermanagement */
pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/
/* clear PCI status, if any */
pci_read_config_word (pdev,PCI_STATUS, &tmp16);
pci_write_config_word (pdev, PCI_STATUS,tmp16);
if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
pr_err("%s: failed to allocate io-region\n", __func__);
return -ENODEV;
}
/*
Before changing the hardware, allocate the memory.
This way, we can fail gracefully if not enough memory
is available.
*/
dev = alloc_etherdev(sizeof(struct xircom_private));
if (!dev)
goto device_fail;
private = netdev_priv(dev);
/* Allocate the send/receive buffers */
private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
if (private->rx_buffer == NULL) {
pr_err("%s: no memory for rx buffer\n", __func__);
goto rx_buf_fail;
}
private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
if (private->tx_buffer == NULL) {
pr_err("%s: no memory for tx buffer\n", __func__);
goto tx_buf_fail;
}
SET_NETDEV_DEV(dev, &pdev->dev);
private->dev = dev;
private->pdev = pdev;
private->io_port = pci_resource_start(pdev, 0);
spin_lock_init(&private->lock);
dev->irq = pdev->irq;
dev->base_addr = private->io_port;
initialize_card(private);
read_mac_address(private);
setup_descriptors(private);
dev->netdev_ops = &netdev_ops;
pci_set_drvdata(pdev, dev);
if (register_netdev(dev)) {
pr_err("%s: netdevice registration failed\n", __func__);
goto reg_fail;
}
netdev_info(dev, "Xircom cardbus revision %i at irq %i\n",
pdev->revision, pdev->irq);
/* start the transmitter to get a heartbeat */
/* TODO: send 2 dummy packets here */
transceiver_voodoo(private);
spin_lock_irqsave(&private->lock,flags);
activate_transmitter(private);
activate_receiver(private);
spin_unlock_irqrestore(&private->lock,flags);
trigger_receive(private);
return 0;
reg_fail:
pci_free_consistent(pdev,8192,private->tx_buffer,private->tx_dma_handle);
tx_buf_fail:
pci_free_consistent(pdev,8192,private->rx_buffer,private->rx_dma_handle);
rx_buf_fail:
free_netdev(dev);
device_fail:
return -ENODEV;
}
/*
xircom_remove is called on module-unload or on device-eject.
it unregisters the irq, io-region and network device.
Interrupts and such are already stopped in the "ifconfig ethX down"
code.
*/
static void __devexit xircom_remove(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct xircom_private *card = netdev_priv(dev);
/* stop the network device before tearing down its buffers */
unregister_netdev(dev);
pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle);
pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle);
release_region(dev->base_addr, 128);
free_netdev(dev);
pci_set_drvdata(pdev, NULL);
}
static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = (struct net_device *) dev_instance;
struct xircom_private *card = netdev_priv(dev);
unsigned int status;
int i;
spin_lock(&card->lock);
status = inl(card->io_port+CSR5);
#if defined DEBUG && DEBUG > 1
print_binary(status);
pr_debug("tx status 0x%08x 0x%08x\n",
card->tx_buffer[0], card->tx_buffer[4]);
pr_debug("rx status 0x%08x 0x%08x\n",
card->rx_buffer[0], card->rx_buffer[4]);
#endif
/* Handle shared irq and hotplug */
if (status == 0 || status == 0xffffffff) {
spin_unlock(&card->lock);
return IRQ_NONE;
}
if (link_status_changed(card)) {
int newlink;
netdev_dbg(dev, "Link status has changed\n");
newlink = link_status(card);
netdev_info(dev, "Link is %d mbit\n", newlink);
if (newlink)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
}
/* Clear all remaining interrupts */
status |= 0xffffffff; /* FIXME: make this clear only the
real existing bits */
outl(status,card->io_port+CSR5);
for (i=0;i<NUMDESCRIPTORS;i++)
investigate_write_descriptor(dev,card,i,bufferoffsets[i]);
for (i=0;i<NUMDESCRIPTORS;i++)
investigate_read_descriptor(dev,card,i,bufferoffsets[i]);
spin_unlock(&card->lock);
return IRQ_HANDLED;
}
static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct xircom_private *card;
unsigned long flags;
int nextdescriptor;
int desc;
card = netdev_priv(dev);
spin_lock_irqsave(&card->lock,flags);
/* First see if we can free some descriptors */
for (desc=0;desc<NUMDESCRIPTORS;desc++)
investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]);
nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS);
desc = card->transmit_used;
/* only send the packet if the descriptor is free */
if (card->tx_buffer[4*desc]==0) {
/* Copy the packet data; zero the memory first as the card
sometimes sends more than you ask it to. */
memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536);
skb_copy_from_linear_data(skb,
&(card->tx_buffer[bufferoffsets[desc] / 4]),
skb->len);
/* FIXME: The specification tells us that the length we send HAS to be a multiple of
4 bytes. */
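/* A hedged sketch, not in the original driver, of honoring that
requirement: round the length up first, e.g.
	int padded = (skb->len + 3) & ~3;
and program 'padded' below instead of skb->len. The buffer was
zeroed above, so the pad bytes would go out as zeros. */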
card->tx_buffer[4*desc+1] = cpu_to_le32(skb->len);
if (desc == NUMDESCRIPTORS - 1) /* bit 25: last descriptor of the ring */
card->tx_buffer[4*desc+1] |= cpu_to_le32(1<<25);
card->tx_buffer[4*desc+1] |= cpu_to_le32(0xF0000000);
/* 0xF0... means want interrupts*/
card->tx_skb[desc] = skb;
wmb();
/* This gives the descriptor to the card */
card->tx_buffer[4*desc] = cpu_to_le32(0x80000000);
trigger_transmit(card);
if (card->tx_buffer[nextdescriptor*4] & cpu_to_le32(0x8000000)) {
/* next descriptor is occupied... */
netif_stop_queue(dev);
}
card->transmit_used = nextdescriptor;
spin_unlock_irqrestore(&card->lock,flags);
return NETDEV_TX_OK;
}
/* Uh oh... no free descriptor... drop the packet */
netif_stop_queue(dev);
spin_unlock_irqrestore(&card->lock,flags);
trigger_transmit(card);
return NETDEV_TX_BUSY;
}
static int xircom_open(struct net_device *dev)
{
struct xircom_private *xp = netdev_priv(dev);
int retval;
netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n",
dev->irq);
retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
if (retval)
return retval;
xircom_up(xp);
xp->open = 1;
return 0;
}
static int xircom_close(struct net_device *dev)
{
struct xircom_private *card;
unsigned long flags;
card = netdev_priv(dev);
netif_stop_queue(dev); /* we don't want new packets */
spin_lock_irqsave(&card->lock,flags);
disable_all_interrupts(card);
#if 0
/* We can enable this again once we send dummy packets on ifconfig ethX up */
deactivate_receiver(card);
deactivate_transmitter(card);
#endif
remove_descriptors(card);
spin_unlock_irqrestore(&card->lock,flags);
card->open = 0;
free_irq(dev->irq,dev);
return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xircom_poll_controller(struct net_device *dev)
{
disable_irq(dev->irq);
xircom_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
static void initialize_card(struct xircom_private *card)
{
unsigned int val;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
/* First: reset the card */
val = inl(card->io_port + CSR0);
val |= 0x01; /* Software reset */
outl(val, card->io_port + CSR0);
udelay(100); /* give the card some time to reset */
val = inl(card->io_port + CSR0);
val &= ~0x01; /* disable Software reset */
outl(val, card->io_port + CSR0);
val = 0; /* Value 0x00 is a safe and conservative value
for the PCI configuration settings */
outl(val, card->io_port + CSR0);
disable_all_interrupts(card);
deactivate_receiver(card);
deactivate_transmitter(card);
spin_unlock_irqrestore(&card->lock, flags);
}
/*
trigger_transmit causes the card to check for frames to be transmitted.
This is accomplished by writing to the CSR1 port. The documentation
claims that the act of writing is sufficient and that the value is
ignored; I chose zero.
*/
static void trigger_transmit(struct xircom_private *card)
{
unsigned int val;
val = 0;
outl(val, card->io_port + CSR1);
}
/*
trigger_receive causes the card to check for empty frames in the
descriptor list in which packets can be received.
This is accomplished by writing to the CSR2 port. The documentation
claims that the act of writing is sufficient and that the value is
ignored; I chose zero.
*/
static void trigger_receive(struct xircom_private *card)
{
unsigned int val;
val = 0;
outl(val, card->io_port + CSR2);
}
/*
setup_descriptors initializes the send and receive buffers to be valid
descriptors and programs the addresses into the card.
*/
static void setup_descriptors(struct xircom_private *card)
{
u32 address;
int i;
BUG_ON(card->rx_buffer == NULL);
BUG_ON(card->tx_buffer == NULL);
/* Receive descriptors */
memset(card->rx_buffer, 0, 128); /* clear the descriptors */
for (i=0;i<NUMDESCRIPTORS;i++ ) {
/* Rx Descr0: It's empty, let the card own it, no errors -> 0x80000000 */
card->rx_buffer[i*4 + 0] = cpu_to_le32(0x80000000);
/* Rx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */
card->rx_buffer[i*4 + 1] = cpu_to_le32(1536);
if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */
card->rx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25);
/* Rx Descr2: address of the buffer
we store the buffer at the 2nd half of the page */
address = card->rx_dma_handle;
card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]);
/* Rx Desc3: address of 2nd buffer -> 0 */
card->rx_buffer[i*4 + 3] = 0;
}
wmb();
/* Write the receive descriptor ring address to the card */
address = card->rx_dma_handle;
outl(address, card->io_port + CSR3); /* Receive descr list address */
/* transmit descriptors */
memset(card->tx_buffer, 0, 128); /* clear the descriptors */
for (i=0;i<NUMDESCRIPTORS;i++ ) {
/* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */
card->tx_buffer[i*4 + 0] = 0x00000000;
/* Tx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */
card->tx_buffer[i*4 + 1] = cpu_to_le32(1536);
if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */
card->tx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25);
/* Tx Descr2: address of the buffer
we store the buffer at the 2nd half of the page */
address = card->tx_dma_handle;
card->tx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]);
/* Tx Desc3: address of 2nd buffer -> 0 */
card->tx_buffer[i*4 + 3] = 0;
}
wmb();
/* write the transmit descriptor ring to the card */
address = card->tx_dma_handle;
outl(address, card->io_port + CSR4); /* xmit descr list address */
}
/*
remove_descriptors informs the card the descriptors are no longer
valid by setting the address in the card to 0x00.
*/
static void remove_descriptors(struct xircom_private *card)
{
unsigned int val;
val = 0;
outl(val, card->io_port + CSR3); /* Receive descriptor address */
outl(val, card->io_port + CSR4); /* Send descriptor address */
}
/*
link_status_changed returns 1 if the card has indicated that
the link status has changed. The new link status has to be read from CSR12.
This function also clears the status-bit.
*/
static int link_status_changed(struct xircom_private *card)
{
unsigned int val;
val = inl(card->io_port + CSR5); /* Status register */
if ((val & (1 << 27)) == 0) /* no change */
return 0;
/* clear the event by writing a 1 to the bit in the
status register. */
val = (1 << 27);
outl(val, card->io_port + CSR5);
return 1;
}
/*
transmit_active returns 1 if the transmitter on the card is
in a non-stopped state.
*/
static int transmit_active(struct xircom_private *card)
{
unsigned int val;
val = inl(card->io_port + CSR5); /* Status register */
if ((val & (7 << 20)) == 0) /* transmitter disabled */
return 0;
return 1;
}
/*
receive_active returns 1 if the receiver on the card is
in a non-stopped state.
*/
static int receive_active(struct xircom_private *card)
{
unsigned int val;
val = inl(card->io_port + CSR5); /* Status register */
if ((val & (7 << 17)) == 0) /* receiver disabled */
return 0;
return 1;
}
/*
activate_receiver enables the receiver on the card.
Before being allowed to activate the receiver, the receiver
must be completely de-activated. To achieve this,
this code actually disables the receiver first; then it waits for the
receiver to become inactive, then it activates the receiver and then
it waits for the receiver to be active.
must be called with the lock held and interrupts disabled.
*/
static void activate_receiver(struct xircom_private *card)
{
unsigned int val;
int counter;
val = inl(card->io_port + CSR6); /* Operation mode */
/* If the "active" bit is set and the receiver is already
active, no need to do the expensive thing */
if ((val&2) && (receive_active(card)))
return;
val = val & ~2; /* disable the receiver */
outl(val, card->io_port + CSR6);
counter = 10;
while (counter > 0) {
if (!receive_active(card))
break;
/* wait a while */
udelay(50);
counter--;
if (counter <= 0)
netdev_err(card->dev, "Receiver failed to deactivate\n");
}
/* enable the receiver */
val = inl(card->io_port + CSR6); /* Operation mode */
val = val | 2; /* enable the receiver */
outl(val, card->io_port + CSR6);
/* now wait for the card to activate again */
counter = 10;
while (counter > 0) {
if (receive_active(card))
break;
/* wait a while */
udelay(50);
counter--;
if (counter <= 0)
netdev_err(card->dev,
"Receiver failed to re-activate\n");
}
}
/*
deactivate_receiver disables the receiver on the card.
To achieve this this code disables the receiver first;
then it waits for the receiver to become inactive.
must be called with the lock held and interrupts disabled.
*/
static void deactivate_receiver(struct xircom_private *card)
{
unsigned int val;
int counter;
val = inl(card->io_port + CSR6); /* Operation mode */
val = val & ~2; /* disable the receiver */
outl(val, card->io_port + CSR6);
counter = 10;
while (counter > 0) {
if (!receive_active(card))
break;
/* wait a while */
udelay(50);
counter--;
if (counter <= 0)
netdev_err(card->dev, "Receiver failed to deactivate\n");
}
}
/*
activate_transmitter enables the transmitter on the card.
Before being allowed to activate the transmitter, the transmitter
must be completely de-activated. To achieve this,
this code actually disables the transmitter first; then it waits for the
transmitter to become inactive, then it activates the transmitter and then
it waits for the transmitter to be active again.
must be called with the lock held and interrupts disabled.
*/
static void activate_transmitter(struct xircom_private *card)
{
unsigned int val;
int counter;
val = inl(card->io_port + CSR6); /* Operation mode */
/* If the "active" bit is set and the receiver is already
active, no need to do the expensive thing */
if ((val&(1<<13)) && (transmit_active(card)))
return;
val = val & ~(1 << 13); /* disable the transmitter */
outl(val, card->io_port + CSR6);
counter = 10;
while (counter > 0) {
if (!transmit_active(card))
break;
/* wait a while */
udelay(50);
counter--;
if (counter <= 0)
netdev_err(card->dev,
"Transmitter failed to deactivate\n");
}
/* enable the transmitter */
val = inl(card->io_port + CSR6); /* Operation mode */
val = val | (1 << 13); /* enable the transmitter */
outl(val, card->io_port + CSR6);
/* now wait for the card to activate again */
counter = 10;
while (counter > 0) {
if (transmit_active(card))
break;
/* wait a while */
udelay(50);
counter--;
if (counter <= 0)
netdev_err(card->dev,
"Transmitter failed to re-activate\n");
}
}
/*
deactivate_transmitter disables the transmitter on the card.
To achieve this this code disables the transmitter first;
then it waits for the transmitter to become inactive.
must be called with the lock held and interrupts disabled.
*/
static void deactivate_transmitter(struct xircom_private *card)
{
unsigned int val;
int counter;
val = inl(card->io_port + CSR6); /* Operation mode */
val = val & ~2; /* disable the transmitter */
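/* FIXME? this clears bit 1 (the receiver enable bit, per
activate_receiver()) rather than bit 13, the transmitter enable
bit used by activate_transmitter() */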
outl(val, card->io_port + CSR6);
counter = 20;
while (counter > 0) {
if (!transmit_active(card))
break;
/* wait a while */
udelay(50);
counter--;
if (counter <= 0)
netdev_err(card->dev,
"Transmitter failed to deactivate\n");
}
}
/*
enable_transmit_interrupt enables the transmit interrupt
must be called with the lock held and interrupts disabled.
*/
static void enable_transmit_interrupt(struct xircom_private *card)
{
unsigned int val;
val = inl(card->io_port + CSR7); /* Interrupt enable register */
val |= 1; /* enable the transmit interrupt */
outl(val, card->io_port + CSR7);
}
/*
enable_receive_interrupt enables the receive interrupt
must be called with the lock held and interrupts disabled.
*/
static void enable_receive_interrupt(struct xircom_private *card)
{
unsigned int val;
val = inl(card->io_port + CSR7); /* Interrupt enable register */
val = val | (1 << 6); /* enable the receive interrupt */
outl(val, card->io_port + CSR7);
}
/*
enable_link_interrupt enables the link status change interrupt
must be called with the lock held and interrupts disabled.
*/
static void enable_link_interrupt(struct xircom_private *card)
{
unsigned int val;
val = inl(card->io_port + CSR7); /* Interrupt enable register */
val = val | (1 << 27); /* enable the link status change interrupt */
outl(val, card->io_port + CSR7);
}
/*
disable_all_interrupts disables all interrupts
must be called with the lock held and interrupts disabled.
*/
static void disable_all_interrupts(struct xircom_private *card)
{
unsigned int val;
val = 0; /* disable all interrupts */
outl(val, card->io_port + CSR7);
}
/*
enable_common_interrupts enables several weird interrupts
must be called with the lock held and interrupts disabled.
*/
static void enable_common_interrupts(struct xircom_private *card)
{
unsigned int val;
val = inl(card->io_port + CSR7); /* Interrupt enable register */
val |= (1<<16); /* Normal Interrupt Summary */
val |= (1<<15); /* Abnormal Interrupt Summary */
val |= (1<<13); /* Fatal bus error */
val |= (1<<8); /* Receive Process Stopped */
val |= (1<<7); /* Receive Buffer Unavailable */
val |= (1<<5); /* Transmit Underflow */
val |= (1<<2); /* Transmit Buffer Unavailable */
val |= (1<<1); /* Transmit Process Stopped */
outl(val, card->io_port + CSR7);
}
/*
enable_promisc starts promisc mode
must be called with the lock held and interrupts disabled.
*/
static int enable_promisc(struct xircom_private *card)
{
unsigned int val;
val = inl(card->io_port + CSR6);
val = val | (1 << 6);
outl(val, card->io_port + CSR6);
return 1;
}
/*
link_status() checks the link status and returns 0 for no link, 10 for a 10mbit link and 100 for a 100mbit link.
Must be called in locked state with interrupts disabled
*/
static int link_status(struct xircom_private *card)
{
unsigned int val;
val = inb(card->io_port + CSR12);
if (!(val&(1<<2))) /* bit 2 is 0 for a 10mbit link, 1 for not a 10mbit link */
return 10;
if (!(val&(1<<1))) /* bit 1 is 0 for a 100mbit link, 1 for not a 100mbit link */
return 100;
/* If we get here -> no link at all */
return 0;
}
/*
read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure.
This function takes the spinlock itself and, as a result, must not be called with the lock held.
*/
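/*
 * Added annotation: the loop below walks the CIS tuple chain in the
 * boot rom. A tuple of type 0x22 (CISTPL_FUNCE) with data id 0x04 and
 * a 6-byte payload carries the node id, i.e. the MAC address.
 */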
static void read_mac_address(struct xircom_private *card)
{
unsigned char j, tuple, link, data_id, data_count;
unsigned long flags;
int i;
spin_lock_irqsave(&card->lock, flags);
outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */
for (i = 0x100; i < 0x1f7; i += link + 2) {
outl(i, card->io_port + CSR10);
tuple = inl(card->io_port + CSR9) & 0xff;
outl(i + 1, card->io_port + CSR10);
link = inl(card->io_port + CSR9) & 0xff;
outl(i + 2, card->io_port + CSR10);
data_id = inl(card->io_port + CSR9) & 0xff;
outl(i + 3, card->io_port + CSR10);
data_count = inl(card->io_port + CSR9) & 0xff;
if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
/*
* This is it. We have the data we want.
*/
for (j = 0; j < 6; j++) {
outl(i + j + 4, card->io_port + CSR10);
card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff;
}
break;
} else if (link == 0) {
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
pr_debug(" %pM\n", card->dev->dev_addr);
}
/*
transceiver_voodoo() enables the external UTP plug thingy.
it's called voodoo as I stole this code and cannot cross-reference
it with the specification.
*/
static void transceiver_voodoo(struct xircom_private *card)
{
unsigned long flags;
/* disable all powermanagement */
pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000);
setup_descriptors(card);
spin_lock_irqsave(&card->lock, flags);
outl(0x0008, card->io_port + CSR15);
udelay(25);
outl(0xa8050000, card->io_port + CSR15);
udelay(25);
outl(0xa00f0000, card->io_port + CSR15);
udelay(25);
spin_unlock_irqrestore(&card->lock, flags);
netif_start_queue(card->dev);
}
static void xircom_up(struct xircom_private *card)
{
unsigned long flags;
int i;
/* disable all powermanagement */
pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000);
setup_descriptors(card);
spin_lock_irqsave(&card->lock, flags);
enable_link_interrupt(card);
enable_transmit_interrupt(card);
enable_receive_interrupt(card);
enable_common_interrupts(card);
enable_promisc(card);
/* The card can have received packets already, read them away now */
for (i=0;i<NUMDESCRIPTORS;i++)
investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]);
spin_unlock_irqrestore(&card->lock, flags);
trigger_receive(card);
trigger_transmit(card);
netif_start_queue(card->dev);
}
/* Bufferoffset is in BYTES */
static void
investigate_read_descriptor(struct net_device *dev, struct xircom_private *card,
int descnr, unsigned int bufferoffset)
{
int status;
status = le32_to_cpu(card->rx_buffer[4*descnr]);
if (status > 0) { /* packet received */
/* TODO: discard error packets */
short pkt_len = ((status >> 16) & 0x7ff) - 4;
/* minus 4, we don't want the CRC */
struct sk_buff *skb;
if (pkt_len > 1518) {
netdev_err(dev, "Packet length %i is bogus\n", pkt_len);
pkt_len = 1518;
}
skb = netdev_alloc_skb(dev, pkt_len + 2);
if (skb == NULL) {
dev->stats.rx_dropped++;
goto out;
}
skb_reserve(skb, 2);
skb_copy_to_linear_data(skb,
&card->rx_buffer[bufferoffset / 4],
pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
out:
/* give the buffer back to the card */
card->rx_buffer[4*descnr] = cpu_to_le32(0x80000000);
trigger_receive(card);
}
}
/* Bufferoffset is in BYTES */
static void
investigate_write_descriptor(struct net_device *dev,
struct xircom_private *card,
int descnr, unsigned int bufferoffset)
{
int status;
status = le32_to_cpu(card->tx_buffer[4*descnr]);
#if 0
if (status & 0x8000) { /* Major error */
pr_err("Major transmit error status %x\n", status);
card->tx_buffer[4*descnr] = 0;
netif_wake_queue (dev);
}
#endif
if (status > 0) { /* bit 31 is 0 when done */
if (card->tx_skb[descnr]!=NULL) {
dev->stats.tx_bytes += card->tx_skb[descnr]->len;
dev_kfree_skb_irq(card->tx_skb[descnr]);
}
card->tx_skb[descnr] = NULL;
/* Bit 8 in the status field is 1 if there was a collision */
if (status & (1 << 8))
dev->stats.collisions++;
card->tx_buffer[4*descnr] = 0; /* descriptor is free again */
netif_wake_queue (dev);
dev->stats.tx_packets++;
}
}
static int __init xircom_init(void)
{
return pci_register_driver(&xircom_ops);
}
static void __exit xircom_exit(void)
{
pci_unregister_driver(&xircom_ops);
}
module_init(xircom_init)
module_exit(xircom_exit)
| gpl-2.0 |
Metallium-Devices/android_kernel_lge_msm8974 | drivers/usb/serial/symbolserial.c | 4804 | 7880 | /*
* Symbol USB barcode to serial driver
*
* Copyright (C) 2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (C) 2009 Novell Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/slab.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
static bool debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x05e0, 0x0600) },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
/* This structure holds all of the individual device information */
struct symbol_private {
struct usb_device *udev;
struct usb_serial *serial;
struct usb_serial_port *port;
unsigned char *int_buffer;
struct urb *int_urb;
int buffer_size;
u8 bInterval;
u8 int_address;
spinlock_t lock; /* protects the following flags */
bool throttled;
bool actually_throttled;
bool rts;
};
static void symbol_int_callback(struct urb *urb)
{
struct symbol_private *priv = urb->context;
unsigned char *data = urb->transfer_buffer;
struct usb_serial_port *port = priv->port;
int status = urb->status;
struct tty_struct *tty;
int result;
int data_length;
dbg("%s - port %d", __func__, port->number);
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dbg("%s - urb shutting down with status: %d",
__func__, status);
return;
default:
dbg("%s - nonzero urb status received: %d",
__func__, status);
goto exit;
}
usb_serial_debug_data(debug, &port->dev, __func__, urb->actual_length,
data);
if (urb->actual_length > 1) {
data_length = urb->actual_length - 1;
/*
* Data from the device comes with a 1 byte header:
*
* <size of data>data...
* This is real data to be sent to the tty layer
* we pretty much just ignore the size and send everything
* else to the tty layer.
*/
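/*
 * Added annotation: e.g. an interrupt transfer of { 0x03, 'A', 'B',
 * 'C' } pushes the three bytes "ABC" to the tty; the leading length
 * byte is dropped.
 */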
tty = tty_port_tty_get(&port->port);
if (tty) {
tty_insert_flip_string(tty, &data[1], data_length);
tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
} else {
dev_dbg(&priv->udev->dev,
"Improper amount of data received from the device, "
"%d bytes", urb->actual_length);
}
exit:
spin_lock(&priv->lock);
/* Continue trying to always read if we should */
if (!priv->throttled) {
usb_fill_int_urb(priv->int_urb, priv->udev,
usb_rcvintpipe(priv->udev,
priv->int_address),
priv->int_buffer, priv->buffer_size,
symbol_int_callback, priv, priv->bInterval);
result = usb_submit_urb(priv->int_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev,
"%s - failed resubmitting read urb, error %d\n",
__func__, result);
} else
priv->actually_throttled = true;
spin_unlock(&priv->lock);
}
static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct symbol_private *priv = usb_get_serial_data(port->serial);
unsigned long flags;
int result = 0;
dbg("%s - port %d", __func__, port->number);
spin_lock_irqsave(&priv->lock, flags);
priv->throttled = false;
priv->actually_throttled = false;
priv->port = port;
spin_unlock_irqrestore(&priv->lock, flags);
/* Start reading from the device */
usb_fill_int_urb(priv->int_urb, priv->udev,
usb_rcvintpipe(priv->udev, priv->int_address),
priv->int_buffer, priv->buffer_size,
symbol_int_callback, priv, priv->bInterval);
result = usb_submit_urb(priv->int_urb, GFP_KERNEL);
if (result)
dev_err(&port->dev,
"%s - failed resubmitting read urb, error %d\n",
__func__, result);
return result;
}
static void symbol_close(struct usb_serial_port *port)
{
struct symbol_private *priv = usb_get_serial_data(port->serial);
dbg("%s - port %d", __func__, port->number);
/* shutdown our urbs */
usb_kill_urb(priv->int_urb);
}
static void symbol_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct symbol_private *priv = usb_get_serial_data(port->serial);
dbg("%s - port %d", __func__, port->number);
spin_lock_irq(&priv->lock);
priv->throttled = true;
spin_unlock_irq(&priv->lock);
}
static void symbol_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct symbol_private *priv = usb_get_serial_data(port->serial);
int result;
bool was_throttled;
dbg("%s - port %d", __func__, port->number);
spin_lock_irq(&priv->lock);
priv->throttled = false;
was_throttled = priv->actually_throttled;
priv->actually_throttled = false;
spin_unlock_irq(&priv->lock);
if (was_throttled) {
result = usb_submit_urb(priv->int_urb, GFP_KERNEL);
if (result)
dev_err(&port->dev,
"%s - failed submitting read urb, error %d\n",
__func__, result);
}
}
static int symbol_startup(struct usb_serial *serial)
{
struct symbol_private *priv;
struct usb_host_interface *intf;
int i;
int retval = -ENOMEM;
bool int_in_found = false;
/* create our private serial structure */
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv == NULL) {
dev_err(&serial->dev->dev, "%s - Out of memory\n", __func__);
return -ENOMEM;
}
spin_lock_init(&priv->lock);
priv->serial = serial;
priv->port = serial->port[0];
priv->udev = serial->dev;
/* find our interrupt endpoint */
intf = serial->interface->altsetting;
for (i = 0; i < intf->desc.bNumEndpoints; ++i) {
struct usb_endpoint_descriptor *endpoint;
endpoint = &intf->endpoint[i].desc;
if (!usb_endpoint_is_int_in(endpoint))
continue;
priv->int_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!priv->int_urb) {
dev_err(&priv->udev->dev, "out of memory\n");
goto error;
}
priv->buffer_size = usb_endpoint_maxp(endpoint) * 2;
priv->int_buffer = kmalloc(priv->buffer_size, GFP_KERNEL);
if (!priv->int_buffer) {
dev_err(&priv->udev->dev, "out of memory\n");
goto error;
}
priv->int_address = endpoint->bEndpointAddress;
priv->bInterval = endpoint->bInterval;
/* set up our int urb */
usb_fill_int_urb(priv->int_urb, priv->udev,
usb_rcvintpipe(priv->udev,
endpoint->bEndpointAddress),
priv->int_buffer, priv->buffer_size,
symbol_int_callback, priv, priv->bInterval);
int_in_found = true;
break;
}
if (!int_in_found) {
dev_err(&priv->udev->dev,
"Error - the proper endpoints were not found!\n");
goto error;
}
usb_set_serial_data(serial, priv);
return 0;
error:
usb_free_urb(priv->int_urb);
kfree(priv->int_buffer);
kfree(priv);
return retval;
}
static void symbol_disconnect(struct usb_serial *serial)
{
struct symbol_private *priv = usb_get_serial_data(serial);
dbg("%s", __func__);
usb_kill_urb(priv->int_urb);
usb_free_urb(priv->int_urb);
}
static void symbol_release(struct usb_serial *serial)
{
struct symbol_private *priv = usb_get_serial_data(serial);
dbg("%s", __func__);
kfree(priv->int_buffer);
kfree(priv);
}
static struct usb_driver symbol_driver = {
.name = "symbol",
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
};
static struct usb_serial_driver symbol_device = {
.driver = {
.owner = THIS_MODULE,
.name = "symbol",
},
.id_table = id_table,
.num_ports = 1,
.attach = symbol_startup,
.open = symbol_open,
.close = symbol_close,
.disconnect = symbol_disconnect,
.release = symbol_release,
.throttle = symbol_throttle,
.unthrottle = symbol_unthrottle,
};
static struct usb_serial_driver * const serial_drivers[] = {
&symbol_device, NULL
};
module_usb_serial_driver(symbol_driver, serial_drivers);
MODULE_LICENSE("GPL");
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
| gpl-2.0 |
jsr-d10/android_kernel_jsr_msm8226 | drivers/mtd/maps/lantiq-flash.c | 4804 | 5974 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE
* Copyright (C) 2010 John Crispin <blogic@openwrt.org>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/cfi.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <lantiq_soc.h>
#include <lantiq_platform.h>
/*
* The NOR flash is connected to the same external bus unit (EBU) as PCI.
* To make PCI work we need to enable the endianness swapping for the address
* written to the EBU. This endianness swapping works for PCI correctly but
* fails for attached NOR devices. To work around this we need to use a complex
* map. The workaround involves swapping all addresses whilst probing the chip.
* Once probing is complete we stop swapping the addresses but swizzle the
* unlock addresses to ensure that access to the NOR device works correctly.
*/
enum {
LTQ_NOR_PROBING,
LTQ_NOR_NORMAL
};
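/*
 * Added annotation: in LTQ_NOR_PROBING mode every 16-bit access below
 * XORs its address with 2, so e.g. a read of offset 0x554 actually
 * hits 0x556. Once in LTQ_NOR_NORMAL mode only the CFI unlock
 * addresses are adjusted, by flipping bit 0 of addr_unlock1 and
 * addr_unlock2 in the probe routine.
 */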
struct ltq_mtd {
struct resource *res;
struct mtd_info *mtd;
struct map_info *map;
};
static char ltq_map_name[] = "ltq_nor";
static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL };
static map_word
ltq_read16(struct map_info *map, unsigned long adr)
{
unsigned long flags;
map_word temp;
if (map->map_priv_1 == LTQ_NOR_PROBING)
adr ^= 2;
spin_lock_irqsave(&ebu_lock, flags);
temp.x[0] = *(u16 *)(map->virt + adr);
spin_unlock_irqrestore(&ebu_lock, flags);
return temp;
}
static void
ltq_write16(struct map_info *map, map_word d, unsigned long adr)
{
unsigned long flags;
if (map->map_priv_1 == LTQ_NOR_PROBING)
adr ^= 2;
spin_lock_irqsave(&ebu_lock, flags);
*(u16 *)(map->virt + adr) = d.x[0];
spin_unlock_irqrestore(&ebu_lock, flags);
}
/*
* The following 2 functions copy data between iomem and a cached memory
* section. As memcpy() makes use of pre-fetching we cannot use it here.
* The normal alternative of using memcpy_{to,from}io also makes use of
* memcpy() on MIPS so it is not applicable either. We are therefore stuck
* with having to use our own loop.
*/
static void
ltq_copy_from(struct map_info *map, void *to,
unsigned long from, ssize_t len)
{
unsigned char *f = (unsigned char *)map->virt + from;
unsigned char *t = (unsigned char *)to;
unsigned long flags;
spin_lock_irqsave(&ebu_lock, flags);
while (len--)
*t++ = *f++;
spin_unlock_irqrestore(&ebu_lock, flags);
}
static void
ltq_copy_to(struct map_info *map, unsigned long to,
const void *from, ssize_t len)
{
unsigned char *f = (unsigned char *)from;
unsigned char *t = (unsigned char *)map->virt + to;
unsigned long flags;
spin_lock_irqsave(&ebu_lock, flags);
while (len--)
*t++ = *f++;
spin_unlock_irqrestore(&ebu_lock, flags);
}
static int __init
ltq_mtd_probe(struct platform_device *pdev)
{
struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
struct ltq_mtd *ltq_mtd;
struct resource *res;
struct cfi_private *cfi;
int err;
ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
if (!ltq_mtd)
return -ENOMEM;
platform_set_drvdata(pdev, ltq_mtd);
ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!ltq_mtd->res) {
dev_err(&pdev->dev, "failed to get memory resource");
err = -ENOENT;
goto err_out;
}
res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start,
resource_size(ltq_mtd->res), dev_name(&pdev->dev));
if (!res) {
dev_err(&pdev->dev, "failed to request mem resource");
err = -EBUSY;
goto err_out;
}
ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
if (!ltq_mtd->map) {
err = -ENOMEM;
goto err_out;
}
ltq_mtd->map->phys = res->start;
ltq_mtd->map->size = resource_size(res);
ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev,
ltq_mtd->map->phys, ltq_mtd->map->size);
if (!ltq_mtd->map->virt) {
dev_err(&pdev->dev, "failed to ioremap!\n");
err = -ENOMEM;
goto err_free;
}
ltq_mtd->map->name = ltq_map_name;
ltq_mtd->map->bankwidth = 2;
ltq_mtd->map->read = ltq_read16;
ltq_mtd->map->write = ltq_write16;
ltq_mtd->map->copy_from = ltq_copy_from;
ltq_mtd->map->copy_to = ltq_copy_to;
ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING;
ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map);
ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL;
if (!ltq_mtd->mtd) {
dev_err(&pdev->dev, "probing failed\n");
err = -ENXIO;
goto err_free;
}
ltq_mtd->mtd->owner = THIS_MODULE;
cfi = ltq_mtd->map->fldrv_priv;
cfi->addr_unlock1 ^= 1;
cfi->addr_unlock2 ^= 1;
err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, NULL,
ltq_mtd_data->parts,
ltq_mtd_data->nr_parts);
if (err) {
dev_err(&pdev->dev, "failed to add partitions\n");
goto err_destroy;
}
return 0;
err_destroy:
map_destroy(ltq_mtd->mtd);
err_free:
kfree(ltq_mtd->map);
err_out:
kfree(ltq_mtd);
return err;
}
static int __devexit
ltq_mtd_remove(struct platform_device *pdev)
{
struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
if (ltq_mtd) {
if (ltq_mtd->mtd) {
mtd_device_unregister(ltq_mtd->mtd);
map_destroy(ltq_mtd->mtd);
}
kfree(ltq_mtd->map);
kfree(ltq_mtd);
}
return 0;
}
static struct platform_driver ltq_mtd_driver = {
.remove = __devexit_p(ltq_mtd_remove),
.driver = {
.name = "ltq_nor",
.owner = THIS_MODULE,
},
};
static int __init
init_ltq_mtd(void)
{
int ret = platform_driver_probe(<q_mtd_driver, ltq_mtd_probe);
if (ret)
pr_err("ltq_nor: error registering platform driver");
return ret;
}
static void __exit
exit_ltq_mtd(void)
{
platform_driver_unregister(<q_mtd_driver);
}
module_init(init_ltq_mtd);
module_exit(exit_ltq_mtd);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq SoC NOR");
| gpl-2.0 |
pinkflozd/L5_Kernel_3.4 | drivers/mtd/nand/docg4.c | 4804 | 39237 | /*
* Copyright © 2012 Mike Dunn <mikedunn@newsguy.com>
*
* mtd nand driver for M-Systems DiskOnChip G4
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Tested on the Palm Treo 680. The G4 is also present on Toshiba Portege, Asus
* P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others.
* Should work on these as well. Let me know!
*
* TODO:
*
* Mechanism for management of password-protected areas
*
* Hamming ecc when reading oob only
*
* According to the M-Sys documentation, this device is also available in a
* "dual-die" configuration having a 256MB capacity, but no mechanism for
* detecting this variant is documented. Currently this driver assumes 128MB
* capacity.
*
* Support for multiple cascaded devices ("floors"). Not sure which gadgets
* contain multiple G4s in a cascaded configuration, if any.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/bch.h>
#include <linux/bitrev.h>
/*
* You'll want to ignore badblocks if you're reading a partition that contains
* data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
* it does not use mtd nand's method for marking bad blocks (using oob area).
* This will also skip the check of the "page written" flag.
*/
static bool ignore_badblocks;
module_param(ignore_badblocks, bool, 0);
MODULE_PARM_DESC(ignore_badblocks, "no badblock checking performed");
struct docg4_priv {
struct mtd_info *mtd;
struct device *dev;
void __iomem *virtadr;
int status;
struct {
unsigned int command;
int column;
int page;
} last_command;
uint8_t oob_buf[16];
uint8_t ecc_buf[7];
int oob_page;
struct bch_control *bch;
};
/*
* Defines prefixed with DOCG4 are unique to the diskonchip G4. All others are
* shared with other diskonchip devices (P3, G3 at least).
*
* Functions with names prefixed with docg4_ are mtd / nand interface functions
* (though they may also be called internally). All others are internal.
*/
#define DOC_IOSPACE_DATA 0x0800
/* register offsets */
#define DOC_CHIPID 0x1000
#define DOC_DEVICESELECT 0x100a
#define DOC_ASICMODE 0x100c
#define DOC_DATAEND 0x101e
#define DOC_NOP 0x103e
#define DOC_FLASHSEQUENCE 0x1032
#define DOC_FLASHCOMMAND 0x1034
#define DOC_FLASHADDRESS 0x1036
#define DOC_FLASHCONTROL 0x1038
#define DOC_ECCCONF0 0x1040
#define DOC_ECCCONF1 0x1042
#define DOC_HAMMINGPARITY 0x1046
#define DOC_BCH_SYNDROM(idx) (0x1048 + idx)
#define DOC_ASICMODECONFIRM 0x1072
#define DOC_CHIPID_INV 0x1074
#define DOC_POWERMODE 0x107c
#define DOCG4_MYSTERY_REG 0x1050
/* apparently used only to write oob bytes 6 and 7 */
#define DOCG4_OOB_6_7 0x1052
/* DOC_FLASHSEQUENCE register commands */
#define DOC_SEQ_RESET 0x00
#define DOCG4_SEQ_PAGE_READ 0x03
#define DOCG4_SEQ_FLUSH 0x29
#define DOCG4_SEQ_PAGEWRITE 0x16
#define DOCG4_SEQ_PAGEPROG 0x1e
#define DOCG4_SEQ_BLOCKERASE 0x24
/* DOC_FLASHCOMMAND register commands */
#define DOCG4_CMD_PAGE_READ 0x00
#define DOC_CMD_ERASECYCLE2 0xd0
#define DOCG4_CMD_FLUSH 0x70
#define DOCG4_CMD_READ2 0x30
#define DOC_CMD_PROG_BLOCK_ADDR 0x60
#define DOCG4_CMD_PAGEWRITE 0x80
#define DOC_CMD_PROG_CYCLE2 0x10
#define DOC_CMD_RESET 0xff
/* DOC_POWERMODE register bits */
#define DOC_POWERDOWN_READY 0x80
/* DOC_FLASHCONTROL register bits */
#define DOC_CTRL_CE 0x10
#define DOC_CTRL_UNKNOWN 0x40
#define DOC_CTRL_FLASHREADY 0x01
/* DOC_ECCCONF0 register bits */
#define DOC_ECCCONF0_READ_MODE 0x8000
#define DOC_ECCCONF0_UNKNOWN 0x2000
#define DOC_ECCCONF0_ECC_ENABLE 0x1000
#define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff
/* DOC_ECCCONF1 register bits */
#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
#define DOC_ECCCONF1_ECC_ENABLE 0x07
#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
/* DOC_ASICMODE register bits */
#define DOC_ASICMODE_RESET 0x00
#define DOC_ASICMODE_NORMAL 0x01
#define DOC_ASICMODE_POWERDOWN 0x02
#define DOC_ASICMODE_MDWREN 0x04
#define DOC_ASICMODE_BDETCT_RESET 0x08
#define DOC_ASICMODE_RSTIN_RESET 0x10
#define DOC_ASICMODE_RAM_WE 0x20
/* good status values read after read/write/erase operations */
#define DOCG4_PROGSTATUS_GOOD 0x51
#define DOCG4_PROGSTATUS_GOOD_2 0xe0
/*
* On read operations (page and oob-only), the first byte read from I/O reg is a
* status. On error, it reads 0x73; otherwise, it reads either 0x71 (first read
* after reset only) or 0x51, so bit 1 is presumed to be an error indicator.
*/
#define DOCG4_READ_ERROR 0x02 /* bit 1 indicates read error */
/* anatomy of the device */
#define DOCG4_CHIP_SIZE 0x8000000
#define DOCG4_PAGE_SIZE 0x200
#define DOCG4_PAGES_PER_BLOCK 0x200
#define DOCG4_BLOCK_SIZE (DOCG4_PAGES_PER_BLOCK * DOCG4_PAGE_SIZE)
#define DOCG4_NUMBLOCKS (DOCG4_CHIP_SIZE / DOCG4_BLOCK_SIZE)
#define DOCG4_OOB_SIZE 0x10
#define DOCG4_CHIP_SHIFT 27 /* log_2(DOCG4_CHIP_SIZE) */
#define DOCG4_PAGE_SHIFT 9 /* log_2(DOCG4_PAGE_SIZE) */
#define DOCG4_ERASE_SHIFT 18 /* log_2(DOCG4_BLOCK_SIZE) */
/* all but the last byte is included in ecc calculation */
#define DOCG4_BCH_SIZE (DOCG4_PAGE_SIZE + DOCG4_OOB_SIZE - 1)
#define DOCG4_USERDATA_LEN 520 /* 512 byte page plus 8 oob avail to user */
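/*
 * Derived geometry, for reference (illustrative, computed from the macros
 * above): DOCG4_BLOCK_SIZE = 0x200 * 0x200 = 0x40000 (256 KiB), so
 * DOCG4_NUMBLOCKS = 0x8000000 / 0x40000 = 512, consistent with
 * DOCG4_ERASE_SHIFT = 18 (2^18 = 0x40000).
 */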
/* expected values from the ID registers */
#define DOCG4_IDREG1_VALUE 0x0400
#define DOCG4_IDREG2_VALUE 0xfbff
/* primitive polynomial used to build the Galois field used by hw ecc gen */
#define DOCG4_PRIMITIVE_POLY 0x4443
#define DOCG4_M 14 /* Galois field is of order 2^14 */
#define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */
#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
/*
* Oob bytes 0 - 6 are available to the user.
* Byte 7 is hamming ecc for first 7 bytes. Bytes 8 - 14 are hw-generated ecc.
* Byte 15 (the last) is used by the driver as a "page written" flag.
*/
static struct nand_ecclayout docg4_oobinfo = {
.eccbytes = 9,
.eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15},
.oobavail = 7,
.oobfree = { {0, 7} }
};
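/*
 * Resulting oob byte map (a summary of the layout comment above): bytes
 * 0 - 6 are user data (oobfree), byte 7 is hamming parity over bytes 0 - 6,
 * bytes 8 - 14 are the hw-generated bch ecc, and byte 15 is the driver's
 * "page written" flag. Bytes 7 - 15 are all claimed as eccpos, so they are
 * never written directly by the user.
 */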
/*
* The device has a nop register which M-Sys claims is for the purpose of
* inserting precise delays. But beware; at least some operations fail if the
* nop writes are replaced with a generic delay!
*/
static inline void write_nop(void __iomem *docptr)
{
writew(0, docptr + DOC_NOP);
}
static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
int i;
struct nand_chip *nand = mtd->priv;
uint16_t *p = (uint16_t *) buf;
len >>= 1;
for (i = 0; i < len; i++)
p[i] = readw(nand->IO_ADDR_R);
}
static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
{
int i;
struct nand_chip *nand = mtd->priv;
uint16_t *p = (uint16_t *) buf;
len >>= 1;
for (i = 0; i < len; i++)
writew(p[i], nand->IO_ADDR_W);
}
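/*
 * Note: both buffer helpers above transfer 16-bit words (the device sits on
 * a 16-bit bus; see NAND_BUSWIDTH_16 in init_mtd_structs), hence len >>= 1.
 */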
static int poll_status(struct docg4_priv *doc)
{
/*
* Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL
* register. Operations known to take a long time (e.g., block erase)
* should sleep for a while before calling this.
*/
uint16_t flash_status;
unsigned int timeo;
void __iomem *docptr = doc->virtadr;
dev_dbg(doc->dev, "%s...\n", __func__);
/* hardware quirk requires reading twice initially */
flash_status = readw(docptr + DOC_FLASHCONTROL);
timeo = 1000;
do {
cpu_relax();
flash_status = readb(docptr + DOC_FLASHCONTROL);
} while (!(flash_status & DOC_CTRL_FLASHREADY) && --timeo);
if (!timeo) {
dev_err(doc->dev, "%s: timed out!\n", __func__);
return NAND_STATUS_FAIL;
}
if (unlikely(timeo < 50))
dev_warn(doc->dev, "%s: nearly timed out; %d remaining\n",
__func__, timeo);
return 0;
}
static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand)
{
struct docg4_priv *doc = nand->priv;
int status = NAND_STATUS_WP; /* inverse logic?? */
dev_dbg(doc->dev, "%s...\n", __func__);
/* report any previously unreported error */
if (doc->status) {
status |= doc->status;
doc->status = 0;
return status;
}
status |= poll_status(doc);
return status;
}
static void docg4_select_chip(struct mtd_info *mtd, int chip)
{
/*
* Select among multiple cascaded chips ("floors"). Multiple floors are
* not yet supported, so the only valid non-negative value is 0.
*/
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
dev_dbg(doc->dev, "%s: chip %d\n", __func__, chip);
if (chip < 0)
return; /* deselected */
if (chip > 0)
dev_warn(doc->dev, "multiple floors currently unsupported\n");
writew(0, docptr + DOC_DEVICESELECT);
}
static void reset(struct mtd_info *mtd)
{
/* full device reset */
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN,
docptr + DOC_ASICMODE);
writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN),
docptr + DOC_ASICMODECONFIRM);
write_nop(docptr);
writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN,
docptr + DOC_ASICMODE);
writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN),
docptr + DOC_ASICMODECONFIRM);
writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1);
poll_status(doc);
}
static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf)
{
/* read the 7 hw-generated ecc bytes */
int i;
for (i = 0; i < 7; i++) { /* hw quirk; read twice */
ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
}
}
static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
/*
* Called after a page read when hardware reports bitflips.
* Up to four bitflips can be corrected.
*/
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
int i, numerrs, errpos[4];
const uint8_t blank_read_hwecc[8] = {
0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 };
read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */
/* check if read error is due to a blank page */
if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7))
return 0; /* yes */
/* skip additional check of "written flag" if ignore_badblocks */
if (ignore_badblocks == false) {
/*
* If the hw ecc bytes are not those of a blank page, there's
* still a chance that the page is blank, but was read with
* errors. Check the "written flag" in last oob byte, which
* is set to zero when a page is written. If more than half
* the bits are set, assume a blank page. Unfortunately, the
* bit flip(s) are not reported in stats.
*/
if (doc->oob_buf[15]) {
int bit, numsetbits = 0;
unsigned long written_flag = doc->oob_buf[15];
for_each_set_bit(bit, &written_flag, 8)
numsetbits++;
if (numsetbits > 4) { /* assume blank */
dev_warn(doc->dev,
"error(s) in blank page "
"at offset %08x\n",
page * DOCG4_PAGE_SIZE);
return 0;
}
}
}
/*
* The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
* algorithm is used to decode this. However the hw operates on page
* data in a bit order that is the reverse of that of the bch alg,
* requiring that the bits be reversed on the result. Thanks to Ivan
* Djelic for his analysis!
*/
for (i = 0; i < 7; i++)
doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]);
numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL,
doc->ecc_buf, NULL, errpos);
if (numerrs == -EBADMSG) {
dev_warn(doc->dev, "uncorrectable errors at offset %08x\n",
page * DOCG4_PAGE_SIZE);
return -EBADMSG;
}
BUG_ON(numerrs < 0); /* -EINVAL, or anything other than -EBADMSG */
/* undo last step in BCH alg (modulo mirroring not needed) */
for (i = 0; i < numerrs; i++)
errpos[i] = (errpos[i] & ~7)|(7-(errpos[i] & 7));
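/*
 * Illustrative example: a reported bit position of 5 maps to
 * (5 & ~7) | (7 - (5 & 7)) = 2, i.e. the bit index is mirrored within its
 * byte to undo the hw's reversed bit order.
 */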
/* fix the errors */
for (i = 0; i < numerrs; i++) {
/* ignore if error within oob ecc bytes */
if (errpos[i] > DOCG4_USERDATA_LEN * 8)
continue;
/* if error within oob area preceding ecc bytes... */
if (errpos[i] > DOCG4_PAGE_SIZE * 8)
change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
(unsigned long *)doc->oob_buf);
else /* error in page data */
change_bit(errpos[i], (unsigned long *)buf);
}
dev_notice(doc->dev, "%d error(s) corrected at offset %08x\n",
numerrs, page * DOCG4_PAGE_SIZE);
return numerrs;
}
static uint8_t docg4_read_byte(struct mtd_info *mtd)
{
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
dev_dbg(doc->dev, "%s\n", __func__);
if (doc->last_command.command == NAND_CMD_STATUS) {
int status;
/*
* Previous nand command was status request, so nand
* infrastructure code expects to read the status here. If an
* error occurred in a previous operation, report it.
*/
doc->last_command.command = 0;
if (doc->status) {
status = doc->status;
doc->status = 0;
}
/* why is NAND_STATUS_WP inverse logic?? */
else
status = NAND_STATUS_WP | NAND_STATUS_READY;
return status;
}
dev_warn(doc->dev, "unexpectd call to read_byte()\n");
return 0;
}
static void write_addr(struct docg4_priv *doc, uint32_t docg4_addr)
{
/* write the four address bytes packed in docg4_addr to the device */
void __iomem *docptr = doc->virtadr;
writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
docg4_addr >>= 8;
writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
docg4_addr >>= 8;
writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
docg4_addr >>= 8;
writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
}
static int read_progstatus(struct docg4_priv *doc)
{
/*
* This apparently checks the status of programming. Done after an
* erasure, and after page data is written. On error, the status is
* saved, to be later retrieved by the nand infrastructure code.
*/
void __iomem *docptr = doc->virtadr;
/* status is read from the I/O reg */
uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA);
uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA);
uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG);
dev_dbg(doc->dev, "docg4: %s: %02x %02x %02x\n",
__func__, status1, status2, status3);
if (status1 != DOCG4_PROGSTATUS_GOOD
|| status2 != DOCG4_PROGSTATUS_GOOD_2
|| status3 != DOCG4_PROGSTATUS_GOOD_2) {
doc->status = NAND_STATUS_FAIL;
dev_warn(doc->dev, "read_progstatus failed: "
"%02x, %02x, %02x\n", status1, status2, status3);
return -EIO;
}
return 0;
}
static int pageprog(struct mtd_info *mtd)
{
/*
* Final step in writing a page. Writes the contents of its
* internal buffer out to the flash array, or some such.
*/
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
int retval = 0;
dev_dbg(doc->dev, "docg4: %s\n", __func__);
writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE);
writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND);
write_nop(docptr);
write_nop(docptr);
/* Just busy-wait; usleep_range() slows things down noticeably. */
poll_status(doc);
writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
write_nop(docptr);
write_nop(docptr);
write_nop(docptr);
write_nop(docptr);
write_nop(docptr);
retval = read_progstatus(doc);
writew(0, docptr + DOC_DATAEND);
write_nop(docptr);
poll_status(doc);
write_nop(docptr);
return retval;
}
static void sequence_reset(struct mtd_info *mtd)
{
/* common starting sequence for all operations */
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL);
writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE);
writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND);
write_nop(docptr);
write_nop(docptr);
poll_status(doc);
write_nop(docptr);
}
static void read_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
{
/* first step in reading a page */
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
dev_dbg(doc->dev,
"docg4: %s: g4 page %08x\n", __func__, docg4_addr);
sequence_reset(mtd);
writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE);
writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND);
write_nop(docptr);
write_addr(doc, docg4_addr);
write_nop(docptr);
writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND);
write_nop(docptr);
write_nop(docptr);
poll_status(doc);
}
static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
{
/* first step in writing a page */
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
dev_dbg(doc->dev,
"docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
sequence_reset(mtd);
writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
write_nop(docptr);
write_addr(doc, docg4_addr);
write_nop(docptr);
write_nop(docptr);
poll_status(doc);
}
static uint32_t mtd_to_docg4_address(int page, int column)
{
/*
* Convert mtd address to format used by the device, 32 bit packed.
*
* Some notes on G4 addressing... The M-Sys documentation on this device
* claims that pages are 2K in length, and indeed, the format of the
* address used by the device reflects that. But within each page are
* four 512 byte "sub-pages", each with its own oob data that is
* read/written immediately after the 512 bytes of page data. This oob
* data contains the ecc bytes for the preceding 512 bytes.
*
* Rather than tell the mtd nand infrastructure that page size is 2k,
* with four sub-pages each, we engage in a little subterfuge and tell
* the infrastructure code that pages are 512 bytes in size. This is
* done because during the course of reverse-engineering the device, I
* never observed an instance where an entire 2K "page" was read or
* written as a unit. Each "sub-page" is always addressed individually,
* its data read/written, and ecc handled before the next "sub-page" is
* addressed.
*
* This requires us to convert addresses passed by the mtd nand
* infrastructure code to those used by the device.
*
* The address that is written to the device consists of four bytes: the
* first two are the 2k page number, and the second two are the index into
* the page. The index is in terms of 16-bit half-words and includes
* the preceding oob data, so e.g., the index into the second
* "sub-page" is 0x108, and the full device address of the start of mtd
* page 0x201 is 0x00800108.
*/
int g4_page = page / 4; /* device's 2K page */
int g4_index = (page % 4) * 0x108 + column/2; /* offset into page */
return (g4_page << 16) | g4_index; /* pack */
}
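/*
 * Worked example (illustrative, matching the comment above): for mtd page
 * 0x201, column 0: g4_page = 0x201 / 4 = 0x80 and g4_index =
 * (0x201 % 4) * 0x108 + 0 = 0x108, so the packed device address is
 * (0x80 << 16) | 0x108 = 0x00800108.
 */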
static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
int page_addr)
{
/* handle standard nand commands */
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
uint32_t g4_addr = mtd_to_docg4_address(page_addr, column);
dev_dbg(doc->dev, "%s %x, page_addr=%x, column=%x\n",
__func__, command, page_addr, column);
/*
* Save the command and its arguments. This enables emulation of
* standard flash devices, and also some optimizations.
*/
doc->last_command.command = command;
doc->last_command.column = column;
doc->last_command.page = page_addr;
switch (command) {
case NAND_CMD_RESET:
reset(mtd);
break;
case NAND_CMD_READ0:
read_page_prologue(mtd, g4_addr);
break;
case NAND_CMD_STATUS:
/* next call to read_byte() will expect a status */
break;
case NAND_CMD_SEQIN:
write_page_prologue(mtd, g4_addr);
/* hack for deferred write of oob bytes */
if (doc->oob_page == page_addr)
memcpy(nand->oob_poi, doc->oob_buf, 16);
break;
case NAND_CMD_PAGEPROG:
pageprog(mtd);
break;
/* we don't expect these, based on review of nand_base.c */
case NAND_CMD_READOOB:
case NAND_CMD_READID:
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
dev_warn(doc->dev, "docg4_command: "
"unexpected nand command 0x%x\n", command);
break;
}
}
static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
uint8_t *buf, int page, bool use_ecc)
{
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
uint16_t status, edc_err, *buf16;
dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
writew(DOC_ECCCONF0_READ_MODE |
DOC_ECCCONF0_ECC_ENABLE |
DOC_ECCCONF0_UNKNOWN |
DOCG4_BCH_SIZE,
docptr + DOC_ECCCONF0);
write_nop(docptr);
write_nop(docptr);
write_nop(docptr);
write_nop(docptr);
write_nop(docptr);
/* the 1st byte from the I/O reg is a status; the rest is page data */
status = readw(docptr + DOC_IOSPACE_DATA);
if (status & DOCG4_READ_ERROR) {
dev_err(doc->dev,
"docg4_read_page: bad status: 0x%02x\n", status);
writew(0, docptr + DOC_DATAEND);
return -EIO;
}
dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */
/*
* Diskonchips read oob immediately after a page read. Mtd
* infrastructure issues a separate command for reading oob after the
* page is read. So we save the oob bytes in a local buffer and just
* copy it if the next command reads oob from the same page.
*/
/* first 14 oob bytes read from I/O reg */
docg4_read_buf(mtd, doc->oob_buf, 14);
/* last 2 read from another reg */
buf16 = (uint16_t *)(doc->oob_buf + 14);
*buf16 = readw(docptr + DOCG4_MYSTERY_REG);
write_nop(docptr);
if (likely(use_ecc == true)) {
/* read the register that tells us if bitflip(s) detected */
edc_err = readw(docptr + DOC_ECCCONF1);
edc_err = readw(docptr + DOC_ECCCONF1);
dev_dbg(doc->dev, "%s: edc_err = 0x%02x\n", __func__, edc_err);
/* If bitflips are reported, attempt to correct with ecc */
if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
int bits_corrected = correct_data(mtd, buf, page);
if (bits_corrected == -EBADMSG)
mtd->ecc_stats.failed++;
else
mtd->ecc_stats.corrected += bits_corrected;
}
}
writew(0, docptr + DOC_DATAEND);
return 0;
}
static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
uint8_t *buf, int page)
{
return read_page(mtd, nand, buf, page, false);
}
static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
uint8_t *buf, int page)
{
return read_page(mtd, nand, buf, page, true);
}
static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
int page, int sndcmd)
{
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
uint16_t status;
dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
/*
* Oob bytes are read as part of a normal page read. If the previous
* nand command was a read of the page whose oob is now being read, just
* copy the oob bytes that we saved in a local buffer and avoid a
* separate oob read.
*/
if (doc->last_command.command == NAND_CMD_READ0 &&
doc->last_command.page == page) {
memcpy(nand->oob_poi, doc->oob_buf, 16);
return 0;
}
/*
* Separate read of oob data only.
*/
docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
write_nop(docptr);
write_nop(docptr);
write_nop(docptr);
write_nop(docptr);
write_nop(docptr);
/* the 1st byte from the I/O reg is a status; the rest is oob data */
status = readw(docptr + DOC_IOSPACE_DATA);
if (status & DOCG4_READ_ERROR) {
dev_warn(doc->dev,
"docg4_read_oob failed: status = 0x%02x\n", status);
return -EIO;
}
dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
docg4_read_buf(mtd, nand->oob_poi, 16);
write_nop(docptr);
write_nop(docptr);
write_nop(docptr);
writew(0, docptr + DOC_DATAEND);
write_nop(docptr);
return 0;
}
static void docg4_erase_block(struct mtd_info *mtd, int page)
{
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
uint16_t g4_page;
dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
sequence_reset(mtd);
writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE);
writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND);
write_nop(docptr);
/* only 2 bytes of address are written to specify erase block */
g4_page = (uint16_t)(page / 4); /* to g4's 2k page addressing */
writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
g4_page >>= 8;
writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
write_nop(docptr);
/* start the erasure */
writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND);
write_nop(docptr);
write_nop(docptr);
usleep_range(500, 1000); /* erasure is long; take a snooze */
poll_status(doc);
writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
write_nop(docptr);
write_nop(docptr);
write_nop(docptr);
write_nop(docptr);
write_nop(docptr);
read_progstatus(doc);
writew(0, docptr + DOC_DATAEND);
write_nop(docptr);
poll_status(doc);
write_nop(docptr);
}
static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf, bool use_ecc)
{
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
uint8_t ecc_buf[8];
dev_dbg(doc->dev, "%s...\n", __func__);
writew(DOC_ECCCONF0_ECC_ENABLE |
DOC_ECCCONF0_UNKNOWN |
DOCG4_BCH_SIZE,
docptr + DOC_ECCCONF0);
write_nop(docptr);
/* write the page data */
docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE);
/* oob bytes 0 through 5 are written to I/O reg */
docg4_write_buf16(mtd, nand->oob_poi, 6);
/* oob byte 6 written to a separate reg */
writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7);
write_nop(docptr);
write_nop(docptr);
/* write hw-generated ecc bytes to oob */
if (likely(use_ecc == true)) {
/* oob byte 7 is hamming code */
uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY);
hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */
writew(hamming, docptr + DOCG4_OOB_6_7);
write_nop(docptr);
/* read the 7 bch bytes from ecc regs */
read_hw_ecc(docptr, ecc_buf);
ecc_buf[7] = 0; /* clear the "page written" flag */
}
/* write user-supplied bytes to oob */
else {
writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7);
write_nop(docptr);
memcpy(ecc_buf, &nand->oob_poi[8], 8);
}
docg4_write_buf16(mtd, ecc_buf, 8);
write_nop(docptr);
write_nop(docptr);
writew(0, docptr + DOC_DATAEND);
write_nop(docptr);
}
static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf)
{
return write_page(mtd, nand, buf, false);
}
static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
const uint8_t *buf)
{
return write_page(mtd, nand, buf, true);
}
static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
int page)
{
/*
* Writing oob-only is not really supported, because MLC nand must write
* oob bytes at the same time as page data. Nonetheless, we save the
* oob buffer contents here, and then write it along with the page data
* if the same page is subsequently written. This allows user space
* utilities that write the oob data prior to the page data to work
* (e.g., nandwrite). The disadvantage is that, if the intention was to
* write oob only, the operation is quietly ignored. Also, oob can get
* corrupted if two concurrent processes are running nandwrite.
*/
/* note that bytes 7..14 are hw generated hamming/ecc and overwritten */
struct docg4_priv *doc = nand->priv;
doc->oob_page = page;
memcpy(doc->oob_buf, nand->oob_poi, 16);
return 0;
}
static int __init read_factory_bbt(struct mtd_info *mtd)
{
/*
* The device contains a read-only factory bad block table. Read it and
* update the memory-based bbt accordingly.
*/
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
uint8_t *buf;
int i, block, status;
buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
read_page_prologue(mtd, g4_addr);
status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE);
if (status)
goto exit;
/*
* If no memory-based bbt was created, exit. This will happen if module
* parameter ignore_badblocks is set. Then why even call this function?
* For an unknown reason, block erase always fails if it's the first
* operation after device power-up. The above read ensures it never is.
* Ugly, I know.
*/
if (nand->bbt == NULL) /* no memory-based bbt */
goto exit;
/*
* Parse factory bbt and update memory-based bbt. Factory bbt format is
* simple: one bit per block, block numbers increase left to right (msb
* to lsb). Bit clear means bad block.
*/
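/*
 * Illustrative example: buf[i] == 0x7f has only the msb clear, so
 * bits = ~buf[i] yields set bit 7, and badblock = block + 7 - 7 = block;
 * the leftmost (msb) bit marks the first block of the group of eight.
 */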
for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) {
int bitnum;
unsigned long bits = ~buf[i];
for_each_set_bit(bitnum, &bits, 8) {
int badblock = block + 7 - bitnum;
nand->bbt[badblock / 4] |=
0x03 << ((badblock % 4) * 2);
mtd->ecc_stats.badblocks++;
dev_notice(doc->dev, "factory-marked bad block: %d\n",
badblock);
}
}
exit:
kfree(buf);
return status;
}
static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
/*
* Mark a block as bad. Bad blocks are marked in the oob area of the
* first page of the block. The default scan_bbt() in the nand
* infrastructure code works fine for building the memory-based bbt
* during initialization, as does the nand infrastructure function that
* checks if a block is bad by reading the bbt. This function replaces
* the nand default because writes to oob-only are not supported.
*/
int ret, i;
uint8_t *buf;
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
struct nand_bbt_descr *bbtd = nand->badblock_pattern;
int block = (int)(ofs >> nand->bbt_erase_shift);
int page = (int)(ofs >> nand->page_shift);
uint32_t g4_addr = mtd_to_docg4_address(page, 0);
dev_dbg(doc->dev, "%s: %08llx\n", __func__, ofs);
if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1)))
dev_warn(doc->dev, "%s: ofs %llx not start of block!\n",
__func__, ofs);
/* allocate blank buffer for page data */
buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
/* update bbt in memory */
nand->bbt[block / 4] |= 0x01 << ((block & 0x03) * 2);
/* write bit-wise negation of pattern to oob buffer */
memset(nand->oob_poi, 0xff, mtd->oobsize);
for (i = 0; i < bbtd->len; i++)
nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i];
/* write first page of block */
write_page_prologue(mtd, g4_addr);
docg4_write_page(mtd, nand, buf);
ret = pageprog(mtd);
if (!ret)
mtd->ecc_stats.badblocks++;
kfree(buf);
return ret;
}
static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip)
{
/* only called when module_param ignore_badblocks is set */
return 0;
}
static int docg4_suspend(struct platform_device *pdev, pm_message_t state)
{
/*
* Put the device into "deep power-down" mode. Note that CE# must be
* deasserted for this to take effect. The xscale, e.g., can be
* configured to float this signal when the processor enters power-down,
* and a suitable pull-up ensures its deassertion.
*/
int i;
uint8_t pwr_down;
struct docg4_priv *doc = platform_get_drvdata(pdev);
void __iomem *docptr = doc->virtadr;
dev_dbg(doc->dev, "%s...\n", __func__);
/* poll the register that tells us we're ready to go to sleep */
for (i = 0; i < 10; i++) {
pwr_down = readb(docptr + DOC_POWERMODE);
if (pwr_down & DOC_POWERDOWN_READY)
break;
usleep_range(1000, 4000);
}
if (!(pwr_down & DOC_POWERDOWN_READY)) {
dev_err(doc->dev, "suspend failed; "
"timeout polling DOC_POWERDOWN_READY\n");
return -EIO;
}
writew(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN,
docptr + DOC_ASICMODE);
writew(~(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN),
docptr + DOC_ASICMODECONFIRM);
write_nop(docptr);
return 0;
}
static int docg4_resume(struct platform_device *pdev)
{
/*
* Exit power-down. Twelve consecutive reads of the address below
* accomplishes this, assuming CE# has been asserted.
*/
struct docg4_priv *doc = platform_get_drvdata(pdev);
void __iomem *docptr = doc->virtadr;
int i;
dev_dbg(doc->dev, "%s...\n", __func__);
for (i = 0; i < 12; i++)
readb(docptr + 0x1fff);
return 0;
}
static void __init init_mtd_structs(struct mtd_info *mtd)
{
/* initialize mtd and nand data structures */
/*
* Note that some of the following initializations are not usually
* required within a nand driver because they are performed by the nand
* infrastructure code as part of nand_scan(). In this case they need
* to be initialized here because we skip call to nand_scan_ident() (the
* first half of nand_scan()). The call to nand_scan_ident() is skipped
* because for this device the chip id is not read in the manner of a
* standard nand device. Unfortunately, nand_scan_ident() does other
* things as well, such as call nand_set_defaults().
*/
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
mtd->size = DOCG4_CHIP_SIZE;
mtd->name = "Msys_Diskonchip_G4";
mtd->writesize = DOCG4_PAGE_SIZE;
mtd->erasesize = DOCG4_BLOCK_SIZE;
mtd->oobsize = DOCG4_OOB_SIZE;
nand->chipsize = DOCG4_CHIP_SIZE;
nand->chip_shift = DOCG4_CHIP_SHIFT;
nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
nand->chip_delay = 20;
nand->page_shift = DOCG4_PAGE_SHIFT;
nand->pagemask = 0x3ffff;
nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
nand->badblockbits = 8;
nand->ecc.layout = &docg4_oobinfo;
nand->ecc.mode = NAND_ECC_HW_SYNDROME;
nand->ecc.size = DOCG4_PAGE_SIZE;
nand->ecc.prepad = 8;
nand->ecc.bytes = 8;
nand->ecc.strength = DOCG4_T;
nand->options =
NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR;
nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
nand->controller = &nand->hwcontrol;
spin_lock_init(&nand->controller->lock);
init_waitqueue_head(&nand->controller->wq);
/* methods */
nand->cmdfunc = docg4_command;
nand->waitfunc = docg4_wait;
nand->select_chip = docg4_select_chip;
nand->read_byte = docg4_read_byte;
nand->block_markbad = docg4_block_markbad;
nand->read_buf = docg4_read_buf;
nand->write_buf = docg4_write_buf16;
nand->scan_bbt = nand_default_bbt;
nand->erase_cmd = docg4_erase_block;
nand->ecc.read_page = docg4_read_page;
nand->ecc.write_page = docg4_write_page;
nand->ecc.read_page_raw = docg4_read_page_raw;
nand->ecc.write_page_raw = docg4_write_page_raw;
nand->ecc.read_oob = docg4_read_oob;
nand->ecc.write_oob = docg4_write_oob;
/*
* The way the nand infrastructure code is written, a memory-based bbt
* is not created if NAND_SKIP_BBTSCAN is set. With no memory bbt,
* nand->block_bad() is used. So when ignoring bad blocks, we skip the
* scan and define a dummy block_bad() which always returns 0.
*/
if (ignore_badblocks) {
nand->options |= NAND_SKIP_BBTSCAN;
nand->block_bad = docg4_block_neverbad;
}
}
static int __init read_id_reg(struct mtd_info *mtd)
{
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
uint16_t id1, id2;
/* check for presence of g4 chip by reading id registers */
id1 = readw(docptr + DOC_CHIPID);
id1 = readw(docptr + DOCG4_MYSTERY_REG);
id2 = readw(docptr + DOC_CHIPID_INV);
id2 = readw(docptr + DOCG4_MYSTERY_REG);
if (id1 == DOCG4_IDREG1_VALUE && id2 == DOCG4_IDREG2_VALUE) {
dev_info(doc->dev,
"NAND device: 128MiB Diskonchip G4 detected\n");
return 0;
}
return -ENODEV;
}
static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
static int __init probe_docg4(struct platform_device *pdev)
{
struct mtd_info *mtd;
struct nand_chip *nand;
void __iomem *virtadr;
struct docg4_priv *doc;
int len, retval;
struct resource *r;
struct device *dev = &pdev->dev;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
dev_err(dev, "no io memory resource defined!\n");
return -ENODEV;
}
virtadr = ioremap(r->start, resource_size(r));
if (!virtadr) {
dev_err(dev, "Diskonchip ioremap failed: %pR\n", r);
return -EIO;
}
len = sizeof(struct mtd_info) + sizeof(struct nand_chip) +
sizeof(struct docg4_priv);
mtd = kzalloc(len, GFP_KERNEL);
if (mtd == NULL) {
retval = -ENOMEM;
goto fail;
}
nand = (struct nand_chip *) (mtd + 1);
doc = (struct docg4_priv *) (nand + 1);
mtd->priv = nand;
nand->priv = doc;
mtd->owner = THIS_MODULE;
doc->virtadr = virtadr;
doc->dev = dev;
init_mtd_structs(mtd);
/* initialize kernel bch algorithm */
doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
if (doc->bch == NULL) {
retval = -EINVAL;
goto fail;
}
platform_set_drvdata(pdev, doc);
reset(mtd);
retval = read_id_reg(mtd);
if (retval == -ENODEV) {
dev_warn(dev, "No diskonchip G4 device found.\n");
goto fail;
}
retval = nand_scan_tail(mtd);
if (retval)
goto fail;
retval = read_factory_bbt(mtd);
if (retval)
goto fail;
retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
if (retval)
goto fail;
doc->mtd = mtd;
return 0;
fail:
iounmap(virtadr);
if (mtd) {
/* re-declarations avoid compiler warning */
struct nand_chip *nand = mtd->priv;
struct docg4_priv *doc = nand->priv;
nand_release(mtd); /* deletes partitions and mtd devices */
platform_set_drvdata(pdev, NULL);
free_bch(doc->bch);
kfree(mtd);
}
return retval;
}
static int __exit cleanup_docg4(struct platform_device *pdev)
{
struct docg4_priv *doc = platform_get_drvdata(pdev);
nand_release(doc->mtd);
platform_set_drvdata(pdev, NULL);
free_bch(doc->bch);
kfree(doc->mtd);
iounmap(doc->virtadr);
return 0;
}
static struct platform_driver docg4_driver = {
.driver = {
.name = "docg4",
.owner = THIS_MODULE,
},
.suspend = docg4_suspend,
.resume = docg4_resume,
.remove = __exit_p(cleanup_docg4),
};
static int __init docg4_init(void)
{
return platform_driver_probe(&docg4_driver, probe_docg4);
}
static void __exit docg4_exit(void)
{
platform_driver_unregister(&docg4_driver);
}
module_init(docg4_init);
module_exit(docg4_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mike Dunn");
MODULE_DESCRIPTION("M-Systems DiskOnChip G4 device driver");
| gpl-2.0 |
SerenityS/android_kernel_pantech_ef39s_jb | drivers/video/cobalt_lcdfb.c | 5060 | 7896 | /*
* Cobalt server LCD frame buffer driver.
*
* Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/module.h>
/*
* Cursor position address
* \X 0 1 2 ... 14 15
* Y+----+----+----+---+----+----+
* 0|0x00|0x01|0x02|...|0x0e|0x0f|
* +----+----+----+---+----+----+
* 1|0x40|0x41|0x42|...|0x4e|0x4f|
* +----+----+----+---+----+----+
*/
#define LCD_DATA_REG_OFFSET 0x10
#define LCD_XRES_MAX 16
#define LCD_YRES_MAX 2
#define LCD_CHARS_MAX 32
#define LCD_CLEAR 0x01
#define LCD_CURSOR_MOVE_HOME 0x02
#define LCD_RESET 0x06
#define LCD_OFF 0x08
#define LCD_CURSOR_OFF 0x0c
#define LCD_CURSOR_BLINK_OFF 0x0e
#define LCD_CURSOR_ON 0x0f
#define LCD_ON LCD_CURSOR_ON
#define LCD_CURSOR_MOVE_LEFT 0x10
#define LCD_CURSOR_MOVE_RIGHT 0x14
#define LCD_DISPLAY_LEFT 0x18
#define LCD_DISPLAY_RIGHT 0x1c
#define LCD_PRERESET 0x3f /* execute 4 times continuously */
#define LCD_BUSY 0x80
#define LCD_GRAPHIC_MODE 0x40
#define LCD_TEXT_MODE 0x80
#define LCD_CUR_POS_MASK 0x7f
#define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK)
#define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE)
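/*
 * Illustrative example: per the cursor position diagram above, the cell at
 * column 2 of row 1 has address 0x42, so LCD_TEXT_POS(0x42) = 0x42 | 0x80 =
 * 0xc2 is the value written to the control register to address it.
 */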
static inline void lcd_write_control(struct fb_info *info, u8 control)
{
writel((u32)control << 24, info->screen_base);
}
static inline u8 lcd_read_control(struct fb_info *info)
{
return readl(info->screen_base) >> 24;
}
static inline void lcd_write_data(struct fb_info *info, u8 data)
{
writel((u32)data << 24, info->screen_base + LCD_DATA_REG_OFFSET);
}
static inline u8 lcd_read_data(struct fb_info *info)
{
return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
}
static int lcd_busy_wait(struct fb_info *info)
{
u8 val = 0;
int timeout = 10, retval = 0;
do {
val = lcd_read_control(info);
val &= LCD_BUSY;
if (val != LCD_BUSY)
break;
if (msleep_interruptible(1))
return -EINTR;
timeout--;
} while (timeout);
if (val == LCD_BUSY)
retval = -EBUSY;
return retval;
}
static void lcd_clear(struct fb_info *info)
{
int i;
for (i = 0; i < 4; i++) {
udelay(150);
lcd_write_control(info, LCD_PRERESET);
}
udelay(150);
lcd_write_control(info, LCD_CLEAR);
udelay(150);
lcd_write_control(info, LCD_RESET);
}
static struct fb_fix_screeninfo cobalt_lcdfb_fix __devinitdata = {
.id = "cobalt-lcd",
.type = FB_TYPE_TEXT,
.type_aux = FB_AUX_TEXT_MDA,
.visual = FB_VISUAL_MONO01,
.line_length = LCD_XRES_MAX,
.accel = FB_ACCEL_NONE,
};
static ssize_t cobalt_lcdfb_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos)
{
char src[LCD_CHARS_MAX];
unsigned long pos;
int len, retval = 0;
pos = *ppos;
if (pos >= LCD_CHARS_MAX || count == 0)
return 0;
if (count > LCD_CHARS_MAX)
count = LCD_CHARS_MAX;
if (pos + count > LCD_CHARS_MAX)
count = LCD_CHARS_MAX - pos;
for (len = 0; len < count; len++) {
retval = lcd_busy_wait(info);
if (retval < 0)
break;
lcd_write_control(info, LCD_TEXT_POS(pos));
retval = lcd_busy_wait(info);
if (retval < 0)
break;
src[len] = lcd_read_data(info);
if (pos == 0x0f)
pos = 0x40;
else
pos++;
}
if (retval < 0 && signal_pending(current))
return -ERESTARTSYS;
if (copy_to_user(buf, src, len))
return -EFAULT;
*ppos += len;
return len;
}
static ssize_t cobalt_lcdfb_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos)
{
char dst[LCD_CHARS_MAX];
unsigned long pos;
int len, retval = 0;
pos = *ppos;
if (pos >= LCD_CHARS_MAX || count == 0)
return 0;
if (count > LCD_CHARS_MAX)
count = LCD_CHARS_MAX;
if (pos + count > LCD_CHARS_MAX)
count = LCD_CHARS_MAX - pos;
if (copy_from_user(dst, buf, count))
return -EFAULT;
for (len = 0; len < count; len++) {
retval = lcd_busy_wait(info);
if (retval < 0)
break;
lcd_write_control(info, LCD_TEXT_POS(pos));
retval = lcd_busy_wait(info);
if (retval < 0)
break;
lcd_write_data(info, dst[len]);
if (pos == 0x0f)
pos = 0x40;
else
pos++;
}
if (retval < 0 && signal_pending(current))
return -ERESTARTSYS;
*ppos += len;
return len;
}
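/*
 * Note (illustrative): the pos wrap from 0x0f to 0x40 in the read/write
 * loops above follows the cursor address map in the diagram at the top of
 * this file: row 0 ends at address 0x0f and row 1 starts at 0x40.
 */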
static int cobalt_lcdfb_blank(int blank_mode, struct fb_info *info)
{
int retval;
retval = lcd_busy_wait(info);
if (retval < 0)
return retval;
switch (blank_mode) {
case FB_BLANK_UNBLANK:
lcd_write_control(info, LCD_ON);
break;
default:
lcd_write_control(info, LCD_OFF);
break;
}
return 0;
}
static int cobalt_lcdfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
u32 x, y;
int retval;
switch (cursor->set) {
case FB_CUR_SETPOS:
x = cursor->image.dx;
y = cursor->image.dy;
if (x >= LCD_XRES_MAX || y >= LCD_YRES_MAX)
return -EINVAL;
retval = lcd_busy_wait(info);
if (retval < 0)
return retval;
lcd_write_control(info,
LCD_TEXT_POS(info->fix.line_length * y + x));
break;
default:
return -EINVAL;
}
retval = lcd_busy_wait(info);
if (retval < 0)
return retval;
if (cursor->enable)
lcd_write_control(info, LCD_CURSOR_ON);
else
lcd_write_control(info, LCD_CURSOR_OFF);
return 0;
}
static struct fb_ops cobalt_lcd_fbops = {
.owner = THIS_MODULE,
.fb_read = cobalt_lcdfb_read,
.fb_write = cobalt_lcdfb_write,
.fb_blank = cobalt_lcdfb_blank,
.fb_cursor = cobalt_lcdfb_cursor,
};
static int __devinit cobalt_lcdfb_probe(struct platform_device *dev)
{
struct fb_info *info;
struct resource *res;
int retval;
info = framebuffer_alloc(0, &dev->dev);
if (!info)
return -ENOMEM;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (!res) {
framebuffer_release(info);
return -EBUSY;
}
info->screen_size = resource_size(res);
info->screen_base = ioremap(res->start, info->screen_size);
info->fbops = &cobalt_lcd_fbops;
info->fix = cobalt_lcdfb_fix;
info->fix.smem_start = res->start;
info->fix.smem_len = info->screen_size;
info->pseudo_palette = NULL;
info->par = NULL;
info->flags = FBINFO_DEFAULT;
retval = register_framebuffer(info);
if (retval < 0) {
iounmap(info->screen_base);
framebuffer_release(info);
return retval;
}
platform_set_drvdata(dev, info);
lcd_clear(info);
printk(KERN_INFO "fb%d: Cobalt server LCD frame buffer device\n",
info->node);
return 0;
}
static int __devexit cobalt_lcdfb_remove(struct platform_device *dev)
{
struct fb_info *info;
info = platform_get_drvdata(dev);
if (info) {
iounmap(info->screen_base);
unregister_framebuffer(info);
framebuffer_release(info);
}
return 0;
}
static struct platform_driver cobalt_lcdfb_driver = {
.probe = cobalt_lcdfb_probe,
.remove = __devexit_p(cobalt_lcdfb_remove),
.driver = {
.name = "cobalt-lcd",
.owner = THIS_MODULE,
},
};
static int __init cobalt_lcdfb_init(void)
{
return platform_driver_register(&cobalt_lcdfb_driver);
}
static void __exit cobalt_lcdfb_exit(void)
{
platform_driver_unregister(&cobalt_lcdfb_driver);
}
module_init(cobalt_lcdfb_init);
module_exit(cobalt_lcdfb_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoichi Yuasa");
MODULE_DESCRIPTION("Cobalt server LCD frame buffer driver");
| gpl-2.0 |
halaszk/Perseus-halaszk-universal5433 | drivers/media/i2c/cx25840/cx25840-audio.c | 12484 | 14919 | /* cx25840 audio functions
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/videodev2.h>
#include <linux/i2c.h>
#include <media/v4l2-common.h>
#include <media/cx25840.h>
#include "cx25840-core.h"
/*
* Note: The PLL and SRC parameters are based on a reference frequency that
* would ideally be:
*
* NTSC Color subcarrier freq * 8 = 4.5 MHz/286 * 455/2 * 8 = 28.63636363... MHz
*
* However, it's not the exact reference frequency that matters, only that the
* firmware and modules that comprise the driver for a particular board all
* use the same value (close to the ideal value).
*
* Comments below will note which reference frequency is assumed for various
* parameters. They will usually be one of
*
* ref_freq = 28.636360 MHz
* or
* ref_freq = 28.636363 MHz
*/
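/*
 * Worked example (illustrative): the src3/4/6 ratio programmed below for
 * 48 kHz, 0x1.4faa, is (4 * 28636360/8 * 2/455) / 48000 = 62937.05... /
 * 48000 ~= 1.3112, and 0x4faa / 0x10000 ~= 0.3112.
 */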
static int cx25840_set_audclk_freq(struct i2c_client *client, u32 freq)
{
struct cx25840_state *state = to_state(i2c_get_clientdata(client));
if (state->aud_input != CX25840_AUDIO_SERIAL) {
switch (freq) {
case 32000:
/*
* VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04
* AUX_PLL Integer = 0x06, AUX PLL Post Divider = 0x10
*/
cx25840_write4(client, 0x108, 0x1006040f);
/*
* VID_PLL Fraction (register 0x10c) = 0x2be2fe
* 28636360 * 0xf.15f17f0/4 = 108 MHz
* 432 MHz pre-postdivide
*/
/*
* AUX_PLL Fraction = 0x1bb39ee
* 28636363 * 0x6.dd9cf70/0x10 = 32000 * 384
* 196.6 MHz pre-postdivide
* FIXME < 200 MHz is out of specified valid range
* FIXME 28636363 ref_freq doesn't match VID PLL ref
*/
cx25840_write4(client, 0x110, 0x01bb39ee);
/*
* SA_MCLK_SEL = 1
* SA_MCLK_DIV = 0x10 = 384/384 * AUX_PLL post divider
*/
cx25840_write(client, 0x127, 0x50);
if (is_cx2583x(state))
break;
/* src3/4/6_ctl */
/* 0x1.f77f = (4 * 28636360/8 * 2/455) / 32000 */
cx25840_write4(client, 0x900, 0x0801f77f);
cx25840_write4(client, 0x904, 0x0801f77f);
cx25840_write4(client, 0x90c, 0x0801f77f);
break;
case 44100:
/*
* VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04
* AUX_PLL Integer = 0x09, AUX PLL Post Divider = 0x10
*/
cx25840_write4(client, 0x108, 0x1009040f);
/*
* VID_PLL Fraction (register 0x10c) = 0x2be2fe
* 28636360 * 0xf.15f17f0/4 = 108 MHz
* 432 MHz pre-postdivide
*/
/*
* AUX_PLL Fraction = 0x0ec6bd6
* 28636363 * 0x9.7635eb0/0x10 = 44100 * 384
* 271 MHz pre-postdivide
* FIXME 28636363 ref_freq doesn't match VID PLL ref
*/
cx25840_write4(client, 0x110, 0x00ec6bd6);
/*
* SA_MCLK_SEL = 1
* SA_MCLK_DIV = 0x10 = 384/384 * AUX_PLL post divider
*/
cx25840_write(client, 0x127, 0x50);
if (is_cx2583x(state))
break;
/* src3/4/6_ctl */
/* 0x1.6d59 = (4 * 28636360/8 * 2/455) / 44100 */
cx25840_write4(client, 0x900, 0x08016d59);
cx25840_write4(client, 0x904, 0x08016d59);
cx25840_write4(client, 0x90c, 0x08016d59);
break;
case 48000:
/*
* VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04
* AUX_PLL Integer = 0x0a, AUX PLL Post Divider = 0x10
*/
cx25840_write4(client, 0x108, 0x100a040f);
/*
* VID_PLL Fraction (register 0x10c) = 0x2be2fe
* 28636360 * 0xf.15f17f0/4 = 108 MHz
* 432 MHz pre-postdivide
*/
/*
* AUX_PLL Fraction = 0x098d6e5
* 28636363 * 0xa.4c6b728/0x10 = 48000 * 384
* 295 MHz pre-postdivide
* FIXME 28636363 ref_freq doesn't match VID PLL ref
*/
cx25840_write4(client, 0x110, 0x0098d6e5);
/*
* SA_MCLK_SEL = 1
* SA_MCLK_DIV = 0x10 = 384/384 * AUX_PLL post divider
*/
cx25840_write(client, 0x127, 0x50);
if (is_cx2583x(state))
break;
/* src3/4/6_ctl */
/* 0x1.4faa = (4 * 28636360/8 * 2/455) / 48000 */
cx25840_write4(client, 0x900, 0x08014faa);
cx25840_write4(client, 0x904, 0x08014faa);
cx25840_write4(client, 0x90c, 0x08014faa);
break;
}
} else {
switch (freq) {
case 32000:
/*
* VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04
* AUX_PLL Integer = 0x08, AUX PLL Post Divider = 0x1e
*/
cx25840_write4(client, 0x108, 0x1e08040f);
/*
* VID_PLL Fraction (register 0x10c) = 0x2be2fe
* 28636360 * 0xf.15f17f0/4 = 108 MHz
* 432 MHz pre-postdivide
*/
/*
* AUX_PLL Fraction = 0x12a0869
* 28636363 * 0x8.9504348/0x1e = 32000 * 256
* 246 MHz pre-postdivide
* FIXME 28636363 ref_freq doesn't match VID PLL ref
*/
cx25840_write4(client, 0x110, 0x012a0869);
/*
* SA_MCLK_SEL = 1
* SA_MCLK_DIV = 0x14 = 256/384 * AUX_PLL post divider
*/
cx25840_write(client, 0x127, 0x54);
if (is_cx2583x(state))
break;
/* src1_ctl */
/* 0x1.0000 = 32000/32000 */
cx25840_write4(client, 0x8f8, 0x08010000);
/* src3/4/6_ctl */
/* 0x2.0000 = 2 * (32000/32000) */
cx25840_write4(client, 0x900, 0x08020000);
cx25840_write4(client, 0x904, 0x08020000);
cx25840_write4(client, 0x90c, 0x08020000);
break;
case 44100:
/*
* VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04
* AUX_PLL Integer = 0x09, AUX PLL Post Divider = 0x18
*/
cx25840_write4(client, 0x108, 0x1809040f);
/*
* VID_PLL Fraction (register 0x10c) = 0x2be2fe
* 28636360 * 0xf.15f17f0/4 = 108 MHz
* 432 MHz pre-postdivide
*/
/*
* AUX_PLL Fraction = 0x0ec6bd6
* 28636363 * 0x9.7635eb0/0x18 = 44100 * 256
* 271 MHz pre-postdivide
* FIXME 28636363 ref_freq doesn't match VID PLL ref
*/
cx25840_write4(client, 0x110, 0x00ec6bd6);
/*
* SA_MCLK_SEL = 1
* SA_MCLK_DIV = 0x10 = 256/384 * AUX_PLL post divider
*/
cx25840_write(client, 0x127, 0x50);
if (is_cx2583x(state))
break;
/* src1_ctl */
/* 0x1.60cd = 44100/32000 */
cx25840_write4(client, 0x8f8, 0x080160cd);
/* src3/4/6_ctl */
/* 0x1.7385 = 2 * (32000/44100) */
cx25840_write4(client, 0x900, 0x08017385);
cx25840_write4(client, 0x904, 0x08017385);
cx25840_write4(client, 0x90c, 0x08017385);
break;
case 48000:
/*
* VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04
* AUX_PLL Integer = 0x0a, AUX PLL Post Divider = 0x18
*/
cx25840_write4(client, 0x108, 0x180a040f);
/*
* VID_PLL Fraction (register 0x10c) = 0x2be2fe
* 28636360 * 0xf.15f17f0/4 = 108 MHz
* 432 MHz pre-postdivide
*/
/*
* AUX_PLL Fraction = 0x098d6e5
* 28636363 * 0xa.4c6b728/0x18 = 48000 * 256
* 295 MHz pre-postdivide
* FIXME 28636363 ref_freq doesn't match VID PLL ref
*/
cx25840_write4(client, 0x110, 0x0098d6e5);
/*
* SA_MCLK_SEL = 1
* SA_MCLK_DIV = 0x10 = 256/384 * AUX_PLL post divider
*/
cx25840_write(client, 0x127, 0x50);
if (is_cx2583x(state))
break;
/* src1_ctl */
/* 0x1.8000 = 48000/32000 */
cx25840_write4(client, 0x8f8, 0x08018000);
/* src3/4/6_ctl */
/* 0x1.5555 = 2 * (32000/48000) */
cx25840_write4(client, 0x900, 0x08015555);
cx25840_write4(client, 0x904, 0x08015555);
cx25840_write4(client, 0x90c, 0x08015555);
break;
}
}
state->audclk_freq = freq;
return 0;
}
static inline int cx25836_set_audclk_freq(struct i2c_client *client, u32 freq)
{
return cx25840_set_audclk_freq(client, freq);
}
static int cx23885_set_audclk_freq(struct i2c_client *client, u32 freq)
{
struct cx25840_state *state = to_state(i2c_get_clientdata(client));
if (state->aud_input != CX25840_AUDIO_SERIAL) {
switch (freq) {
case 32000:
case 44100:
case 48000:
/* We don't have register values
* so avoid destroying registers. */
/* FIXME return -EINVAL; */
break;
}
} else {
switch (freq) {
case 32000:
case 44100:
/* We don't have register values
* so avoid destroying registers. */
/* FIXME return -EINVAL; */
break;
case 48000:
/* src1_ctl */
/* 0x1.867c = 48000 / (2 * 28636360/8 * 2/455) */
cx25840_write4(client, 0x8f8, 0x0801867c);
/* src3/4/6_ctl */
/* 0x1.4faa = (4 * 28636360/8 * 2/455) / 48000 */
cx25840_write4(client, 0x900, 0x08014faa);
cx25840_write4(client, 0x904, 0x08014faa);
cx25840_write4(client, 0x90c, 0x08014faa);
break;
}
}
state->audclk_freq = freq;
return 0;
}
static int cx231xx_set_audclk_freq(struct i2c_client *client, u32 freq)
{
struct cx25840_state *state = to_state(i2c_get_clientdata(client));
if (state->aud_input != CX25840_AUDIO_SERIAL) {
switch (freq) {
case 32000:
/* src3/4/6_ctl */
/* 0x1.f77f = (4 * 28636360/8 * 2/455) / 32000 */
cx25840_write4(client, 0x900, 0x0801f77f);
cx25840_write4(client, 0x904, 0x0801f77f);
cx25840_write4(client, 0x90c, 0x0801f77f);
break;
case 44100:
/* src3/4/6_ctl */
/* 0x1.6d59 = (4 * 28636360/8 * 2/455) / 44100 */
cx25840_write4(client, 0x900, 0x08016d59);
cx25840_write4(client, 0x904, 0x08016d59);
cx25840_write4(client, 0x90c, 0x08016d59);
break;
case 48000:
/* src3/4/6_ctl */
/* 0x1.4faa = (4 * 28636360/8 * 2/455) / 48000 */
cx25840_write4(client, 0x900, 0x08014faa);
cx25840_write4(client, 0x904, 0x08014faa);
cx25840_write4(client, 0x90c, 0x08014faa);
break;
}
} else {
switch (freq) {
/* FIXME These cases make different assumptions about audclk */
case 32000:
/* src1_ctl */
/* 0x1.0000 = 32000/32000 */
cx25840_write4(client, 0x8f8, 0x08010000);
/* src3/4/6_ctl */
/* 0x2.0000 = 2 * (32000/32000) */
cx25840_write4(client, 0x900, 0x08020000);
cx25840_write4(client, 0x904, 0x08020000);
cx25840_write4(client, 0x90c, 0x08020000);
break;
case 44100:
/* src1_ctl */
/* 0x1.60cd = 44100/32000 */
cx25840_write4(client, 0x8f8, 0x080160cd);
/* src3/4/6_ctl */
/* 0x1.7385 = 2 * (32000/44100) */
cx25840_write4(client, 0x900, 0x08017385);
cx25840_write4(client, 0x904, 0x08017385);
cx25840_write4(client, 0x90c, 0x08017385);
break;
case 48000:
/* src1_ctl */
/* 0x1.867c = 48000 / (2 * 28636360/8 * 2/455) */
cx25840_write4(client, 0x8f8, 0x0801867c);
/* src3/4/6_ctl */
/* 0x1.4faa = (4 * 28636360/8 * 2/455) / 48000 */
cx25840_write4(client, 0x900, 0x08014faa);
cx25840_write4(client, 0x904, 0x08014faa);
cx25840_write4(client, 0x90c, 0x08014faa);
break;
}
}
state->audclk_freq = freq;
return 0;
}
static int set_audclk_freq(struct i2c_client *client, u32 freq)
{
struct cx25840_state *state = to_state(i2c_get_clientdata(client));
if (freq != 32000 && freq != 44100 && freq != 48000)
return -EINVAL;
if (is_cx231xx(state))
return cx231xx_set_audclk_freq(client, freq);
if (is_cx2388x(state))
return cx23885_set_audclk_freq(client, freq);
if (is_cx2583x(state))
return cx25836_set_audclk_freq(client, freq);
return cx25840_set_audclk_freq(client, freq);
}
void cx25840_audio_set_path(struct i2c_client *client)
{
struct cx25840_state *state = to_state(i2c_get_clientdata(client));
if (!is_cx2583x(state)) {
/* assert soft reset */
cx25840_and_or(client, 0x810, ~0x1, 0x01);
/* stop microcontroller */
cx25840_and_or(client, 0x803, ~0x10, 0);
/* Mute everything to prevent the PFFT! */
cx25840_write(client, 0x8d3, 0x1f);
if (state->aud_input == CX25840_AUDIO_SERIAL) {
/* Set Path1 to Serial Audio Input */
cx25840_write4(client, 0x8d0, 0x01011012);
/* The microcontroller should not be started for the
* non-tuner inputs: autodetection is specific for
* TV audio. */
} else {
/* Set Path1 to Analog Demod Main Channel */
cx25840_write4(client, 0x8d0, 0x1f063870);
}
}
set_audclk_freq(client, state->audclk_freq);
if (!is_cx2583x(state)) {
if (state->aud_input != CX25840_AUDIO_SERIAL) {
/* When the microcontroller detects the
* audio format, it will unmute the lines */
cx25840_and_or(client, 0x803, ~0x10, 0x10);
}
/* deassert soft reset */
cx25840_and_or(client, 0x810, ~0x1, 0x00);
/* Ensure the controller is running when we exit */
if (is_cx2388x(state) || is_cx231xx(state))
cx25840_and_or(client, 0x803, ~0x10, 0x10);
}
}
static void set_volume(struct i2c_client *client, int volume)
{
int vol;
/* Convert the volume to msp3400 values (0-127) */
vol = volume >> 9;
/* now scale it up to cx25840 values
* -114dB to -96dB maps to 0
* this should be 19, but in my testing that was 4dB too loud */
if (vol <= 23) {
vol = 0;
} else {
vol -= 23;
}
/* PATH1_VOLUME */
cx25840_write(client, 0x8d4, 228 - (vol * 2));
}
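/*
 * Illustrative example: a v4l2 volume of 0xffff gives vol = 0xffff >> 9 =
 * 127, then 127 - 23 = 104, so PATH1_VOLUME is written as 228 - 2 * 104 =
 * 20; scaled volumes at or below 23 clamp to the maximum attenuation
 * value of 228.
 */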
static void set_balance(struct i2c_client *client, int balance)
{
int bal = balance >> 8;
if (bal > 0x80) {
/* PATH1_BAL_LEFT */
cx25840_and_or(client, 0x8d5, 0x7f, 0x80);
/* PATH1_BAL_LEVEL */
cx25840_and_or(client, 0x8d5, ~0x7f, bal & 0x7f);
} else {
/* PATH1_BAL_LEFT */
cx25840_and_or(client, 0x8d5, 0x7f, 0x00);
/* PATH1_BAL_LEVEL */
cx25840_and_or(client, 0x8d5, ~0x7f, 0x80 - bal);
}
}
int cx25840_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct cx25840_state *state = to_state(sd);
int retval;
if (!is_cx2583x(state))
cx25840_and_or(client, 0x810, ~0x1, 1);
if (state->aud_input != CX25840_AUDIO_SERIAL) {
cx25840_and_or(client, 0x803, ~0x10, 0);
cx25840_write(client, 0x8d3, 0x1f);
}
retval = set_audclk_freq(client, freq);
if (state->aud_input != CX25840_AUDIO_SERIAL)
cx25840_and_or(client, 0x803, ~0x10, 0x10);
if (!is_cx2583x(state))
cx25840_and_or(client, 0x810, ~0x1, 0);
return retval;
}
static int cx25840_audio_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
switch (ctrl->id) {
case V4L2_CID_AUDIO_VOLUME:
if (state->mute->val)
set_volume(client, 0);
else
set_volume(client, state->volume->val);
break;
case V4L2_CID_AUDIO_BASS:
/* PATH1_EQ_BASS_VOL */
cx25840_and_or(client, 0x8d9, ~0x3f,
48 - (ctrl->val * 48 / 0xffff));
break;
case V4L2_CID_AUDIO_TREBLE:
/* PATH1_EQ_TREBLE_VOL */
cx25840_and_or(client, 0x8db, ~0x3f,
48 - (ctrl->val * 48 / 0xffff));
break;
case V4L2_CID_AUDIO_BALANCE:
set_balance(client, ctrl->val);
break;
default:
return -EINVAL;
}
return 0;
}
const struct v4l2_ctrl_ops cx25840_audio_ctrl_ops = {
.s_ctrl = cx25840_audio_s_ctrl,
};
| gpl-2.0 |
janztec/empc-arpi-linux | arch/ia64/kernel/unwind_decoder.c | 14020 | 12293 | /*
* Copyright (C) 2000 Hewlett-Packard Co
* Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
*
* Generic IA-64 unwind info decoder.
*
* This file is used both by the Linux kernel and objdump. Please keep
* the two copies of this file in sync.
*
* You need to customize the decoder by defining the following
* macros/constants before including this file:
*
* Types:
* unw_word Unsigned integer type with at least 64 bits
*
* Register names:
* UNW_REG_BSP
* UNW_REG_BSPSTORE
* UNW_REG_FPSR
* UNW_REG_LC
* UNW_REG_PFS
* UNW_REG_PR
* UNW_REG_RNAT
* UNW_REG_PSP
* UNW_REG_RP
* UNW_REG_UNAT
*
* Decoder action macros:
* UNW_DEC_BAD_CODE(code)
* UNW_DEC_ABI(fmt,abi,context,arg)
* UNW_DEC_BR_GR(fmt,brmask,gr,arg)
* UNW_DEC_BR_MEM(fmt,brmask,arg)
* UNW_DEC_COPY_STATE(fmt,label,arg)
* UNW_DEC_EPILOGUE(fmt,t,ecount,arg)
* UNW_DEC_FRGR_MEM(fmt,grmask,frmask,arg)
* UNW_DEC_FR_MEM(fmt,frmask,arg)
* UNW_DEC_GR_GR(fmt,grmask,gr,arg)
* UNW_DEC_GR_MEM(fmt,grmask,arg)
* UNW_DEC_LABEL_STATE(fmt,label,arg)
* UNW_DEC_MEM_STACK_F(fmt,t,size,arg)
* UNW_DEC_MEM_STACK_V(fmt,t,arg)
* UNW_DEC_PRIUNAT_GR(fmt,r,arg)
* UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)
* UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)
* UNW_DEC_PRIUNAT_WHEN_PSPREL(fmt,pspoff,arg)
* UNW_DEC_PRIUNAT_WHEN_SPREL(fmt,spoff,arg)
* UNW_DEC_PROLOGUE(fmt,body,rlen,arg)
* UNW_DEC_PROLOGUE_GR(fmt,rlen,mask,grsave,arg)
* UNW_DEC_REG_PSPREL(fmt,reg,pspoff,arg)
* UNW_DEC_REG_REG(fmt,src,dst,arg)
* UNW_DEC_REG_SPREL(fmt,reg,spoff,arg)
* UNW_DEC_REG_WHEN(fmt,reg,t,arg)
* UNW_DEC_RESTORE(fmt,t,abreg,arg)
* UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg)
* UNW_DEC_SPILL_BASE(fmt,pspoff,arg)
* UNW_DEC_SPILL_MASK(fmt,imaskp,arg)
* UNW_DEC_SPILL_PSPREL(fmt,t,abreg,pspoff,arg)
* UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg)
* UNW_DEC_SPILL_REG(fmt,t,abreg,x,ytreg,arg)
* UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg)
* UNW_DEC_SPILL_SPREL(fmt,t,abreg,spoff,arg)
* UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,spoff,arg)
*/
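/*
 * Illustrative only: a minimal sketch of how a consumer customizes this
 * decoder before including it.  Only two of the UNW_DEC_* action macros
 * are stubbed out here; a real client (the kernel's unwind.c, for
 * instance) must define every macro and register name listed above.
 */
#if 0
typedef unsigned long unw_word;		/* 64 bits on ia64 */

#define UNW_DEC_PROLOGUE(fmt, body, rlen, arg)				\
	printk("prologue%s: rlen=%lu\n",				\
	       (body) ? " (body)" : "", (unsigned long) (rlen))
#define UNW_DEC_BAD_CODE(code)						\
	printk("bad descriptor code 0x%x\n", (unsigned int) (code))
/* ... definitions for all remaining UNW_DEC_* / UNW_REG_* names ... */

#include "unwind_decoder.c"
#endif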
static unw_word
unw_decode_uleb128 (unsigned char **dpp)
{
unsigned shift = 0;
unw_word byte, result = 0;
unsigned char *bp = *dpp;
while (1)
{
byte = *bp++;
result |= (byte & 0x7f) << shift;
if ((byte & 0x80) == 0)
break;
shift += 7;
}
*dpp = bp;
return result;
}
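/*
 * ULEB128 refresher: each byte contributes its low seven bits, least
 * significant group first, and bit 7 set means "more bytes follow".
 * For example, the byte sequence 0xe5 0x8e 0x26 decodes as
 *   0x65 | (0x0e << 7) | (0x26 << 14) = 101 + 1792 + 622592 = 624485.
 */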
static unsigned char *
unw_decode_x1 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char byte1, abreg;
unw_word t, off;
byte1 = *dp++;
t = unw_decode_uleb128 (&dp);
off = unw_decode_uleb128 (&dp);
abreg = (byte1 & 0x7f);
if (byte1 & 0x80)
UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg);
else
UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg);
return dp;
}
static unsigned char *
unw_decode_x2 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char byte1, byte2, abreg, x, ytreg;
unw_word t;
byte1 = *dp++; byte2 = *dp++;
t = unw_decode_uleb128 (&dp);
abreg = (byte1 & 0x7f);
ytreg = byte2;
x = (byte1 >> 7) & 1;
if ((byte1 & 0x80) == 0 && ytreg == 0)
UNW_DEC_RESTORE(X2, t, abreg, arg);
else
UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg);
return dp;
}
static unsigned char *
unw_decode_x3 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char byte1, byte2, abreg, qp;
unw_word t, off;
byte1 = *dp++; byte2 = *dp++;
t = unw_decode_uleb128 (&dp);
off = unw_decode_uleb128 (&dp);
qp = (byte1 & 0x3f);
abreg = (byte2 & 0x7f);
if (byte1 & 0x80)
UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg);
else
UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg);
return dp;
}
static unsigned char *
unw_decode_x4 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg;
unw_word t;
byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
t = unw_decode_uleb128 (&dp);
qp = (byte1 & 0x3f);
abreg = (byte2 & 0x7f);
x = (byte2 >> 7) & 1;
ytreg = byte3;
if ((byte2 & 0x80) == 0 && byte3 == 0)
UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg);
else
UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg);
return dp;
}
static unsigned char *
unw_decode_r1 (unsigned char *dp, unsigned char code, void *arg)
{
int body = (code & 0x20) != 0;
unw_word rlen;
rlen = (code & 0x1f);
UNW_DEC_PROLOGUE(R1, body, rlen, arg);
return dp;
}
static unsigned char *
unw_decode_r2 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char byte1, mask, grsave;
unw_word rlen;
byte1 = *dp++;
mask = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
grsave = (byte1 & 0x7f);
rlen = unw_decode_uleb128 (&dp);
UNW_DEC_PROLOGUE_GR(R2, rlen, mask, grsave, arg);
return dp;
}
static unsigned char *
unw_decode_r3 (unsigned char *dp, unsigned char code, void *arg)
{
unw_word rlen;
rlen = unw_decode_uleb128 (&dp);
UNW_DEC_PROLOGUE(R3, ((code & 0x3) == 1), rlen, arg);
return dp;
}
static unsigned char *
unw_decode_p1 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char brmask = (code & 0x1f);
UNW_DEC_BR_MEM(P1, brmask, arg);
return dp;
}
static unsigned char *
unw_decode_p2_p5 (unsigned char *dp, unsigned char code, void *arg)
{
if ((code & 0x10) == 0)
{
unsigned char byte1 = *dp++;
UNW_DEC_BR_GR(P2, ((code & 0xf) << 1) | ((byte1 >> 7) & 1),
(byte1 & 0x7f), arg);
}
else if ((code & 0x08) == 0)
{
unsigned char byte1 = *dp++, r, dst;
r = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
dst = (byte1 & 0x7f);
switch (r)
{
case 0: UNW_DEC_REG_GR(P3, UNW_REG_PSP, dst, arg); break;
case 1: UNW_DEC_REG_GR(P3, UNW_REG_RP, dst, arg); break;
case 2: UNW_DEC_REG_GR(P3, UNW_REG_PFS, dst, arg); break;
case 3: UNW_DEC_REG_GR(P3, UNW_REG_PR, dst, arg); break;
case 4: UNW_DEC_REG_GR(P3, UNW_REG_UNAT, dst, arg); break;
case 5: UNW_DEC_REG_GR(P3, UNW_REG_LC, dst, arg); break;
case 6: UNW_DEC_RP_BR(P3, dst, arg); break;
case 7: UNW_DEC_REG_GR(P3, UNW_REG_RNAT, dst, arg); break;
case 8: UNW_DEC_REG_GR(P3, UNW_REG_BSP, dst, arg); break;
case 9: UNW_DEC_REG_GR(P3, UNW_REG_BSPSTORE, dst, arg); break;
case 10: UNW_DEC_REG_GR(P3, UNW_REG_FPSR, dst, arg); break;
case 11: UNW_DEC_PRIUNAT_GR(P3, dst, arg); break;
default: UNW_DEC_BAD_CODE(r); break;
}
}
else if ((code & 0x7) == 0)
UNW_DEC_SPILL_MASK(P4, dp, arg);
else if ((code & 0x7) == 1)
{
unw_word grmask, frmask, byte1, byte2, byte3;
byte1 = *dp++; byte2 = *dp++; byte3 = *dp++;
grmask = ((byte1 >> 4) & 0xf);
frmask = ((byte1 & 0xf) << 16) | (byte2 << 8) | byte3;
UNW_DEC_FRGR_MEM(P5, grmask, frmask, arg);
}
else
UNW_DEC_BAD_CODE(code);
return dp;
}
static unsigned char *
unw_decode_p6 (unsigned char *dp, unsigned char code, void *arg)
{
int gregs = (code & 0x10) != 0;
unsigned char mask = (code & 0x0f);
if (gregs)
UNW_DEC_GR_MEM(P6, mask, arg);
else
UNW_DEC_FR_MEM(P6, mask, arg);
return dp;
}
static unsigned char *
unw_decode_p7_p10 (unsigned char *dp, unsigned char code, void *arg)
{
unsigned char r, byte1, byte2;
unw_word t, size;
if ((code & 0x10) == 0)
{
r = (code & 0xf);
t = unw_decode_uleb128 (&dp);
switch (r)
{
case 0:
size = unw_decode_uleb128 (&dp);
UNW_DEC_MEM_STACK_F(P7, t, size, arg);
break;
case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break;
case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break;
case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break;
case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, t, arg); break;
case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, t, arg); break;
case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break;
case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break;
case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break;
case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break;
case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break;
case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break;
case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break;
case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break;
case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break;
case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break;
default: UNW_DEC_BAD_CODE(r); break;
}
}
else
{
switch (code & 0xf)
{
case 0x0: /* p8 */
{
r = *dp++;
t = unw_decode_uleb128 (&dp);
switch (r)
{
case 1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break;
case 2: UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break;
case 3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break;
case 4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break;
case 5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break;
case 6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break;
case 7: UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break;
case 8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break;
case 9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break;
case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break;
case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break;
case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break;
case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break;
case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break;
case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break;
case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break;
case 19: UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break;
default: UNW_DEC_BAD_CODE(r); break;
}
}
break;
case 0x1:
byte1 = *dp++; byte2 = *dp++;
UNW_DEC_GR_GR(P9, (byte1 & 0xf), (byte2 & 0x7f), arg);
break;
case 0xf: /* p10 */
byte1 = *dp++; byte2 = *dp++;
UNW_DEC_ABI(P10, byte1, byte2, arg);
break;
case 0x9:
return unw_decode_x1 (dp, code, arg);
case 0xa:
return unw_decode_x2 (dp, code, arg);
case 0xb:
return unw_decode_x3 (dp, code, arg);
case 0xc:
return unw_decode_x4 (dp, code, arg);
default:
UNW_DEC_BAD_CODE(code);
break;
}
}
return dp;
}
static unsigned char *
unw_decode_b1 (unsigned char *dp, unsigned char code, void *arg)
{
unw_word label = (code & 0x1f);
if ((code & 0x20) != 0)
UNW_DEC_COPY_STATE(B1, label, arg);
else
UNW_DEC_LABEL_STATE(B1, label, arg);
return dp;
}
static unsigned char *
unw_decode_b2 (unsigned char *dp, unsigned char code, void *arg)
{
unw_word t;
t = unw_decode_uleb128 (&dp);
UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg);
return dp;
}
static unsigned char *
unw_decode_b3_x4 (unsigned char *dp, unsigned char code, void *arg)
{
unw_word t, ecount, label;
if ((code & 0x10) == 0)
{
t = unw_decode_uleb128 (&dp);
ecount = unw_decode_uleb128 (&dp);
UNW_DEC_EPILOGUE(B3, t, ecount, arg);
}
else if ((code & 0x07) == 0)
{
label = unw_decode_uleb128 (&dp);
if ((code & 0x08) != 0)
UNW_DEC_COPY_STATE(B4, label, arg);
else
UNW_DEC_LABEL_STATE(B4, label, arg);
}
else
switch (code & 0x7)
{
case 1: return unw_decode_x1 (dp, code, arg);
case 2: return unw_decode_x2 (dp, code, arg);
case 3: return unw_decode_x3 (dp, code, arg);
case 4: return unw_decode_x4 (dp, code, arg);
default: UNW_DEC_BAD_CODE(code); break;
}
return dp;
}
typedef unsigned char *(*unw_decoder) (unsigned char *, unsigned char, void *);
static unw_decoder unw_decode_table[2][8] =
{
/* prologue table: */
{
unw_decode_r1, /* 0 */
unw_decode_r1,
unw_decode_r2,
unw_decode_r3,
unw_decode_p1, /* 4 */
unw_decode_p2_p5,
unw_decode_p6,
unw_decode_p7_p10
},
{
unw_decode_r1, /* 0 */
unw_decode_r1,
unw_decode_r2,
unw_decode_r3,
unw_decode_b1, /* 4 */
unw_decode_b1,
unw_decode_b2,
unw_decode_b3_x4
}
};
/*
* Decode one descriptor and return address of next descriptor.
*/
static inline unsigned char *
unw_decode (unsigned char *dp, int inside_body, void *arg)
{
unw_decoder decoder;
unsigned char code;
code = *dp++;
decoder = unw_decode_table[inside_body][code >> 5];
dp = (*decoder) (dp, code, arg);
return dp;
}
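#if 0
/*
 * Illustrative only: how a client typically drives unw_decode() over a
 * region's descriptor area ('desc' and 'desc_end' are hypothetical).
 * The caller is responsible for switching inside_body to 1 once the
 * prologue descriptors of the current region have been consumed.
 */
static void unw_decode_region(unsigned char *desc, unsigned char *desc_end,
			      int inside_body, void *arg)
{
	while (desc < desc_end)
		desc = unw_decode(desc, inside_body, arg);
}
#endif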
| gpl-2.0 |
TeamExodus/kernel_samsung_smdk4412 | arch/arm/mach-exynos/p10-battery.c | 197 | 13715 | /*
* Copyright (C) 2012 Samsung Electronics, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/i2c-gpio.h>
#include <linux/gpio.h>
#include <mach/gpio-p10.h>
#include <mach/regs-pmu.h> /* S5P_INFORMX */
#include <plat/gpio-cfg.h>
#ifdef CONFIG_STMPE811_ADC
#include <linux/stmpe811-adc.h>
#endif
#if defined(CONFIG_BATTERY_SAMSUNG_P1X)
#include <linux/battery/sec_battery.h>
#include <linux/battery/sec_fuelgauge.h>
#include <linux/battery/sec_charger.h>
#define SEC_BATTERY_PMIC_NAME ""
#define SEC_FUELGAUGE_I2C_ID 9
#define SEC_CHARGER_I2C_ID 10
static bool sec_bat_adc_none_init(struct platform_device *pdev) { return true; }
static bool sec_bat_adc_none_exit(void) { return true; }
static int sec_bat_adc_none_read(unsigned int channel) { return 0; }
static bool sec_bat_adc_ap_init(struct platform_device *pdev) { return true; }
static bool sec_bat_adc_ap_exit(void) { return true; }
static int sec_bat_adc_ap_read(unsigned int channel) { return 0; }
/* CHECK ME */
#define STMPE811_CHANNEL_ADC_CHECK_1 6
#define STMPE811_CHANNEL_VICHG 4 /* Not supported in P10 */
static bool sec_bat_adc_ic_init(struct platform_device *pdev) { return true; }
static bool sec_bat_adc_ic_exit(void) { return true; }
static int sec_bat_adc_ic_read(unsigned int channel)
{
int data = 0;
int max_voltage = 3300;
switch (channel) {
case SEC_BAT_ADC_CHANNEL_CABLE_CHECK:
data = stmpe811_get_adc_data(STMPE811_CHANNEL_ADC_CHECK_1);
data = data * max_voltage / 4095; /* 12-bit ADC: 4095 is full scale */
break;
}
return data;
}
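/*
 * Scaling example: the STMPE811 ADC is 12-bit, so raw codes span
 * 0..4095 and the top code must read back as max_voltage exactly:
 *   raw = 2048 -> 2048 * 3300 / 4095 = 1650 mV (mid scale)
 *   raw = 4095 -> 4095 * 3300 / 4095 = 3300 mV (full scale)
 * Dividing by 4096 instead would leave the top code ~1 mV short.
 */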
static bool sec_bat_gpio_init(void)
{
#if defined(CONFIG_MACH_P10_LTE_00_BD) || defined(CONFIG_MACH_P10_WIFI_00_BD)
s3c_gpio_cfgpin(GPIO_TA_nCONNECTED, S3C_GPIO_INPUT);
s3c_gpio_setpull(GPIO_TA_nCONNECTED, S3C_GPIO_PULL_NONE);
#else
/* IRQ to detect cable insertion and removal */
s3c_gpio_cfgpin(GPIO_TA_INT, S3C_GPIO_INPUT);
s3c_gpio_setpull(GPIO_TA_INT, S3C_GPIO_PULL_NONE);
#endif
return true;
}
static bool sec_fg_gpio_init(void)
{
/* IRQ to detect low battery from fuel gauge */
s3c_gpio_cfgpin(GPIO_FUEL_ALERT, S3C_GPIO_INPUT);
s3c_gpio_setpull(GPIO_FUEL_ALERT, S3C_GPIO_PULL_UP);
return true;
}
static bool sec_chg_gpio_init(void)
{
s3c_gpio_cfgpin(GPIO_TA_EN, S3C_GPIO_OUTPUT);
s3c_gpio_setpull(GPIO_TA_EN, S3C_GPIO_PULL_UP);
/* gpio_set_value(GPIO_TA_EN, 1); */
s3c_gpio_cfgpin(GPIO_TA_nCHG, S3C_GPIO_INPUT);
s3c_gpio_setpull(GPIO_TA_nCHG, S3C_GPIO_PULL_UP);
#if defined(CONFIG_MACH_P10_LTE_00_BD) || defined(CONFIG_MACH_P10_WIFI_00_BD)
/* GPIO_CHG_INT not supported */
#else
/* IRQ to detect charger status change */
s3c_gpio_cfgpin(GPIO_CHG_INT, S3C_GPIO_INPUT);
s3c_gpio_setpull(GPIO_CHG_INT, S3C_GPIO_PULL_UP);
#endif
return true;
}
static bool sec_bat_is_lpm(void)
{
u32 val = __raw_readl(S5P_INFORM2);
pr_info("%s: LP charging: (INFORM2) 0x%x\n", __func__, val);
if (val == 0x1)
return true;
return false;
}
static void sec_bat_initial_check(void)
{
struct power_supply *psy = power_supply_get_by_name("battery");
union power_supply_propval value;
int ret = 0;
value.intval = gpio_get_value(GPIO_TA_nCONNECTED);
pr_debug("%s: %d\n", __func__, value.intval);
ret = psy->set_property(psy, POWER_SUPPLY_PROP_ONLINE, &value);
if (ret) {
pr_err("%s: fail to set power_suppy ONLINE property(%d)\n",
__func__, ret);
}
}
static bool sec_bat_check_jig_status(void)
{
/* TODO: */
return false;
}
static void sec_bat_switch_to_check(void)
{
pr_debug("%s\n", __func__);
s3c_gpio_cfgpin(GPIO_USB_SEL1, S3C_GPIO_OUTPUT);
s3c_gpio_setpull(GPIO_USB_SEL1, S3C_GPIO_PULL_NONE);
gpio_set_value(GPIO_USB_SEL1, 0);
mdelay(300);
}
static void sec_bat_switch_to_normal(void)
{
pr_debug("%s\n", __func__);
s3c_gpio_cfgpin(GPIO_USB_SEL1, S3C_GPIO_OUTPUT);
s3c_gpio_setpull(GPIO_USB_SEL1, S3C_GPIO_PULL_NONE);
gpio_set_value(GPIO_USB_SEL1, 1);
}
static int current_cable_type = POWER_SUPPLY_TYPE_BATTERY;
static int sec_bat_check_cable_callback(void)
{
return current_cable_type;
}
static bool sec_bat_check_cable_result_callback(
int cable_type)
{
current_cable_type = cable_type;
switch (cable_type) {
case POWER_SUPPLY_TYPE_USB:
pr_info("%s set vbus applied\n",
__func__);
break;
case POWER_SUPPLY_TYPE_BATTERY:
pr_info("%s set vbus cut\n",
__func__);
break;
case POWER_SUPPLY_TYPE_MAINS:
default:
pr_err("%s cable type (%d)\n",
__func__, cable_type);
return false;
}
return true;
}
/* callback for battery check
* return : bool
* true - battery detected, false battery NOT detected
*/
static bool sec_bat_check_callback(void) { return true; }
static bool sec_bat_check_result_callback(void) { return true; }
/* callback for OVP/UVLO check
* return : int
* battery health
*/
static int sec_bat_ovp_uvlo_callback(void)
{
int health;
health = POWER_SUPPLY_HEALTH_GOOD;
return health;
}
static bool sec_bat_ovp_uvlo_result_callback(int health) { return true; }
/*
* val.intval : temperature
*/
static bool sec_bat_get_temperature_callback(
enum power_supply_property psp,
union power_supply_propval *val) { return true; }
static bool sec_fg_fuelalert_process(bool is_fuel_alerted) { return true; }
/* ADC region should be exclusive */
static sec_bat_adc_region_t cable_adc_value_table[] = {
{ 0, 500 }, /* POWER_SUPPLY_TYPE_BATTERY */
{ 0, 0 }, /* POWER_SUPPLY_TYPE_UPS */
{ 1000, 1500 }, /* POWER_SUPPLY_TYPE_MAINS */
{ 0, 0 }, /* POWER_SUPPLY_TYPE_USB */
{ 0, 0 }, /* POWER_SUPPLY_TYPE_OTG */
{ 0, 0 }, /* POWER_SUPPLY_TYPE_DOCK */
{ 0, 0 }, /* POWER_SUPPLY_TYPE_MISC */
};
/* charging current (mA, 0 - NOT supported) */
/* matching with power_supply_type in power_supply.h */
static sec_charging_current_t charging_current_table[] = {
{0, 0, 0, 0}, /* POWER_SUPPLY_TYPE_BATTERY */
{0, 0, 0, 0}, /* POWER_SUPPLY_TYPE_UPS */
{2000, 2000, 256, 0}, /* POWER_SUPPLY_TYPE_MAINS */
{500, 500, 256, 0}, /* POWER_SUPPLY_TYPE_USB */
{500, 500, 256, 0}, /* POWER_SUPPLY_TYPE_USB_DCP */
{500, 500, 256, 0}, /* POWER_SUPPLY_TYPE_USB_CDP */
{500, 500, 256, 0}, /* POWER_SUPPLY_TYPE_USB_ACA */
{0, 0, 0, 0}, /* POWER_SUPPLY_TYPE_OTG */
{0, 0, 0, 0}, /* POWER_SUPPLY_TYPE_DOCK */
{500, 500, 256, 0}, /* POWER_SUPPLY_TYPE_MISC */
{0, 0, 0, 0}, /* POWER_SUPPLY_TYPE_WIRELESS */
};
/* unit: seconds */
static int polling_time_table[] = {
10, /* BASIC */
30, /* CHARGING */
30, /* DISCHARGING */
30, /* NOT_CHARGING */
300, /* SLEEP */
};
/* for MAX17050, MAX17047 */
static struct battery_data_t p10_battery_data[] = {
/* SDI battery data */
{
.Capacity = 0x2008,
.low_battery_comp_voltage = 3600,
.low_battery_table = {
/* range, slope, offset */
{-5000, 0, 0}, /* dummy for top limit */
{-1250, 0, 3320},
{-750, 97, 3451},
{-100, 96, 3461},
{0, 0, 3456},
},
.temp_adjust_table = {
/* range, slope, offset */
{47000, 122, 8950},
{60000, 200, 51000},
{100000, 0, 0}, /* dummy for top limit */
},
.type_str = "SDI",
}
};
static sec_battery_platform_data_t sec_battery_pdata = {
/* NO NEED TO BE CHANGED */
.initial_check = sec_bat_initial_check,
.bat_gpio_init = sec_bat_gpio_init,
.fg_gpio_init = sec_fg_gpio_init,
.chg_gpio_init = sec_chg_gpio_init,
.is_lpm = sec_bat_is_lpm,
.check_jig_status = sec_bat_check_jig_status,
.check_cable_callback =
sec_bat_check_cable_callback,
.cable_switch_check = sec_bat_switch_to_check,
.cable_switch_normal = sec_bat_switch_to_normal,
.check_cable_result_callback =
sec_bat_check_cable_result_callback,
.check_battery_callback =
sec_bat_check_callback,
.check_battery_result_callback =
sec_bat_check_result_callback,
.ovp_uvlo_callback = sec_bat_ovp_uvlo_callback,
.ovp_uvlo_result_callback =
sec_bat_ovp_uvlo_result_callback,
.fuelalert_process = sec_fg_fuelalert_process,
.get_temperature_callback =
sec_bat_get_temperature_callback,
.adc_api[SEC_BATTERY_ADC_TYPE_NONE] = {
.init = sec_bat_adc_none_init,
.exit = sec_bat_adc_none_exit,
.read = sec_bat_adc_none_read
},
.adc_api[SEC_BATTERY_ADC_TYPE_AP] = {
.init = sec_bat_adc_ap_init,
.exit = sec_bat_adc_ap_exit,
.read = sec_bat_adc_ap_read
},
.adc_api[SEC_BATTERY_ADC_TYPE_IC] = {
.init = sec_bat_adc_ic_init,
.exit = sec_bat_adc_ic_exit,
.read = sec_bat_adc_ic_read
},
.cable_adc_value = cable_adc_value_table,
.charging_current = charging_current_table,
.polling_time = polling_time_table,
/* NO NEED TO BE CHANGED */
.pmic_name = SEC_BATTERY_PMIC_NAME,
.adc_check_count = 7,
.adc_type = {
SEC_BATTERY_ADC_TYPE_IC, /* CABLE_CHECK */
SEC_BATTERY_ADC_TYPE_NONE, /* BAT_CHECK */
SEC_BATTERY_ADC_TYPE_NONE, /* TEMP */
SEC_BATTERY_ADC_TYPE_NONE, /* TEMP_AMB */
SEC_BATTERY_ADC_TYPE_NONE, /* FULL_CHECK */
},
/* Battery */
.vendor = "SDI SDI",
.technology = POWER_SUPPLY_TECHNOLOGY_LION,
.battery_data = (void *)p10_battery_data,
.bat_gpio_ta_nconnected = GPIO_TA_nCONNECTED,
.bat_polarity_ta_nconnected = 1, /* active HIGH */
.bat_irq = IRQ_EINT(0), /* GPIO_TA_INT */
.bat_irq_attr =
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
.cable_check_type =
SEC_BATTERY_CABLE_CHECK_NOUSBCHARGE |
SEC_BATTERY_CABLE_CHECK_INT,
.cable_source_type = SEC_BATTERY_CABLE_SOURCE_ADC,
.event_check = false,
.event_waiting_time = 60,
/* Monitor setting */
.polling_type = SEC_BATTERY_MONITOR_ALARM,
.monitor_initial_count = 3,
/* Battery check */
.battery_check_type = SEC_BATTERY_CHECK_NONE,
.check_count = 3,
/* Battery check by ADC */
.check_adc_max = 0,
.check_adc_min = 0,
/* OVP/UVLO check */
.ovp_uvlo_check_type = SEC_BATTERY_OVP_UVLO_CHGINT,
/* Temperature check */
.thermal_source = SEC_BATTERY_THERMAL_SOURCE_FG,
.temp_check_type = SEC_BATTERY_TEMP_CHECK_TEMP,
.temp_check_count = 3,
.temp_high_threshold_event = 650,
.temp_high_recovery_event = 450,
.temp_low_threshold_event = 0,
.temp_low_recovery_event = -50,
.temp_high_threshold_normal = 470,
.temp_high_recovery_normal = 400,
.temp_low_threshold_normal = 0,
.temp_low_recovery_normal = -30,
.temp_high_threshold_lpm = 600,
.temp_high_recovery_lpm = 420,
.temp_low_threshold_lpm = 2,
.temp_low_recovery_lpm = -30,
.full_check_type = SEC_BATTERY_FULLCHARGED_CHGGPIO,
.full_check_count = 3,
.full_check_adc_1st = 26500, /* CHECK ME */
.full_check_adc_2nd = 25800, /* CHECK ME */
.chg_gpio_full_check = GPIO_TA_nCHG, /* STAT of bq24191 */
.chg_polarity_full_check = 1,
.full_condition_type =
SEC_BATTERY_FULL_CONDITION_SOC |
SEC_BATTERY_FULL_CONDITION_OCV,
.full_condition_soc = 99,
.full_condition_ocv = 4170,
.recharge_condition_type =
SEC_BATTERY_RECHARGE_CONDITION_SOC |
SEC_BATTERY_RECHARGE_CONDITION_VCELL,
.recharge_condition_soc = 98,
.recharge_condition_avgvcell = 4150,
.recharge_condition_vcell = 4150,
.charging_total_time = 6 * 60 * 60,
.recharging_total_time = 90 * 60,
.charging_reset_time = 10 * 60,
/* Fuel Gauge */
.fg_irq = IRQ_EINT(19), /* GPIO_FUEL_ALERT */
.fg_irq_attr = IRQF_TRIGGER_LOW | IRQF_ONESHOT,
.fuel_alert_soc = 1,
.repeated_fuelalert = false,
.capacity_calculation_type =
SEC_FUELGAUGE_CAPACITY_TYPE_RAW,
/* SEC_FUELGAUGE_CAPACITY_TYPE_SCALE | */
/* SEC_FUELGAUGE_CAPACITY_TYPE_ATOMIC, */
.capacity_max = 1000,
.capacity_min = 0,
/* Charger */
.chg_gpio_en = GPIO_TA_EN,
.chg_polarity_en = 0, /* active LOW charge enable */
.chg_gpio_status = GPIO_TA_nCHG,
.chg_polarity_status = 0,
#if defined(CONFIG_MACH_P10_LTE_00_BD) || defined(CONFIG_MACH_P10_WIFI_00_BD)
.chg_irq = 0,
.chg_irq_attr = 0,
#else
.chg_irq = IRQ_EINT(4), /* GPIO_CHG_INT */
.chg_irq_attr = IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
#endif
.chg_float_voltage = 4200,
};
static struct platform_device sec_device_battery = {
.name = "sec-battery",
.id = -1,
.dev.platform_data = &sec_battery_pdata,
};
static struct i2c_gpio_platform_data gpio_i2c_data_fuelgauge = {
.sda_pin = GPIO_FUEL_SDA_18V,
.scl_pin = GPIO_FUEL_SCL_18V,
};
struct platform_device sec_device_fuelgauge = {
.name = "i2c-gpio",
.id = SEC_FUELGAUGE_I2C_ID,
.dev.platform_data = &gpio_i2c_data_fuelgauge,
};
static struct i2c_board_info sec_brdinfo_fuelgauge[] __initdata = {
{
I2C_BOARD_INFO("sec-fuelgauge",
SEC_FUELGAUGE_I2C_SLAVEADDR),
.platform_data = &sec_battery_pdata,
},
};
static struct i2c_gpio_platform_data gpio_i2c_data_charger = {
.sda_pin = GPIO_CHG_SDA_18V,
.scl_pin = GPIO_CHG_SCL_18V,
};
struct platform_device sec_device_charger = {
.name = "i2c-gpio",
.id = SEC_CHARGER_I2C_ID,
.dev.platform_data = &gpio_i2c_data_charger,
};
static struct i2c_board_info sec_brdinfo_charger[] __initdata = {
{
I2C_BOARD_INFO("sec-charger",
SEC_CHARGER_I2C_SLAVEADDR),
.platform_data = &sec_battery_pdata,
},
};
static struct platform_device *sec_battery_devices[] __initdata = {
&sec_device_charger,
&sec_device_fuelgauge,
&sec_device_battery,
};
void __init p10_battery_init(void)
{
platform_add_devices(
sec_battery_devices,
ARRAY_SIZE(sec_battery_devices));
i2c_register_board_info(
SEC_CHARGER_I2C_ID,
sec_brdinfo_charger,
ARRAY_SIZE(sec_brdinfo_charger));
i2c_register_board_info(
SEC_FUELGAUGE_I2C_ID,
sec_brdinfo_fuelgauge,
ARRAY_SIZE(sec_brdinfo_fuelgauge));
}
#endif /* CONFIG_BATTERY_SAMSUNG_P1X */
| gpl-2.0 |
NeverLEX/linux | drivers/media/dvb-frontends/rtl2832_sdr.c | 197 | 44199 | /*
* Realtek RTL2832U SDR driver
*
* Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* GNU Radio plugin "gr-kernel" for device usage will be on:
* http://git.linuxtv.org/anttip/gr-kernel.git
*
*/
#include "rtl2832_sdr.h"
#include "dvb_usb.h"
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/videobuf2-vmalloc.h>
#include <linux/platform_device.h>
#include <linux/jiffies.h>
#include <linux/math64.h>
static bool rtl2832_sdr_emulated_fmt;
module_param_named(emulated_formats, rtl2832_sdr_emulated_fmt, bool, 0644);
MODULE_PARM_DESC(emulated_formats, "enable emulated formats (disappears in future)");
/* Original macro does not contain enough null pointer checks for our need */
#define V4L2_SUBDEV_HAS_OP(sd, o, f) \
((sd) && (sd)->ops && (sd)->ops->o && (sd)->ops->o->f)
#define MAX_BULK_BUFS (10)
#define BULK_BUFFER_SIZE (128 * 512)
static const struct v4l2_frequency_band bands_adc[] = {
{
.tuner = 0,
.type = V4L2_TUNER_ADC,
.index = 0,
.capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
.rangelow = 300000,
.rangehigh = 300000,
},
{
.tuner = 0,
.type = V4L2_TUNER_ADC,
.index = 1,
.capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
.rangelow = 900001,
.rangehigh = 2800000,
},
{
.tuner = 0,
.type = V4L2_TUNER_ADC,
.index = 2,
.capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
.rangelow = 3200000,
.rangehigh = 3200000,
},
};
static const struct v4l2_frequency_band bands_fm[] = {
{
.tuner = 1,
.type = V4L2_TUNER_RF,
.index = 0,
.capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS,
.rangelow = 50000000,
.rangehigh = 2000000000,
},
};
/* stream formats */
struct rtl2832_sdr_format {
char *name;
u32 pixelformat;
u32 buffersize;
};
static struct rtl2832_sdr_format formats[] = {
{
.name = "Complex U8",
.pixelformat = V4L2_SDR_FMT_CU8,
.buffersize = BULK_BUFFER_SIZE,
}, {
.name = "Complex U16LE (emulated)",
.pixelformat = V4L2_SDR_FMT_CU16LE,
.buffersize = BULK_BUFFER_SIZE * 2,
},
};
static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats);
/* intermediate buffers with raw data from the USB device */
struct rtl2832_sdr_frame_buf {
struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */
struct list_head list;
};
struct rtl2832_sdr_dev {
#define POWER_ON 0 /* BIT(0) */
#define URB_BUF 1 /* BIT(1) */
unsigned long flags;
struct platform_device *pdev;
struct video_device vdev;
struct v4l2_device v4l2_dev;
struct v4l2_subdev *v4l2_subdev;
/* videobuf2 queue and queued buffers list */
struct vb2_queue vb_queue;
struct list_head queued_bufs;
spinlock_t queued_bufs_lock; /* Protects queued_bufs */
unsigned sequence; /* buffer sequence counter */
/* Note if taking both locks v4l2_lock must always be locked first! */
struct mutex v4l2_lock; /* Protects everything else */
struct mutex vb_queue_lock; /* Protects vb_queue and capt_file */
/* Pointer to our usb_device, will be NULL after unplug */
struct usb_device *udev; /* Both mutexes must be held when setting! */
unsigned int vb_full; /* vb is full and packets dropped */
struct urb *urb_list[MAX_BULK_BUFS];
int buf_num;
unsigned long buf_size;
u8 *buf_list[MAX_BULK_BUFS];
dma_addr_t dma_addr[MAX_BULK_BUFS];
int urbs_initialized;
int urbs_submitted;
unsigned int f_adc, f_tuner;
u32 pixelformat;
u32 buffersize;
unsigned int num_formats;
/* Controls */
struct v4l2_ctrl_handler hdl;
struct v4l2_ctrl *bandwidth_auto;
struct v4l2_ctrl *bandwidth;
/* for sample rate calc */
unsigned int sample;
unsigned int sample_measured;
unsigned long jiffies_next;
};
/* write multiple registers */
static int rtl2832_sdr_wr_regs(struct rtl2832_sdr_dev *dev, u16 reg,
const u8 *val, int len)
{
struct platform_device *pdev = dev->pdev;
struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
struct i2c_client *client = pdata->i2c_client;
return pdata->bulk_write(client, reg, val, len);
}
#if 0
/* read multiple registers */
static int rtl2832_sdr_rd_regs(struct rtl2832_sdr_dev *dev, u16 reg, u8 *val,
int len)
{
struct platform_device *pdev = dev->pdev;
struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
struct i2c_client *client = pdata->i2c_client;
return pdata->bulk_read(client, reg, val, len);
}
#endif
/* write single register */
static int rtl2832_sdr_wr_reg(struct rtl2832_sdr_dev *dev, u16 reg, u8 val)
{
return rtl2832_sdr_wr_regs(dev, reg, &val, 1);
}
/* write single register with mask */
static int rtl2832_sdr_wr_reg_mask(struct rtl2832_sdr_dev *dev, u16 reg,
u8 val, u8 mask)
{
struct platform_device *pdev = dev->pdev;
struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
struct i2c_client *client = pdata->i2c_client;
return pdata->update_bits(client, reg, mask, val);
}
/* Private functions */
static struct rtl2832_sdr_frame_buf *rtl2832_sdr_get_next_fill_buf(
struct rtl2832_sdr_dev *dev)
{
unsigned long flags;
struct rtl2832_sdr_frame_buf *buf = NULL;
spin_lock_irqsave(&dev->queued_bufs_lock, flags);
if (list_empty(&dev->queued_bufs))
goto leave;
buf = list_entry(dev->queued_bufs.next,
struct rtl2832_sdr_frame_buf, list);
list_del(&buf->list);
leave:
spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
return buf;
}
static unsigned int rtl2832_sdr_convert_stream(struct rtl2832_sdr_dev *dev,
void *dst, const u8 *src, unsigned int src_len)
{
struct platform_device *pdev = dev->pdev;
unsigned int dst_len;
if (dev->pixelformat == V4L2_SDR_FMT_CU8) {
/* native stream, no need to convert */
memcpy(dst, src, src_len);
dst_len = src_len;
} else if (dev->pixelformat == V4L2_SDR_FMT_CU16LE) {
/* widen u8 to u16: replicate each sample into both bytes
 * (val * 0x101) so full scale is preserved */
unsigned int i;
u16 *u16dst = dst;
for (i = 0; i < src_len; i++)
*u16dst++ = (src[i] << 8) | (src[i] >> 0);
dst_len = 2 * src_len;
} else {
dst_len = 0;
}
/* calculate sample rate and output it in 10 seconds intervals */
if (unlikely(time_is_before_jiffies(dev->jiffies_next))) {
#define MSECS 10000UL
unsigned int msecs = jiffies_to_msecs(jiffies -
dev->jiffies_next + msecs_to_jiffies(MSECS));
unsigned int samples = dev->sample - dev->sample_measured;
dev->jiffies_next = jiffies + msecs_to_jiffies(MSECS);
dev->sample_measured = dev->sample;
dev_dbg(&pdev->dev,
"slen=%u samples=%u msecs=%u sample rate=%lu\n",
src_len, samples, msecs, samples * 1000UL / msecs);
}
/* total number of I+Q pairs */
dev->sample += src_len / 2;
return dst_len;
}
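/*
 * Widening example: an 8-bit sample 0xab becomes (0xab << 8) | 0xab =
 * 0xabab, i.e. val * 0x101.  0x00 maps to 0x0000 and 0xff to 0xffff,
 * so the converted stream spans the full 16-bit range rather than just
 * the top byte.
 */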
/*
* This gets called for the bulk stream pipe. This is done in interrupt
* time, so it has to be fast, not crash, and not stall. Neat.
*/
static void rtl2832_sdr_urb_complete(struct urb *urb)
{
struct rtl2832_sdr_dev *dev = urb->context;
struct platform_device *pdev = dev->pdev;
struct rtl2832_sdr_frame_buf *fbuf;
dev_dbg_ratelimited(&pdev->dev, "status=%d length=%d/%d errors=%d\n",
urb->status, urb->actual_length,
urb->transfer_buffer_length, urb->error_count);
switch (urb->status) {
case 0: /* success */
case -ETIMEDOUT: /* NAK */
break;
case -ECONNRESET: /* kill */
case -ENOENT:
case -ESHUTDOWN:
return;
default: /* error */
dev_err_ratelimited(&pdev->dev, "urb failed=%d\n", urb->status);
break;
}
if (likely(urb->actual_length > 0)) {
void *ptr;
unsigned int len;
/* get free framebuffer */
fbuf = rtl2832_sdr_get_next_fill_buf(dev);
if (unlikely(fbuf == NULL)) {
dev->vb_full++;
dev_notice_ratelimited(&pdev->dev,
"videobuf is full, %d packets dropped\n",
dev->vb_full);
goto skip;
}
/* fill framebuffer */
ptr = vb2_plane_vaddr(&fbuf->vb, 0);
len = rtl2832_sdr_convert_stream(dev, ptr, urb->transfer_buffer,
urb->actual_length);
vb2_set_plane_payload(&fbuf->vb, 0, len);
v4l2_get_timestamp(&fbuf->vb.v4l2_buf.timestamp);
fbuf->vb.v4l2_buf.sequence = dev->sequence++;
vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
}
skip:
usb_submit_urb(urb, GFP_ATOMIC);
}
static int rtl2832_sdr_kill_urbs(struct rtl2832_sdr_dev *dev)
{
struct platform_device *pdev = dev->pdev;
int i;
for (i = dev->urbs_submitted - 1; i >= 0; i--) {
dev_dbg(&pdev->dev, "kill urb=%d\n", i);
/* stop the URB */
usb_kill_urb(dev->urb_list[i]);
}
dev->urbs_submitted = 0;
return 0;
}
static int rtl2832_sdr_submit_urbs(struct rtl2832_sdr_dev *dev)
{
struct platform_device *pdev = dev->pdev;
int i, ret;
for (i = 0; i < dev->urbs_initialized; i++) {
dev_dbg(&pdev->dev, "submit urb=%d\n", i);
ret = usb_submit_urb(dev->urb_list[i], GFP_ATOMIC);
if (ret) {
dev_err(&pdev->dev,
"Could not submit urb no. %d - get them all back\n",
i);
rtl2832_sdr_kill_urbs(dev);
return ret;
}
dev->urbs_submitted++;
}
return 0;
}
static int rtl2832_sdr_free_stream_bufs(struct rtl2832_sdr_dev *dev)
{
struct platform_device *pdev = dev->pdev;
if (test_bit(URB_BUF, &dev->flags)) {
while (dev->buf_num) {
dev->buf_num--;
dev_dbg(&pdev->dev, "free buf=%d\n", dev->buf_num);
usb_free_coherent(dev->udev, dev->buf_size,
dev->buf_list[dev->buf_num],
dev->dma_addr[dev->buf_num]);
}
}
clear_bit(URB_BUF, &dev->flags);
return 0;
}
static int rtl2832_sdr_alloc_stream_bufs(struct rtl2832_sdr_dev *dev)
{
struct platform_device *pdev = dev->pdev;
dev->buf_num = 0;
dev->buf_size = BULK_BUFFER_SIZE;
dev_dbg(&pdev->dev, "all in all I will use %u bytes for streaming\n",
MAX_BULK_BUFS * BULK_BUFFER_SIZE);
for (dev->buf_num = 0; dev->buf_num < MAX_BULK_BUFS; dev->buf_num++) {
dev->buf_list[dev->buf_num] = usb_alloc_coherent(dev->udev,
BULK_BUFFER_SIZE, GFP_ATOMIC,
&dev->dma_addr[dev->buf_num]);
if (!dev->buf_list[dev->buf_num]) {
dev_dbg(&pdev->dev, "alloc buf=%d failed\n",
dev->buf_num);
rtl2832_sdr_free_stream_bufs(dev);
return -ENOMEM;
}
dev_dbg(&pdev->dev, "alloc buf=%d %p (dma %llu)\n",
dev->buf_num, dev->buf_list[dev->buf_num],
(long long)dev->dma_addr[dev->buf_num]);
set_bit(URB_BUF, &dev->flags);
}
return 0;
}
static int rtl2832_sdr_free_urbs(struct rtl2832_sdr_dev *dev)
{
struct platform_device *pdev = dev->pdev;
int i;
rtl2832_sdr_kill_urbs(dev);
for (i = dev->urbs_initialized - 1; i >= 0; i--) {
if (dev->urb_list[i]) {
dev_dbg(&pdev->dev, "free urb=%d\n", i);
/* free the URBs */
usb_free_urb(dev->urb_list[i]);
}
}
dev->urbs_initialized = 0;
return 0;
}
static int rtl2832_sdr_alloc_urbs(struct rtl2832_sdr_dev *dev)
{
struct platform_device *pdev = dev->pdev;
int i, j;
/* allocate the URBs */
for (i = 0; i < MAX_BULK_BUFS; i++) {
dev_dbg(&pdev->dev, "alloc urb=%d\n", i);
dev->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
if (!dev->urb_list[i]) {
dev_dbg(&pdev->dev, "failed\n");
for (j = 0; j < i; j++)
usb_free_urb(dev->urb_list[j]);
return -ENOMEM;
}
usb_fill_bulk_urb(dev->urb_list[i],
dev->udev,
usb_rcvbulkpipe(dev->udev, 0x81),
dev->buf_list[i],
BULK_BUFFER_SIZE,
rtl2832_sdr_urb_complete, dev);
dev->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
dev->urb_list[i]->transfer_dma = dev->dma_addr[i];
dev->urbs_initialized++;
}
return 0;
}
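/*
 * URB lifecycle, as implemented by the helpers above: coherent buffers
 * and URBs are allocated once per streaming session (alloc_stream_bufs,
 * alloc_urbs), submitted in one burst (submit_urbs), perpetually
 * resubmitted from the completion handler, and finally killed and freed
 * in reverse order when streaming stops (kill_urbs, free_urbs,
 * free_stream_bufs).
 */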
/* Must be called with vb_queue_lock hold */
static void rtl2832_sdr_cleanup_queued_bufs(struct rtl2832_sdr_dev *dev)
{
struct platform_device *pdev = dev->pdev;
unsigned long flags;
dev_dbg(&pdev->dev, "\n");
spin_lock_irqsave(&dev->queued_bufs_lock, flags);
while (!list_empty(&dev->queued_bufs)) {
struct rtl2832_sdr_frame_buf *buf;
buf = list_entry(dev->queued_bufs.next,
struct rtl2832_sdr_frame_buf, list);
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
}
static int rtl2832_sdr_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
struct rtl2832_sdr_dev *dev = video_drvdata(file);
struct platform_device *pdev = dev->pdev;
dev_dbg(&pdev->dev, "\n");
strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
strlcpy(cap->card, dev->vdev.name, sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_STREAMING |
V4L2_CAP_READWRITE | V4L2_CAP_TUNER;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
/* Videobuf2 operations */
static int rtl2832_sdr_queue_setup(struct vb2_queue *vq,
const struct v4l2_format *fmt, unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
{
struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vq);
struct platform_device *pdev = dev->pdev;
dev_dbg(&pdev->dev, "nbuffers=%d\n", *nbuffers);
/* Need at least 8 buffers */
if (vq->num_buffers + *nbuffers < 8)
*nbuffers = 8 - vq->num_buffers;
*nplanes = 1;
sizes[0] = PAGE_ALIGN(dev->buffersize);
dev_dbg(&pdev->dev, "nbuffers=%d sizes[0]=%d\n", *nbuffers, sizes[0]);
return 0;
}
static int rtl2832_sdr_buf_prepare(struct vb2_buffer *vb)
{
struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
/* Don't allow queueing new buffers after device disconnection */
if (!dev->udev)
return -ENODEV;
return 0;
}
static void rtl2832_sdr_buf_queue(struct vb2_buffer *vb)
{
struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct rtl2832_sdr_frame_buf *buf =
container_of(vb, struct rtl2832_sdr_frame_buf, vb);
unsigned long flags;
/* Check the device has not disconnected between prep and queuing */
if (!dev->udev) {
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
return;
}
spin_lock_irqsave(&dev->queued_bufs_lock, flags);
list_add_tail(&buf->list, &dev->queued_bufs);
spin_unlock_irqrestore(&dev->queued_bufs_lock, flags);
}
static int rtl2832_sdr_set_adc(struct rtl2832_sdr_dev *dev)
{
struct platform_device *pdev = dev->pdev;
struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
struct dvb_frontend *fe = pdata->dvb_frontend;
int ret;
unsigned int f_sr, f_if;
u8 buf[4], u8tmp1, u8tmp2;
u64 u64tmp;
u32 u32tmp;
dev_dbg(&pdev->dev, "f_adc=%u\n", dev->f_adc);
if (!test_bit(POWER_ON, &dev->flags))
return 0;
if (dev->f_adc == 0)
return 0;
f_sr = dev->f_adc;
ret = rtl2832_sdr_wr_regs(dev, 0x13e, "\x00\x00", 2);
if (ret)
goto err;
ret = rtl2832_sdr_wr_regs(dev, 0x115, "\x00\x00\x00\x00", 4);
if (ret)
goto err;
/* get IF from tuner */
if (fe->ops.tuner_ops.get_if_frequency)
ret = fe->ops.tuner_ops.get_if_frequency(fe, &f_if);
else
ret = -EINVAL;
if (ret)
goto err;
/* program IF */
u64tmp = f_if % pdata->clk;
u64tmp *= 0x400000;
u64tmp = div_u64(u64tmp, pdata->clk);
u64tmp = -u64tmp;
u32tmp = u64tmp & 0x3fffff;
dev_dbg(&pdev->dev, "f_if=%u if_ctl=%08x\n", f_if, u32tmp);
buf[0] = (u32tmp >> 16) & 0xff;
buf[1] = (u32tmp >> 8) & 0xff;
buf[2] = (u32tmp >> 0) & 0xff;
ret = rtl2832_sdr_wr_regs(dev, 0x119, buf, 3);
if (ret)
goto err;
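/*
 * Worked example for the IF programming above (values hypothetical but
 * typical; RTL2832-based sticks usually run from a 28.8 MHz crystal).
 * The register takes the 22-bit two's complement of 2^22 * f_if / clk:
 *   f_if = 7200000, clk = 28800000
 *   2^22 * 7200000 / 28800000 = 0x100000
 *   if_ctl = (-0x100000) & 0x3fffff = 0x300000
 * so buf[] carries 0x30 0x00 0x00.
 */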
/* BB / IF mode */
/* POR: 0x1b1=0x1f, 0x008=0x0d, 0x006=0x80 */
if (f_if) {
u8tmp1 = 0x1a; /* disable Zero-IF */
u8tmp2 = 0x8d; /* enable ADC I */
} else {
u8tmp1 = 0x1b; /* enable Zero-IF, DC, IQ */
u8tmp2 = 0xcd; /* enable ADC I, ADC Q */
}
ret = rtl2832_sdr_wr_reg(dev, 0x1b1, u8tmp1);
if (ret)
goto err;
ret = rtl2832_sdr_wr_reg(dev, 0x008, u8tmp2);
if (ret)
goto err;
ret = rtl2832_sdr_wr_reg(dev, 0x006, 0x80);
if (ret)
goto err;
/* program sampling rate (resampling down) */
u32tmp = div_u64(pdata->clk * 0x400000ULL, f_sr * 4U);
u32tmp <<= 2;
buf[0] = (u32tmp >> 24) & 0xff;
buf[1] = (u32tmp >> 16) & 0xff;
buf[2] = (u32tmp >> 8) & 0xff;
buf[3] = (u32tmp >> 0) & 0xff;
ret = rtl2832_sdr_wr_regs(dev, 0x19f, buf, 4);
if (ret)
goto err;
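/*
 * Resampler example (hypothetical clk = 28.8 MHz, f_sr = 2.4 MHz):
 *   div_u64(28800000 * 0x400000, 2400000 * 4) = 3 * 0x400000 = 0xc00000
 *   0xc00000 << 2 = 0x3000000
 * i.e. effectively 2^22 * clk / f_sr rounded down to a multiple of 4,
 * written big-endian as 0x03 0x00 0x00 0x00.
 */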
/* low-pass filter */
ret = rtl2832_sdr_wr_regs(dev, 0x11c,
"\xca\xdc\xd7\xd8\xe0\xf2\x0e\x35\x06\x50\x9c\x0d\x71\x11\x14\x71\x74\x19\x41\xa5",
20);
if (ret)
goto err;
ret = rtl2832_sdr_wr_regs(dev, 0x017, "\x11\x10", 2);
if (ret)
goto err;
/* mode */
ret = rtl2832_sdr_wr_regs(dev, 0x019, "\x05", 1);
if (ret)
goto err;
ret = rtl2832_sdr_wr_regs(dev, 0x01a, "\x1b\x16\x0d\x06\x01\xff", 6);
if (ret)
goto err;
/* FSM */
ret = rtl2832_sdr_wr_regs(dev, 0x192, "\x00\xf0\x0f", 3);
if (ret)
goto err;
/* PID filter */
ret = rtl2832_sdr_wr_regs(dev, 0x061, "\x60", 1);
if (ret)
goto err;
/* apply RF-tuner-specific demod settings; note that write errors
 * inside the blocks below are not individually checked */
switch (pdata->tuner) {
case RTL2832_SDR_TUNER_E4000:
ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x5a", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x5a", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x30", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xd0", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x18", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x80", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xd4", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1e5, "\xf0", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1d9, "\x00", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1db, "\x00", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1dd, "\x14", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1de, "\xec", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1d8, "\x0c", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1e6, "\x02", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1d7, "\x09", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x00d, "\x83", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x010, "\x49", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x00d, "\x87", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x00d, "\x85", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x013, "\x02", 1);
break;
case RTL2832_SDR_TUNER_FC0012:
case RTL2832_SDR_TUNER_FC0013:
ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x5a", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x5a", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x2c", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xcc", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x16", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x80", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xe9\xbf", 2);
ret = rtl2832_sdr_wr_regs(dev, 0x1e5, "\xf0", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1d9, "\x00", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1db, "\x00", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1dd, "\x11", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1de, "\xef", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1d8, "\x0c", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1e6, "\x02", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1d7, "\x09", 1);
break;
case RTL2832_SDR_TUNER_R820T:
case RTL2832_SDR_TUNER_R828D:
ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x5a", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x115, "\x01", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x80", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x24", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xcc", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x14", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x80", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xf4", 1);
break;
case RTL2832_SDR_TUNER_FC2580:
ret = rtl2832_sdr_wr_regs(dev, 0x112, "\x39", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x102, "\x40", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x103, "\x5a", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1c7, "\x2c", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x104, "\xcc", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x105, "\xbe", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1c8, "\x16", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x106, "\x35", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1c9, "\x21", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1ca, "\x21", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1cb, "\x00", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x107, "\x40", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1cd, "\x10", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x1ce, "\x10", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x108, "\x80", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x109, "\x7f", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x10a, "\x9c", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x10b, "\x7f", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x00e, "\xfc", 1);
ret = rtl2832_sdr_wr_regs(dev, 0x011, "\xe9\xf4", 2);
break;
default:
dev_notice(&pdev->dev, "Unsupported tuner\n");
}
/* software reset */
ret = rtl2832_sdr_wr_reg_mask(dev, 0x101, 0x04, 0x04);
if (ret)
goto err;
ret = rtl2832_sdr_wr_reg_mask(dev, 0x101, 0x00, 0x04);
if (ret)
goto err;
err:
return ret;
}
static void rtl2832_sdr_unset_adc(struct rtl2832_sdr_dev *dev)
{
struct platform_device *pdev = dev->pdev;
int ret;
dev_dbg(&pdev->dev, "\n");
/* PID filter */
ret = rtl2832_sdr_wr_regs(dev, 0x061, "\xe0", 1);
if (ret)
goto err;
/* mode */
ret = rtl2832_sdr_wr_regs(dev, 0x019, "\x20", 1);
if (ret)
goto err;
ret = rtl2832_sdr_wr_regs(dev, 0x017, "\x11\x10", 2);
if (ret)
goto err;
/* FSM */
ret = rtl2832_sdr_wr_regs(dev, 0x192, "\x00\x0f\xff", 3);
if (ret)
goto err;
ret = rtl2832_sdr_wr_regs(dev, 0x13e, "\x40\x00", 2);
if (ret)
goto err;
ret = rtl2832_sdr_wr_regs(dev, 0x115, "\x06\x3f\xce\xcc", 4);
if (ret)
goto err;
err:
return;
}
static int rtl2832_sdr_set_tuner_freq(struct rtl2832_sdr_dev *dev)
{
struct platform_device *pdev = dev->pdev;
struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
struct dvb_frontend *fe = pdata->dvb_frontend;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct v4l2_ctrl *bandwidth_auto;
struct v4l2_ctrl *bandwidth;
/*
* tuner RF (Hz)
*/
if (dev->f_tuner == 0)
return 0;
/*
* bandwidth (Hz)
*/
bandwidth_auto = v4l2_ctrl_find(&dev->hdl,
V4L2_CID_RF_TUNER_BANDWIDTH_AUTO);
bandwidth = v4l2_ctrl_find(&dev->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
if (v4l2_ctrl_g_ctrl(bandwidth_auto)) {
c->bandwidth_hz = dev->f_adc;
v4l2_ctrl_s_ctrl(bandwidth, dev->f_adc);
} else {
c->bandwidth_hz = v4l2_ctrl_g_ctrl(bandwidth);
}
c->frequency = dev->f_tuner;
c->delivery_system = SYS_DVBT;
dev_dbg(&pdev->dev, "frequency=%u bandwidth=%d\n",
c->frequency, c->bandwidth_hz);
if (!test_bit(POWER_ON, &dev->flags))
return 0;
if (!V4L2_SUBDEV_HAS_OP(dev->v4l2_subdev, tuner, s_frequency)) {
if (fe->ops.tuner_ops.set_params)
fe->ops.tuner_ops.set_params(fe);
}
return 0;
}
static int rtl2832_sdr_set_tuner(struct rtl2832_sdr_dev *dev)
{
struct platform_device *pdev = dev->pdev;
struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
struct dvb_frontend *fe = pdata->dvb_frontend;
dev_dbg(&pdev->dev, "\n");
if (fe->ops.tuner_ops.init)
fe->ops.tuner_ops.init(fe);
return 0;
}
static void rtl2832_sdr_unset_tuner(struct rtl2832_sdr_dev *dev)
{
struct platform_device *pdev = dev->pdev;
struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
struct dvb_frontend *fe = pdata->dvb_frontend;
dev_dbg(&pdev->dev, "\n");
if (fe->ops.tuner_ops.sleep)
fe->ops.tuner_ops.sleep(fe);
}
static int rtl2832_sdr_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vq);
struct platform_device *pdev = dev->pdev;
struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
struct dvb_usb_device *d = pdata->dvb_usb_device;
int ret;
dev_dbg(&pdev->dev, "\n");
if (!dev->udev)
return -ENODEV;
if (mutex_lock_interruptible(&dev->v4l2_lock))
return -ERESTARTSYS;
if (d->props->power_ctrl)
d->props->power_ctrl(d, 1);
/* enable ADC */
if (d->props->frontend_ctrl)
d->props->frontend_ctrl(pdata->dvb_frontend, 1);
set_bit(POWER_ON, &dev->flags);
/* wake-up tuner */
if (V4L2_SUBDEV_HAS_OP(dev->v4l2_subdev, core, s_power))
ret = v4l2_subdev_call(dev->v4l2_subdev, core, s_power, 1);
else
ret = rtl2832_sdr_set_tuner(dev);
if (ret)
goto err;
ret = rtl2832_sdr_set_tuner_freq(dev);
if (ret)
goto err;
ret = rtl2832_sdr_set_adc(dev);
if (ret)
goto err;
ret = rtl2832_sdr_alloc_stream_bufs(dev);
if (ret)
goto err;
ret = rtl2832_sdr_alloc_urbs(dev);
if (ret)
goto err;
dev->sequence = 0;
ret = rtl2832_sdr_submit_urbs(dev);
if (ret)
goto err;
err:
mutex_unlock(&dev->v4l2_lock);
return ret;
}
static void rtl2832_sdr_stop_streaming(struct vb2_queue *vq)
{
struct rtl2832_sdr_dev *dev = vb2_get_drv_priv(vq);
struct platform_device *pdev = dev->pdev;
struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
struct dvb_usb_device *d = pdata->dvb_usb_device;
dev_dbg(&pdev->dev, "\n");
mutex_lock(&dev->v4l2_lock);
rtl2832_sdr_kill_urbs(dev);
rtl2832_sdr_free_urbs(dev);
rtl2832_sdr_free_stream_bufs(dev);
rtl2832_sdr_cleanup_queued_bufs(dev);
rtl2832_sdr_unset_adc(dev);
/* sleep tuner */
if (V4L2_SUBDEV_HAS_OP(dev->v4l2_subdev, core, s_power))
v4l2_subdev_call(dev->v4l2_subdev, core, s_power, 0);
else
rtl2832_sdr_unset_tuner(dev);
clear_bit(POWER_ON, &dev->flags);
/* disable ADC */
if (d->props->frontend_ctrl)
d->props->frontend_ctrl(pdata->dvb_frontend, 0);
if (d->props->power_ctrl)
d->props->power_ctrl(d, 0);
mutex_unlock(&dev->v4l2_lock);
}
static struct vb2_ops rtl2832_sdr_vb2_ops = {
.queue_setup = rtl2832_sdr_queue_setup,
.buf_prepare = rtl2832_sdr_buf_prepare,
.buf_queue = rtl2832_sdr_buf_queue,
.start_streaming = rtl2832_sdr_start_streaming,
.stop_streaming = rtl2832_sdr_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
static int rtl2832_sdr_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *v)
{
struct rtl2832_sdr_dev *dev = video_drvdata(file);
struct platform_device *pdev = dev->pdev;
int ret;
dev_dbg(&pdev->dev, "index=%d type=%d\n", v->index, v->type);
if (v->index == 0) {
strlcpy(v->name, "ADC: Realtek RTL2832", sizeof(v->name));
v->type = V4L2_TUNER_ADC;
v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
v->rangelow = 300000;
v->rangehigh = 3200000;
ret = 0;
} else if (v->index == 1 &&
V4L2_SUBDEV_HAS_OP(dev->v4l2_subdev, tuner, g_tuner)) {
ret = v4l2_subdev_call(dev->v4l2_subdev, tuner, g_tuner, v);
} else if (v->index == 1) {
strlcpy(v->name, "RF: <unknown>", sizeof(v->name));
v->type = V4L2_TUNER_RF;
v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS;
v->rangelow = 50000000;
v->rangehigh = 2000000000;
ret = 0;
} else {
ret = -EINVAL;
}
return ret;
}
static int rtl2832_sdr_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *v)
{
struct rtl2832_sdr_dev *dev = video_drvdata(file);
struct platform_device *pdev = dev->pdev;
int ret;
dev_dbg(&pdev->dev, "\n");
if (v->index == 0) {
ret = 0;
} else if (v->index == 1 &&
V4L2_SUBDEV_HAS_OP(dev->v4l2_subdev, tuner, s_tuner)) {
ret = v4l2_subdev_call(dev->v4l2_subdev, tuner, s_tuner, v);
} else if (v->index == 1) {
ret = 0;
} else {
ret = -EINVAL;
}
return ret;
}
static int rtl2832_sdr_enum_freq_bands(struct file *file, void *priv,
struct v4l2_frequency_band *band)
{
struct rtl2832_sdr_dev *dev = video_drvdata(file);
struct platform_device *pdev = dev->pdev;
int ret;
dev_dbg(&pdev->dev, "tuner=%d type=%d index=%d\n",
band->tuner, band->type, band->index);
if (band->tuner == 0) {
if (band->index >= ARRAY_SIZE(bands_adc))
return -EINVAL;
*band = bands_adc[band->index];
ret = 0;
} else if (band->tuner == 1 &&
V4L2_SUBDEV_HAS_OP(dev->v4l2_subdev, tuner, enum_freq_bands)) {
ret = v4l2_subdev_call(dev->v4l2_subdev, tuner, enum_freq_bands, band);
} else if (band->tuner == 1) {
if (band->index >= ARRAY_SIZE(bands_fm))
return -EINVAL;
*band = bands_fm[band->index];
ret = 0;
} else {
ret = -EINVAL;
}
return ret;
}
static int rtl2832_sdr_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
struct rtl2832_sdr_dev *dev = video_drvdata(file);
struct platform_device *pdev = dev->pdev;
int ret;
dev_dbg(&pdev->dev, "tuner=%d type=%d\n", f->tuner, f->type);
if (f->tuner == 0) {
f->frequency = dev->f_adc;
f->type = V4L2_TUNER_ADC;
ret = 0;
} else if (f->tuner == 1 &&
V4L2_SUBDEV_HAS_OP(dev->v4l2_subdev, tuner, g_frequency)) {
f->type = V4L2_TUNER_RF;
ret = v4l2_subdev_call(dev->v4l2_subdev, tuner, g_frequency, f);
} else if (f->tuner == 1) {
f->frequency = dev->f_tuner;
f->type = V4L2_TUNER_RF;
ret = 0;
} else {
ret = -EINVAL;
}
return ret;
}
static int rtl2832_sdr_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
struct rtl2832_sdr_dev *dev = video_drvdata(file);
struct platform_device *pdev = dev->pdev;
int ret, band;
dev_dbg(&pdev->dev, "tuner=%d type=%d frequency=%u\n",
f->tuner, f->type, f->frequency);
/* ADC band midpoints */
#define BAND_ADC_0 ((bands_adc[0].rangehigh + bands_adc[1].rangelow) / 2)
#define BAND_ADC_1 ((bands_adc[1].rangehigh + bands_adc[2].rangelow) / 2)
if (f->tuner == 0 && f->type == V4L2_TUNER_ADC) {
if (f->frequency < BAND_ADC_0)
band = 0;
else if (f->frequency < BAND_ADC_1)
band = 1;
else
band = 2;
dev->f_adc = clamp_t(unsigned int, f->frequency,
bands_adc[band].rangelow,
bands_adc[band].rangehigh);
dev_dbg(&pdev->dev, "ADC frequency=%u Hz\n", dev->f_adc);
ret = rtl2832_sdr_set_adc(dev);
} else if (f->tuner == 1 &&
V4L2_SUBDEV_HAS_OP(dev->v4l2_subdev, tuner, s_frequency)) {
ret = v4l2_subdev_call(dev->v4l2_subdev, tuner, s_frequency, f);
} else if (f->tuner == 1) {
dev->f_tuner = clamp_t(unsigned int, f->frequency,
bands_fm[0].rangelow,
bands_fm[0].rangehigh);
dev_dbg(&pdev->dev, "RF frequency=%u Hz\n", f->frequency);
ret = rtl2832_sdr_set_tuner_freq(dev);
} else {
ret = -EINVAL;
}
return ret;
}
static int rtl2832_sdr_enum_fmt_sdr_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct rtl2832_sdr_dev *dev = video_drvdata(file);
struct platform_device *pdev = dev->pdev;
dev_dbg(&pdev->dev, "\n");
if (f->index >= dev->num_formats)
return -EINVAL;
strlcpy(f->description, formats[f->index].name, sizeof(f->description));
f->pixelformat = formats[f->index].pixelformat;
return 0;
}
static int rtl2832_sdr_g_fmt_sdr_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct rtl2832_sdr_dev *dev = video_drvdata(file);
struct platform_device *pdev = dev->pdev;
dev_dbg(&pdev->dev, "\n");
f->fmt.sdr.pixelformat = dev->pixelformat;
f->fmt.sdr.buffersize = dev->buffersize;
memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
return 0;
}
static int rtl2832_sdr_s_fmt_sdr_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct rtl2832_sdr_dev *dev = video_drvdata(file);
struct platform_device *pdev = dev->pdev;
struct vb2_queue *q = &dev->vb_queue;
int i;
dev_dbg(&pdev->dev, "pixelformat fourcc %4.4s\n",
(char *)&f->fmt.sdr.pixelformat);
if (vb2_is_busy(q))
return -EBUSY;
memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
for (i = 0; i < dev->num_formats; i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
dev->pixelformat = formats[i].pixelformat;
dev->buffersize = formats[i].buffersize;
f->fmt.sdr.buffersize = formats[i].buffersize;
return 0;
}
}
dev->pixelformat = formats[0].pixelformat;
dev->buffersize = formats[0].buffersize;
f->fmt.sdr.pixelformat = formats[0].pixelformat;
f->fmt.sdr.buffersize = formats[0].buffersize;
return 0;
}
static int rtl2832_sdr_try_fmt_sdr_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct rtl2832_sdr_dev *dev = video_drvdata(file);
struct platform_device *pdev = dev->pdev;
int i;
dev_dbg(&pdev->dev, "pixelformat fourcc %4.4s\n",
(char *)&f->fmt.sdr.pixelformat);
memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
for (i = 0; i < dev->num_formats; i++) {
if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
f->fmt.sdr.buffersize = formats[i].buffersize;
return 0;
}
}
f->fmt.sdr.pixelformat = formats[0].pixelformat;
f->fmt.sdr.buffersize = formats[0].buffersize;
return 0;
}
static const struct v4l2_ioctl_ops rtl2832_sdr_ioctl_ops = {
.vidioc_querycap = rtl2832_sdr_querycap,
.vidioc_enum_fmt_sdr_cap = rtl2832_sdr_enum_fmt_sdr_cap,
.vidioc_g_fmt_sdr_cap = rtl2832_sdr_g_fmt_sdr_cap,
.vidioc_s_fmt_sdr_cap = rtl2832_sdr_s_fmt_sdr_cap,
.vidioc_try_fmt_sdr_cap = rtl2832_sdr_try_fmt_sdr_cap,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_tuner = rtl2832_sdr_g_tuner,
.vidioc_s_tuner = rtl2832_sdr_s_tuner,
.vidioc_enum_freq_bands = rtl2832_sdr_enum_freq_bands,
.vidioc_g_frequency = rtl2832_sdr_g_frequency,
.vidioc_s_frequency = rtl2832_sdr_s_frequency,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
.vidioc_log_status = v4l2_ctrl_log_status,
};
static const struct v4l2_file_operations rtl2832_sdr_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = vb2_fop_release,
.read = vb2_fop_read,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
static struct video_device rtl2832_sdr_template = {
.name = "Realtek RTL2832 SDR",
.release = video_device_release_empty,
.fops = &rtl2832_sdr_fops,
.ioctl_ops = &rtl2832_sdr_ioctl_ops,
};
static int rtl2832_sdr_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct rtl2832_sdr_dev *dev =
container_of(ctrl->handler, struct rtl2832_sdr_dev,
hdl);
struct platform_device *pdev = dev->pdev;
struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
struct dvb_frontend *fe = pdata->dvb_frontend;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
dev_dbg(&pdev->dev, "id=%d name=%s val=%d min=%lld max=%lld step=%lld\n",
ctrl->id, ctrl->name, ctrl->val, ctrl->minimum, ctrl->maximum,
ctrl->step);
switch (ctrl->id) {
case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO:
case V4L2_CID_RF_TUNER_BANDWIDTH:
/* TODO: these controls should be moved to tuner drivers */
if (dev->bandwidth_auto->val) {
/* Round towards the closest legal value */
s32 val = dev->f_adc + div_u64(dev->bandwidth->step, 2);
u32 offset;
val = clamp_t(s32, val, dev->bandwidth->minimum,
dev->bandwidth->maximum);
offset = val - dev->bandwidth->minimum;
offset = dev->bandwidth->step *
div_u64(offset, dev->bandwidth->step);
dev->bandwidth->val = dev->bandwidth->minimum + offset;
}
c->bandwidth_hz = dev->bandwidth->val;
if (!test_bit(POWER_ON, &dev->flags))
return 0;
if (fe->ops.tuner_ops.set_params)
ret = fe->ops.tuner_ops.set_params(fe);
else
ret = 0;
break;
default:
ret = -EINVAL;
}
return ret;
}
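/*
 * Worked example of the auto-bandwidth rounding above, assuming a control
 * with minimum=0 and step=100000: for f_adc=2370000, val becomes
 * 2370000 + 50000 = 2420000 after adding half a step, offset is then
 * truncated to 24 whole steps (2400000), so the bandwidth snaps to
 * 2400000 Hz -- the closest legal value, as the comment promises.
 */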
static const struct v4l2_ctrl_ops rtl2832_sdr_ctrl_ops = {
.s_ctrl = rtl2832_sdr_s_ctrl,
};
static void rtl2832_sdr_video_release(struct v4l2_device *v)
{
struct rtl2832_sdr_dev *dev =
container_of(v, struct rtl2832_sdr_dev, v4l2_dev);
struct platform_device *pdev = dev->pdev;
dev_dbg(&pdev->dev, "\n");
v4l2_ctrl_handler_free(&dev->hdl);
v4l2_device_unregister(&dev->v4l2_dev);
kfree(dev);
}
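/*
 * This release callback runs only once the last reference to the
 * v4l2_device is dropped (see the v4l2_device_put() in the remove path
 * below), so freeing the control handler and the state struct here is
 * safe even if userspace still held the device node open at unbind time.
 */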
/* Platform driver interface */
static int rtl2832_sdr_probe(struct platform_device *pdev)
{
struct rtl2832_sdr_dev *dev;
struct rtl2832_sdr_platform_data *pdata = pdev->dev.platform_data;
const struct v4l2_ctrl_ops *ops = &rtl2832_sdr_ctrl_ops;
struct v4l2_subdev *subdev;
int ret;
dev_dbg(&pdev->dev, "\n");
if (!pdata) {
dev_err(&pdev->dev, "Cannot proceed without platform data\n");
ret = -EINVAL;
goto err;
}
if (!pdev->dev.parent->driver) {
dev_dbg(&pdev->dev, "No parent device\n");
ret = -EINVAL;
goto err;
}
/* try to refcount host drv since we are the consumer */
if (!try_module_get(pdev->dev.parent->driver->owner)) {
dev_err(&pdev->dev, "Refcount fail");
ret = -EINVAL;
goto err;
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL) {
ret = -ENOMEM;
goto err_module_put;
}
/* setup the state */
subdev = pdata->v4l2_subdev;
dev->v4l2_subdev = pdata->v4l2_subdev;
dev->pdev = pdev;
dev->udev = pdata->dvb_usb_device->udev;
dev->f_adc = bands_adc[0].rangelow;
dev->f_tuner = bands_fm[0].rangelow;
dev->pixelformat = formats[0].pixelformat;
dev->buffersize = formats[0].buffersize;
dev->num_formats = NUM_FORMATS;
if (!rtl2832_sdr_emulated_fmt)
dev->num_formats -= 1;
mutex_init(&dev->v4l2_lock);
mutex_init(&dev->vb_queue_lock);
spin_lock_init(&dev->queued_bufs_lock);
INIT_LIST_HEAD(&dev->queued_bufs);
/* Init videobuf2 queue structure */
dev->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
dev->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
dev->vb_queue.drv_priv = dev;
dev->vb_queue.buf_struct_size = sizeof(struct rtl2832_sdr_frame_buf);
dev->vb_queue.ops = &rtl2832_sdr_vb2_ops;
dev->vb_queue.mem_ops = &vb2_vmalloc_memops;
dev->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
ret = vb2_queue_init(&dev->vb_queue);
if (ret) {
dev_err(&pdev->dev, "Could not initialize vb2 queue\n");
goto err_kfree;
}
/* Register controls */
switch (pdata->tuner) {
case RTL2832_SDR_TUNER_E4000:
v4l2_ctrl_handler_init(&dev->hdl, 9);
if (subdev)
v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler, NULL);
break;
case RTL2832_SDR_TUNER_R820T:
case RTL2832_SDR_TUNER_R828D:
v4l2_ctrl_handler_init(&dev->hdl, 2);
dev->bandwidth_auto = v4l2_ctrl_new_std(&dev->hdl, ops,
V4L2_CID_RF_TUNER_BANDWIDTH_AUTO,
0, 1, 1, 1);
dev->bandwidth = v4l2_ctrl_new_std(&dev->hdl, ops,
V4L2_CID_RF_TUNER_BANDWIDTH,
0, 8000000, 100000, 0);
v4l2_ctrl_auto_cluster(2, &dev->bandwidth_auto, 0, false);
break;
case RTL2832_SDR_TUNER_FC0012:
case RTL2832_SDR_TUNER_FC0013:
v4l2_ctrl_handler_init(&dev->hdl, 2);
dev->bandwidth_auto = v4l2_ctrl_new_std(&dev->hdl, ops,
V4L2_CID_RF_TUNER_BANDWIDTH_AUTO,
0, 1, 1, 1);
dev->bandwidth = v4l2_ctrl_new_std(&dev->hdl, ops,
V4L2_CID_RF_TUNER_BANDWIDTH,
6000000, 8000000, 1000000,
6000000);
v4l2_ctrl_auto_cluster(2, &dev->bandwidth_auto, 0, false);
break;
case RTL2832_SDR_TUNER_FC2580:
v4l2_ctrl_handler_init(&dev->hdl, 2);
if (subdev)
v4l2_ctrl_add_handler(&dev->hdl, subdev->ctrl_handler,
NULL);
break;
default:
v4l2_ctrl_handler_init(&dev->hdl, 0);
dev_err(&pdev->dev, "Unsupported tuner\n");
goto err_v4l2_ctrl_handler_free;
}
if (dev->hdl.error) {
ret = dev->hdl.error;
dev_err(&pdev->dev, "Could not initialize controls\n");
goto err_v4l2_ctrl_handler_free;
}
/* Init video_device structure */
dev->vdev = rtl2832_sdr_template;
dev->vdev.queue = &dev->vb_queue;
dev->vdev.queue->lock = &dev->vb_queue_lock;
video_set_drvdata(&dev->vdev, dev);
/* Register the v4l2_device structure */
dev->v4l2_dev.release = rtl2832_sdr_video_release;
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to register v4l2-device %d\n", ret);
goto err_v4l2_ctrl_handler_free;
}
dev->v4l2_dev.ctrl_handler = &dev->hdl;
dev->vdev.v4l2_dev = &dev->v4l2_dev;
dev->vdev.lock = &dev->v4l2_lock;
dev->vdev.vfl_dir = VFL_DIR_RX;
ret = video_register_device(&dev->vdev, VFL_TYPE_SDR, -1);
if (ret) {
dev_err(&pdev->dev, "Failed to register as video device %d\n",
ret);
goto err_v4l2_device_unregister;
}
dev_info(&pdev->dev, "Registered as %s\n",
video_device_node_name(&dev->vdev));
dev_info(&pdev->dev, "Realtek RTL2832 SDR attached\n");
dev_notice(&pdev->dev,
"SDR API is still slightly experimental and functionality changes may follow\n");
platform_set_drvdata(pdev, dev);
return 0;
err_v4l2_device_unregister:
v4l2_device_unregister(&dev->v4l2_dev);
err_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(&dev->hdl);
err_kfree:
kfree(dev);
err_module_put:
module_put(pdev->dev.parent->driver->owner);
err:
return ret;
}
static int rtl2832_sdr_remove(struct platform_device *pdev)
{
struct rtl2832_sdr_dev *dev = platform_get_drvdata(pdev);
dev_dbg(&pdev->dev, "\n");
mutex_lock(&dev->vb_queue_lock);
mutex_lock(&dev->v4l2_lock);
/* No need to keep the urbs around after disconnection */
dev->udev = NULL;
v4l2_device_disconnect(&dev->v4l2_dev);
video_unregister_device(&dev->vdev);
mutex_unlock(&dev->v4l2_lock);
mutex_unlock(&dev->vb_queue_lock);
v4l2_device_put(&dev->v4l2_dev);
module_put(pdev->dev.parent->driver->owner);
return 0;
}
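/*
 * Teardown ordering note: both mutexes are held so no ioctl or queue
 * operation can run concurrently while the device is disconnected and
 * unregistered; the final kfree() of the state is deferred to
 * rtl2832_sdr_video_release() via the v4l2_device_put() above.
 */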
static struct platform_driver rtl2832_sdr_driver = {
.driver = {
.name = "rtl2832_sdr",
.owner = THIS_MODULE,
},
.probe = rtl2832_sdr_probe,
.remove = rtl2832_sdr_remove,
};
module_platform_driver(rtl2832_sdr_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Realtek RTL2832 SDR driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
MatiasBjorling/lightnvm-moved-to-OpenChannelSSD-Linux | drivers/gpu/drm/r128/r128_cce.c | 453 | 24230 | /* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
*/
/*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
*/
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/r128_drm.h>
#include "r128_drv.h"
#define R128_FIFO_DEBUG 0
#define FIRMWARE_NAME "r128/r128_cce.bin"
MODULE_FIRMWARE(FIRMWARE_NAME);
static int R128_READ_PLL(struct drm_device *dev, int addr)
{
drm_r128_private_t *dev_priv = dev->dev_private;
R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
return R128_READ(R128_CLOCK_CNTL_DATA);
}
#if R128_FIFO_DEBUG
static void r128_status(drm_r128_private_t *dev_priv)
{
printk("GUI_STAT = 0x%08x\n",
(unsigned int)R128_READ(R128_GUI_STAT));
printk("PM4_STAT = 0x%08x\n",
(unsigned int)R128_READ(R128_PM4_STAT));
printk("PM4_BUFFER_DL_WPTR = 0x%08x\n",
(unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR));
printk("PM4_BUFFER_DL_RPTR = 0x%08x\n",
(unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR));
printk("PM4_MICRO_CNTL = 0x%08x\n",
(unsigned int)R128_READ(R128_PM4_MICRO_CNTL));
printk("PM4_BUFFER_CNTL = 0x%08x\n",
(unsigned int)R128_READ(R128_PM4_BUFFER_CNTL));
}
#endif
/* ================================================================
* Engine, FIFO control
*/
static int r128_do_pixcache_flush(drm_r128_private_t *dev_priv)
{
u32 tmp;
int i;
tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL;
R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY))
return 0;
DRM_UDELAY(1);
}
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
#endif
return -EBUSY;
}
static int r128_do_wait_for_fifo(drm_r128_private_t *dev_priv, int entries)
{
int i;
for (i = 0; i < dev_priv->usec_timeout; i++) {
int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
if (slots >= entries)
return 0;
DRM_UDELAY(1);
}
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
#endif
return -EBUSY;
}
static int r128_do_wait_for_idle(drm_r128_private_t *dev_priv)
{
int i, ret;
ret = r128_do_wait_for_fifo(dev_priv, 64);
if (ret)
return ret;
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
r128_do_pixcache_flush(dev_priv);
return 0;
}
DRM_UDELAY(1);
}
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
#endif
return -EBUSY;
}
/* ================================================================
* CCE control, initialization
*/
/* Load the microcode for the CCE */
static int r128_cce_load_microcode(drm_r128_private_t *dev_priv)
{
struct platform_device *pdev;
const struct firmware *fw;
const __be32 *fw_data;
int rc, i;
DRM_DEBUG("\n");
pdev = platform_device_register_simple("r128_cce", 0, NULL, 0);
if (IS_ERR(pdev)) {
printk(KERN_ERR "r128_cce: Failed to register firmware\n");
return PTR_ERR(pdev);
}
rc = request_firmware(&fw, FIRMWARE_NAME, &pdev->dev);
platform_device_unregister(pdev);
if (rc) {
printk(KERN_ERR "r128_cce: Failed to load firmware \"%s\"\n",
FIRMWARE_NAME);
return rc;
}
if (fw->size != 256 * 8) {
printk(KERN_ERR
"r128_cce: Bogus length %zu in firmware \"%s\"\n",
fw->size, FIRMWARE_NAME);
rc = -EINVAL;
goto out_release;
}
r128_do_wait_for_idle(dev_priv);
fw_data = (const __be32 *)fw->data;
R128_WRITE(R128_PM4_MICROCODE_ADDR, 0);
for (i = 0; i < 256; i++) {
R128_WRITE(R128_PM4_MICROCODE_DATAH,
be32_to_cpup(&fw_data[i * 2]));
R128_WRITE(R128_PM4_MICROCODE_DATAL,
be32_to_cpup(&fw_data[i * 2 + 1]));
}
out_release:
release_firmware(fw);
return rc;
}
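/*
 * A minimal sketch (not compiled in) of the firmware-fetch pattern used
 * above: request_firmware() needs a struct device to resolve the blob
 * against, so a throwaway platform device is registered just long enough
 * to pull the firmware into memory.  The helper name below is
 * illustrative only.
 */
#if 0
static const struct firmware *r128_fetch_blob(const char *name)
{
	struct platform_device *pdev;
	const struct firmware *fw = NULL;

	pdev = platform_device_register_simple("r128_cce", 0, NULL, 0);
	if (IS_ERR(pdev))
		return NULL;
	if (request_firmware(&fw, name, &pdev->dev))
		fw = NULL;		/* not found or I/O error */
	platform_device_unregister(pdev);
	return fw;			/* caller must release_firmware() */
}
#endif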
/* Flush any pending commands to the CCE. This should only be used just
* prior to a wait for idle, as it informs the engine that the command
* stream is ending.
*/
static void r128_do_cce_flush(drm_r128_private_t *dev_priv)
{
u32 tmp;
tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR) | R128_PM4_BUFFER_DL_DONE;
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp);
}
/* Wait for the CCE to go idle.
*/
int r128_do_cce_idle(drm_r128_private_t *dev_priv)
{
int i;
for (i = 0; i < dev_priv->usec_timeout; i++) {
if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) {
int pm4stat = R128_READ(R128_PM4_STAT);
if (((pm4stat & R128_PM4_FIFOCNT_MASK) >=
dev_priv->cce_fifo_size) &&
!(pm4stat & (R128_PM4_BUSY |
R128_PM4_GUI_ACTIVE))) {
return r128_do_pixcache_flush(dev_priv);
}
}
DRM_UDELAY(1);
}
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
r128_status(dev_priv);
#endif
return -EBUSY;
}
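/*
 * Idle here means three things at once: the ring has been consumed
 * (head == tail), the PM4 FIFO has drained back to its full depth, and
 * neither the PM4 engine nor the GUI block reports busy -- only then is
 * the pixel cache flushed and success returned.
 */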
/* Start the Concurrent Command Engine.
*/
static void r128_do_cce_start(drm_r128_private_t *dev_priv)
{
r128_do_wait_for_idle(dev_priv);
R128_WRITE(R128_PM4_BUFFER_CNTL,
dev_priv->cce_mode | dev_priv->ring.size_l2qw
| R128_PM4_BUFFER_CNTL_NOUPDATE);
R128_READ(R128_PM4_BUFFER_ADDR); /* as per the sample code */
R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN);
dev_priv->cce_running = 1;
}
/* Reset the Concurrent Command Engine. This will not flush any pending
* commands, so you must wait for the CCE command stream to complete
* before calling this routine.
*/
static void r128_do_cce_reset(drm_r128_private_t *dev_priv)
{
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
dev_priv->ring.tail = 0;
}
/* Stop the Concurrent Command Engine. This will not flush any pending
* commands, so you must flush the command stream and wait for the CCE
* to go idle before calling this routine.
*/
static void r128_do_cce_stop(drm_r128_private_t *dev_priv)
{
R128_WRITE(R128_PM4_MICRO_CNTL, 0);
R128_WRITE(R128_PM4_BUFFER_CNTL,
R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE);
dev_priv->cce_running = 0;
}
/* Reset the engine. This will stop the CCE if it is running.
*/
static int r128_do_engine_reset(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
r128_do_pixcache_flush(dev_priv);
clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX);
mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL);
R128_WRITE_PLL(R128_MCLK_CNTL,
mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP);
gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL);
/* Taken from the sample code - do not change */
R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI);
R128_READ(R128_GEN_RESET_CNTL);
R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI);
R128_READ(R128_GEN_RESET_CNTL);
R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl);
R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index);
R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl);
/* Reset the CCE ring */
r128_do_cce_reset(dev_priv);
/* The CCE is no longer running after an engine reset */
dev_priv->cce_running = 0;
/* Reset any pending vertex, indirect buffers */
r128_freelist_reset(dev);
return 0;
}
static void r128_cce_init_ring_buffer(struct drm_device *dev,
drm_r128_private_t *dev_priv)
{
u32 ring_start;
u32 tmp;
DRM_DEBUG("\n");
/* The manual (p. 2) says this address is in "VM space". This
* means it's an offset from the start of AGP space.
*/
#if __OS_HAS_AGP
if (!dev_priv->is_pci)
ring_start = dev_priv->cce_ring->offset - dev->agp->base;
else
#endif
ring_start = dev_priv->cce_ring->offset -
(unsigned long)dev->sg->virtual;
R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET);
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
/* Set watermark control */
R128_WRITE(R128_PM4_BUFFER_WM_CNTL,
((R128_WATERMARK_L / 4) << R128_WMA_SHIFT)
| ((R128_WATERMARK_M / 4) << R128_WMB_SHIFT)
| ((R128_WATERMARK_N / 4) << R128_WMC_SHIFT)
| ((R128_WATERMARK_K / 64) << R128_WB_WM_SHIFT));
/* Force read. Why? Because it's in the examples... */
R128_READ(R128_PM4_BUFFER_ADDR);
/* Turn on bus mastering */
tmp = R128_READ(R128_BUS_CNTL) & ~R128_BUS_MASTER_DIS;
R128_WRITE(R128_BUS_CNTL, tmp);
}
static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
{
drm_r128_private_t *dev_priv;
int rc;
DRM_DEBUG("\n");
if (dev->dev_private) {
DRM_DEBUG("called when already initialized\n");
return -EINVAL;
}
dev_priv = kzalloc(sizeof(drm_r128_private_t), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
dev_priv->is_pci = init->is_pci;
if (dev_priv->is_pci && !dev->sg) {
DRM_ERROR("PCI GART memory not allocated!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
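	/*
	 * Note: every error path in this function first publishes dev_priv
	 * through dev->dev_private so that r128_do_cleanup_cce() can find
	 * and free the partially initialized state.
	 */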
dev_priv->usec_timeout = init->usec_timeout;
if (dev_priv->usec_timeout < 1 ||
dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) {
DRM_DEBUG("TIMEOUT problem!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
dev_priv->cce_mode = init->cce_mode;
/* GH: Simple idle check.
*/
atomic_set(&dev_priv->idle_count, 0);
/* We don't support anything other than bus-mastering ring mode,
* but the ring can be in either AGP or PCI space for the ring
* read pointer.
*/
if ((init->cce_mode != R128_PM4_192BM) &&
(init->cce_mode != R128_PM4_128BM_64INDBM) &&
(init->cce_mode != R128_PM4_64BM_128INDBM) &&
(init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM)) {
DRM_DEBUG("Bad cce_mode!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
switch (init->cce_mode) {
case R128_PM4_NONPM4:
dev_priv->cce_fifo_size = 0;
break;
case R128_PM4_192PIO:
case R128_PM4_192BM:
dev_priv->cce_fifo_size = 192;
break;
case R128_PM4_128PIO_64INDBM:
case R128_PM4_128BM_64INDBM:
dev_priv->cce_fifo_size = 128;
break;
case R128_PM4_64PIO_128INDBM:
case R128_PM4_64BM_128INDBM:
case R128_PM4_64PIO_64VCBM_64INDBM:
case R128_PM4_64BM_64VCBM_64INDBM:
case R128_PM4_64PIO_64VCPIO_64INDPIO:
dev_priv->cce_fifo_size = 64;
break;
}
switch (init->fb_bpp) {
case 16:
dev_priv->color_fmt = R128_DATATYPE_RGB565;
break;
case 32:
default:
dev_priv->color_fmt = R128_DATATYPE_ARGB8888;
break;
}
dev_priv->front_offset = init->front_offset;
dev_priv->front_pitch = init->front_pitch;
dev_priv->back_offset = init->back_offset;
dev_priv->back_pitch = init->back_pitch;
switch (init->depth_bpp) {
case 16:
dev_priv->depth_fmt = R128_DATATYPE_RGB565;
break;
case 24:
case 32:
default:
dev_priv->depth_fmt = R128_DATATYPE_ARGB8888;
break;
}
dev_priv->depth_offset = init->depth_offset;
dev_priv->depth_pitch = init->depth_pitch;
dev_priv->span_offset = init->span_offset;
dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch / 8) << 21) |
(dev_priv->front_offset >> 5));
dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch / 8) << 21) |
(dev_priv->back_offset >> 5));
dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
(dev_priv->depth_offset >> 5) |
R128_DST_TILE);
dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
(dev_priv->span_offset >> 5));
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio) {
DRM_ERROR("could not find mmio region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
if (!dev_priv->cce_ring) {
DRM_ERROR("could not find cce ring region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
if (!dev_priv->ring_rptr) {
DRM_ERROR("could not find ring read pointer!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
DRM_ERROR("could not find dma buffer region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
if (!dev_priv->is_pci) {
dev_priv->agp_textures =
drm_core_findmap(dev, init->agp_textures_offset);
if (!dev_priv->agp_textures) {
DRM_ERROR("could not find agp texture region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -EINVAL;
}
}
dev_priv->sarea_priv =
(drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle +
init->sarea_priv_offset);
#if __OS_HAS_AGP
if (!dev_priv->is_pci) {
drm_core_ioremap_wc(dev_priv->cce_ring, dev);
drm_core_ioremap_wc(dev_priv->ring_rptr, dev);
drm_core_ioremap_wc(dev->agp_buffer_map, dev);
if (!dev_priv->cce_ring->handle ||
!dev_priv->ring_rptr->handle ||
!dev->agp_buffer_map->handle) {
DRM_ERROR("Could not ioremap agp regions!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -ENOMEM;
}
} else
#endif
{
dev_priv->cce_ring->handle =
(void *)(unsigned long)dev_priv->cce_ring->offset;
dev_priv->ring_rptr->handle =
(void *)(unsigned long)dev_priv->ring_rptr->offset;
dev->agp_buffer_map->handle =
(void *)(unsigned long)dev->agp_buffer_map->offset;
}
#if __OS_HAS_AGP
if (!dev_priv->is_pci)
dev_priv->cce_buffers_offset = dev->agp->base;
else
#endif
dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle;
dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = 128;
dev_priv->sarea_priv->last_frame = 0;
R128_WRITE(R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
dev_priv->sarea_priv->last_dispatch = 0;
R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch);
#if __OS_HAS_AGP
if (dev_priv->is_pci) {
#endif
dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
dev_priv->gart_info.addr = NULL;
dev_priv->gart_info.bus_addr = 0;
dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
DRM_ERROR("failed to init PCI GART!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
return -ENOMEM;
}
R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
#if __OS_HAS_AGP
}
#endif
r128_cce_init_ring_buffer(dev, dev_priv);
rc = r128_cce_load_microcode(dev_priv);
dev->dev_private = (void *)dev_priv;
r128_do_engine_reset(dev);
if (rc) {
DRM_ERROR("Failed to load firmware!\n");
r128_do_cleanup_cce(dev);
}
return rc;
}
int r128_do_cleanup_cce(struct drm_device *dev)
{
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (dev->irq_enabled)
drm_irq_uninstall(dev);
if (dev->dev_private) {
drm_r128_private_t *dev_priv = dev->dev_private;
#if __OS_HAS_AGP
if (!dev_priv->is_pci) {
if (dev_priv->cce_ring != NULL)
drm_core_ioremapfree(dev_priv->cce_ring, dev);
if (dev_priv->ring_rptr != NULL)
drm_core_ioremapfree(dev_priv->ring_rptr, dev);
if (dev->agp_buffer_map != NULL) {
drm_core_ioremapfree(dev->agp_buffer_map, dev);
dev->agp_buffer_map = NULL;
}
} else
#endif
{
if (dev_priv->gart_info.bus_addr)
if (!drm_ati_pcigart_cleanup(dev,
&dev_priv->gart_info))
DRM_ERROR
("failed to cleanup PCI GART!\n");
}
kfree(dev->dev_private);
dev->dev_private = NULL;
}
return 0;
}
int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_init_t *init = data;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
switch (init->func) {
case R128_INIT_CCE:
return r128_do_init_cce(dev, init);
case R128_CLEANUP_CCE:
return r128_do_cleanup_cce(dev);
}
return -EINVAL;
}
int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
DEV_INIT_TEST_WITH_RETURN(dev_priv);
if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
DRM_DEBUG("while CCE running\n");
return 0;
}
r128_do_cce_start(dev_priv);
return 0;
}
/* Stop the CCE. The engine must have been idled before calling this
* routine.
*/
int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_cce_stop_t *stop = data;
int ret;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
DEV_INIT_TEST_WITH_RETURN(dev_priv);
/* Flush any pending CCE commands. This ensures any outstanding
 * commands are executed by the engine before we turn it off.
*/
if (stop->flush)
r128_do_cce_flush(dev_priv);
/* If we fail to make the engine go idle, we return an error
* code so that the DRM ioctl wrapper can try again.
*/
if (stop->idle) {
ret = r128_do_cce_idle(dev_priv);
if (ret)
return ret;
}
/* Finally, we can turn off the CCE. If the engine isn't idle,
* we will get some dropped triangles as they won't be fully
* rendered before the CCE is shut down.
*/
r128_do_cce_stop(dev_priv);
/* Reset the engine */
r128_do_engine_reset(dev);
return 0;
}
/* Just reset the CCE ring. Called as part of an X Server engine reset.
*/
int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
DEV_INIT_TEST_WITH_RETURN(dev_priv);
r128_do_cce_reset(dev_priv);
/* The CCE is no longer running after an engine reset */
dev_priv->cce_running = 0;
return 0;
}
int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
DEV_INIT_TEST_WITH_RETURN(dev_priv);
if (dev_priv->cce_running)
r128_do_cce_flush(dev_priv);
return r128_do_cce_idle(dev_priv);
}
int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
DEV_INIT_TEST_WITH_RETURN(dev->dev_private);
return r128_do_engine_reset(dev);
}
int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
return -EINVAL;
}
/* ================================================================
* Freelist management
*/
#define R128_BUFFER_USED 0xffffffff
#define R128_BUFFER_FREE 0
#if 0
static int r128_freelist_init(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
drm_r128_freelist_t *entry;
int i;
dev_priv->head = kzalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL);
if (dev_priv->head == NULL)
return -ENOMEM;
dev_priv->head->age = R128_BUFFER_USED;
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
entry = kmalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->age = R128_BUFFER_FREE;
entry->buf = buf;
entry->prev = dev_priv->head;
entry->next = dev_priv->head->next;
if (!entry->next)
dev_priv->tail = entry;
buf_priv->discard = 0;
buf_priv->dispatched = 0;
buf_priv->list_entry = entry;
dev_priv->head->next = entry;
if (dev_priv->head->next)
dev_priv->head->next->prev = entry;
}
return 0;
}
#endif
static struct drm_buf *r128_freelist_get(struct drm_device * dev)
{
struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv;
struct drm_buf *buf;
int i, t;
/* FIXME: Optimize -- use freelist code */
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
if (!buf->file_priv)
return buf;
}
for (t = 0; t < dev_priv->usec_timeout; t++) {
u32 done_age = R128_READ(R128_LAST_DISPATCH_REG);
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
if (buf->pending && buf_priv->age <= done_age) {
/* The buffer has been processed, so it
* can now be used.
*/
buf->pending = 0;
return buf;
}
}
DRM_UDELAY(1);
}
DRM_DEBUG("returning NULL!\n");
return NULL;
}
void r128_freelist_reset(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
int i;
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->age = 0;
}
}
/* ================================================================
* CCE command submission
*/
int r128_wait_ring(drm_r128_private_t *dev_priv, int n)
{
drm_r128_ring_buffer_t *ring = &dev_priv->ring;
int i;
for (i = 0; i < dev_priv->usec_timeout; i++) {
r128_update_ring_snapshot(dev_priv);
if (ring->space >= n)
return 0;
DRM_UDELAY(1);
}
/* FIXME: This is being ignored... */
DRM_ERROR("failed!\n");
return -EBUSY;
}
static int r128_cce_get_buffers(struct drm_device *dev,
struct drm_file *file_priv,
struct drm_dma *d)
{
int i;
struct drm_buf *buf;
for (i = d->granted_count; i < d->request_count; i++) {
buf = r128_freelist_get(dev);
if (!buf)
return -EAGAIN;
buf->file_priv = file_priv;
if (copy_to_user(&d->request_indices[i], &buf->idx,
sizeof(buf->idx)))
return -EFAULT;
if (copy_to_user(&d->request_sizes[i], &buf->total,
sizeof(buf->total)))
return -EFAULT;
d->granted_count++;
}
return 0;
}
int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int ret = 0;
struct drm_dma *d = data;
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Please don't send us buffers.
*/
if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
DRM_CURRENTPID, d->send_count);
return -EINVAL;
}
/* We'll send you buffers.
*/
if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
DRM_CURRENTPID, d->request_count, dma->buf_count);
return -EINVAL;
}
d->granted_count = 0;
if (d->request_count)
ret = r128_cce_get_buffers(dev, file_priv, d);
return ret;
}
| gpl-2.0 |
turl/zeppelin_kernel | drivers/net/phy/et1011c.c | 709 | 2829 | /*
* drivers/net/phy/et1011c.c
*
* Driver for LSI ET1011C PHYs
*
* Author: Chaithrika U S
*
* Copyright (c) 2008 Texas Instruments
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#define ET1011C_STATUS_REG (0x1A)
#define ET1011C_CONFIG_REG (0x16)
#define ET1011C_SPEED_MASK (0x0300)
#define ET1011C_GIGABIT_SPEED (0x0200)
#define ET1011C_TX_FIFO_MASK (0x3000)
#define ET1011C_TX_FIFO_DEPTH_8 (0x0000)
#define ET1011C_TX_FIFO_DEPTH_16 (0x1000)
#define ET1011C_INTERFACE_MASK (0x0007)
#define ET1011C_GMII_INTERFACE (0x0002)
#define ET1011C_SYS_CLK_EN (0x01 << 4)
MODULE_DESCRIPTION("LSI ET1011C PHY driver");
MODULE_AUTHOR("Chaithrika U S");
MODULE_LICENSE("GPL");
static int et1011c_config_aneg(struct phy_device *phydev)
{
int ctl = 0;
ctl = phy_read(phydev, MII_BMCR);
if (ctl < 0)
return ctl;
ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 |
BMCR_ANENABLE);
/* First clear the PHY */
phy_write(phydev, MII_BMCR, ctl | BMCR_RESET);
return genphy_config_aneg(phydev);
}
static int et1011c_read_status(struct phy_device *phydev)
{
int ret;
u32 val;
static int speed;
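	/*
	 * Note: this function-local static is shared by every ET1011C PHY
	 * in the system; it only caches the last speed seen so the FIFO
	 * depth and interface bits are rewritten just on a speed change.
	 */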
ret = genphy_read_status(phydev);
if (speed != phydev->speed) {
speed = phydev->speed;
val = phy_read(phydev, ET1011C_STATUS_REG);
if ((val & ET1011C_SPEED_MASK) ==
ET1011C_GIGABIT_SPEED) {
val = phy_read(phydev, ET1011C_CONFIG_REG);
val &= ~ET1011C_TX_FIFO_MASK;
			phy_write(phydev, ET1011C_CONFIG_REG, val |
				  ET1011C_GMII_INTERFACE |
				  ET1011C_SYS_CLK_EN |
				  ET1011C_TX_FIFO_DEPTH_16);
}
}
return ret;
}
static struct phy_driver et1011c_driver = {
.phy_id = 0x0282f014,
.name = "ET1011C",
.phy_id_mask = 0xfffffff0,
.features = (PHY_BASIC_FEATURES | SUPPORTED_1000baseT_Full),
.flags = PHY_POLL,
.config_aneg = et1011c_config_aneg,
.read_status = et1011c_read_status,
.driver = { .owner = THIS_MODULE,},
};
static int __init et1011c_init(void)
{
return phy_driver_register(&et1011c_driver);
}
static void __exit et1011c_exit(void)
{
phy_driver_unregister(&et1011c_driver);
}
module_init(et1011c_init);
module_exit(et1011c_exit);
| gpl-2.0 |
sub77/matissewifi | sound/soc/msm/qdsp6v2/q6lsm.c | 709 | 24799 | /*
* Copyright (c) 2013, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/memory_alloc.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <linux/atomic.h>
#include <sound/apr_audio-v2.h>
#include <sound/lsm_params.h>
#include <sound/q6lsm.h>
#include <asm/ioctls.h>
#include <mach/memory.h>
#include <mach/debug_mm.h>
#include "audio_acdb.h"
#include "q6core.h"
#define APR_TIMEOUT (5 * HZ)
#define LSM_CAL_SIZE 4096
enum {
CMD_STATE_CLEARED = 0,
CMD_STATE_WAIT_RESP = 1,
};
enum {
LSM_INVALID_SESSION_ID = 0,
LSM_MIN_SESSION_ID = 1,
LSM_MAX_SESSION_ID = 8,
LSM_CONTROL_SESSION = 0x0F,
};
struct lsm_common {
void *apr;
atomic_t apr_users;
uint32_t lsm_cal_addr;
uint32_t lsm_cal_size;
uint32_t mmap_handle_for_cal;
struct lsm_client common_client;
struct mutex apr_lock;
};
static struct lsm_common lsm_common;
/*
* mmap_handle_p can point either client->sound_model.mem_map_handle or
* lsm_common.mmap_handle_for_cal.
* mmap_lock must be held while accessing this.
*/
static spinlock_t mmap_lock;
static uint32_t *mmap_handle_p;
static spinlock_t lsm_session_lock;
static struct lsm_client *lsm_session[LSM_MAX_SESSION_ID + 1];
static int q6lsm_mmapcallback(struct apr_client_data *data, void *priv);
static int q6lsm_send_cal(struct lsm_client *client);
static int q6lsm_callback(struct apr_client_data *data, void *priv)
{
struct lsm_client *client = (struct lsm_client *)priv;
uint32_t token;
uint32_t *payload;
if (!client || !data) {
WARN_ON(1);
return -EINVAL;
}
if (data->opcode == RESET_EVENTS) {
pr_debug("%s: SSR event received 0x%x, event 0x%x, proc 0x%x\n",
__func__, data->opcode, data->reset_event,
data->reset_proc);
return 0;
}
payload = data->payload;
pr_debug("%s: Session %d opcode 0x%x token 0x%x payload size %d\n",
__func__, client->session,
data->opcode, data->token, data->payload_size);
if (data->opcode == APR_BASIC_RSP_RESULT) {
token = data->token;
switch (payload[0]) {
case LSM_SESSION_CMD_START:
case LSM_SESSION_CMD_STOP:
case LSM_SESSION_CMD_SET_PARAMS:
case LSM_SESSION_CMD_OPEN_TX:
case LSM_SESSION_CMD_CLOSE_TX:
case LSM_SESSION_CMD_REGISTER_SOUND_MODEL:
case LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL:
case LSM_SESSION_CMD_SHARED_MEM_UNMAP_REGIONS:
if (token != client->session &&
payload[0] !=
LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL) {
pr_err("%s: Invalid session %d receivced expected %d\n",
__func__, token, client->session);
return -EINVAL;
}
if (atomic_cmpxchg(&client->cmd_state,
CMD_STATE_WAIT_RESP,
CMD_STATE_CLEARED) ==
CMD_STATE_WAIT_RESP)
wake_up(&client->cmd_wait);
break;
default:
pr_debug("%s: Unknown command 0x%x\n",
__func__, payload[0]);
break;
}
return 0;
}
if (client->cb)
client->cb(data->opcode, data->token, data->payload,
client->priv);
return 0;
}
static int q6lsm_session_alloc(struct lsm_client *client)
{
unsigned long flags;
int n, ret = -ENOMEM;
spin_lock_irqsave(&lsm_session_lock, flags);
for (n = LSM_MIN_SESSION_ID; n <= LSM_MAX_SESSION_ID; n++) {
if (!lsm_session[n]) {
lsm_session[n] = client;
ret = n;
break;
}
}
spin_unlock_irqrestore(&lsm_session_lock, flags);
return ret;
}
static void q6lsm_session_free(struct lsm_client *client)
{
unsigned long flags;
pr_debug("%s: Freeing session ID %d\n", __func__, client->session);
spin_lock_irqsave(&lsm_session_lock, flags);
	lsm_session[client->session] = NULL;
spin_unlock_irqrestore(&lsm_session_lock, flags);
client->session = LSM_INVALID_SESSION_ID;
}
static void *q6lsm_mmap_apr_reg(void)
{
if (atomic_inc_return(&lsm_common.apr_users) == 1) {
lsm_common.apr =
apr_register("ADSP", "LSM", q6lsm_mmapcallback,
0x0FFFFFFFF, &lsm_common);
if (!lsm_common.apr) {
pr_debug("%s Unable to register APR LSM common port\n",
__func__);
atomic_dec(&lsm_common.apr_users);
}
}
return lsm_common.apr;
}
static int q6lsm_mmap_apr_dereg(void)
{
if (atomic_read(&lsm_common.apr_users) <= 0) {
WARN("%s: APR common port already closed\n", __func__);
} else {
if (atomic_dec_return(&lsm_common.apr_users) == 0) {
apr_deregister(lsm_common.apr);
pr_debug("%s:APR De-Register common port\n", __func__);
}
}
return 0;
}
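/*
 * The two helpers above implement a simple refcounted singleton: the
 * first client to call q6lsm_mmap_apr_reg() opens the shared APR port
 * and the last one to call q6lsm_mmap_apr_dereg() closes it, with
 * atomic_inc_return()/atomic_dec_return() deciding who is first/last.
 */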
struct lsm_client *q6lsm_client_alloc(lsm_app_cb cb, void *priv)
{
struct lsm_client *client;
int n;
pr_debug("%s: enter\n", __func__);
client = kzalloc(sizeof(struct lsm_client), GFP_KERNEL);
if (!client)
return NULL;
n = q6lsm_session_alloc(client);
if (n <= 0) {
kfree(client);
return NULL;
}
pr_debug("%s: New client session %d\n", __func__, client->session);
client->session = n;
client->cb = cb;
client->priv = priv;
client->apr = apr_register("ADSP", "LSM", q6lsm_callback,
((client->session) << 8 | 0x0001), client);
if (client->apr == NULL) {
pr_err("%s: Registration with APR failed\n", __func__);
goto fail;
}
pr_debug("%s Registering the common port with APR\n", __func__);
client->mmap_apr = q6lsm_mmap_apr_reg();
if (!client->mmap_apr) {
pr_err("%s: APR registration failed\n", __func__);
goto fail;
}
init_waitqueue_head(&client->cmd_wait);
mutex_init(&client->cmd_lock);
atomic_set(&client->cmd_state, CMD_STATE_CLEARED);
pr_debug("%s: New client allocated\n", __func__);
return client;
fail:
q6lsm_client_free(client);
return NULL;
}
void q6lsm_client_free(struct lsm_client *client)
{
if (!client || !client->session)
return;
apr_deregister(client->apr);
client->mmap_apr = NULL;
q6lsm_session_free(client);
q6lsm_mmap_apr_dereg();
mutex_destroy(&client->cmd_lock);
kfree(client);
}
/*
* q6lsm_apr_send_pkt : If wait == true, hold mutex to prevent from preempting
* other thread's wait.
* If mmap_handle_p != NULL, disable irq and spin lock to
* protect mmap_handle_p
*/
static int q6lsm_apr_send_pkt(struct lsm_client *client, void *handle,
void *data, bool wait, uint32_t *mmap_p)
{
int ret;
unsigned long flags = 0;
pr_debug("%s: enter wait %d\n", __func__, wait);
if (wait)
mutex_lock(&lsm_common.apr_lock);
if (mmap_p) {
WARN_ON(!wait);
spin_lock_irqsave(&mmap_lock, flags);
mmap_handle_p = mmap_p;
}
atomic_set(&client->cmd_state, CMD_STATE_WAIT_RESP);
ret = apr_send_pkt(handle, data);
if (mmap_p)
spin_unlock_irqrestore(&mmap_lock, flags);
if (ret < 0) {
pr_err("%s: apr_send_pkt failed %d\n", __func__, ret);
} else if (wait) {
ret = wait_event_timeout(client->cmd_wait,
(atomic_read(&client->cmd_state) ==
CMD_STATE_CLEARED),
APR_TIMEOUT);
if (likely(ret))
ret = 0;
else
pr_err("%s: wait timedout\n", __func__);
} else {
ret = 0;
}
if (wait)
mutex_unlock(&lsm_common.apr_lock);
pr_debug("%s: leave ret %d\n", __func__, ret);
return ret;
}
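/*
 * The wait handshake above pairs with q6lsm_callback(): cmd_state is set
 * to CMD_STATE_WAIT_RESP before the packet goes out, and the callback's
 * atomic_cmpxchg() back to CMD_STATE_CLEARED is what satisfies
 * wait_event_timeout().  Holding apr_lock for the whole send+wait keeps
 * a second waiter from being woken by the wrong response.
 */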
static void q6lsm_add_hdr(struct lsm_client *client, struct apr_hdr *hdr,
uint32_t pkt_size, bool cmd_flg)
{
pr_debug("%s: pkt_size %d cmd_flg %d session %d\n", __func__,
pkt_size, cmd_flg, client->session);
hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
APR_HDR_LEN(sizeof(struct apr_hdr)),
APR_PKT_VER);
hdr->src_svc = APR_SVC_LSM;
hdr->src_domain = APR_DOMAIN_APPS;
hdr->dest_svc = APR_SVC_LSM;
hdr->dest_domain = APR_DOMAIN_ADSP;
hdr->src_port = ((client->session << 8) & 0xFF00) | 0x01;
hdr->dest_port = ((client->session << 8) & 0xFF00) | 0x01;
hdr->pkt_size = pkt_size;
if (cmd_flg)
hdr->token = client->session;
}
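/*
 * Worked example of the port/token packing above: for session 3 the
 * source and destination ports both become (3 << 8) | 0x01 = 0x0301,
 * and the token carries the bare session number.  The mmap path instead
 * uses a token of (session << 8), which q6lsm_mmapcallback() unpacks
 * via (token >> 8) & 0x0F.
 */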
int q6lsm_open(struct lsm_client *client)
{
int rc;
struct lsm_stream_cmd_open_tx open;
memset(&open, 0, sizeof(open));
q6lsm_add_hdr(client, &open.hdr, sizeof(open), true);
open.hdr.opcode = LSM_SESSION_CMD_OPEN_TX;
open.app_id = 1;
open.sampling_rate = 16000;
rc = q6lsm_apr_send_pkt(client, client->apr, &open, true, NULL);
if (rc)
pr_err("%s: Open failed opcode 0x%x, rc %d\n",
__func__, open.hdr.opcode, rc);
return rc;
}
static int q6lsm_set_params(struct lsm_client *client)
{
int rc;
struct lsm_cmd_set_params params;
struct lsm_params_payload *payload = ¶ms.payload;
pr_debug("%s: enter\n", __func__);
q6lsm_add_hdr(client, ¶ms.hdr, sizeof(params), true);
params.hdr.opcode = LSM_SESSION_CMD_SET_PARAMS;
params.data_payload_addr_lsw = 0;
params.data_payload_addr_msw = 0;
params.mem_map_handle = 0;
params.data_payload_size = sizeof(struct lsm_params_payload);
payload->op_mode.common.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
payload->op_mode.common.param_id = LSM_PARAM_ID_OPERATION_MODE;
payload->op_mode.common.param_size =
sizeof(struct lsm_param_op_mode) - sizeof(payload->op_mode.common);
payload->op_mode.common.reserved = 0;
payload->op_mode.minor_version = 1;
payload->op_mode.mode = client->mode;
payload->op_mode.reserved = 0;
payload->connect_to_port.common.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
payload->connect_to_port.common.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
payload->connect_to_port.common.param_size =
sizeof(payload->connect_to_port) - sizeof(payload->op_mode.common);
payload->connect_to_port.common.reserved = 0;
payload->connect_to_port.minor_version = 1;
payload->connect_to_port.port_id = client->connect_to_port;
payload->connect_to_port.reserved = 0;
payload->kwds.common.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
payload->kwds.common.param_id = LSM_PARAM_ID_KEYWORD_DETECT_SENSITIVITY;
payload->kwds.common.param_size =
sizeof(payload->kwds) - sizeof(payload->op_mode.common);
payload->kwds.common.reserved = 0;
payload->kwds.minor_version = 1;
payload->kwds.keyword_sensitivity = client->kw_sensitivity;
payload->kwds.reserved = 0;
payload->uds.common.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
payload->uds.common.param_id = LSM_PARAM_ID_USER_DETECT_SENSITIVITY;
payload->uds.common.param_size =
sizeof(payload->uds) - sizeof(payload->op_mode.common);
payload->uds.common.reserved = 0;
payload->uds.minor_version = 1;
payload->uds.user_sensitivity = client->user_sensitivity;
payload->uds.reserved = 0;
rc = q6lsm_apr_send_pkt(client, client->apr, ¶ms, true, NULL);
if (rc)
pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
__func__, params.hdr.opcode, rc);
pr_debug("%s: leave %d\n", __func__, rc);
return rc;
}
int q6lsm_register_sound_model(struct lsm_client *client,
enum lsm_detection_mode mode, u16 minkeyword,
u16 minuser, bool detectfailure)
{
int rc;
struct lsm_cmd_reg_snd_model cmd;
memset(&cmd, 0, sizeof(cmd));
if (mode == LSM_MODE_KEYWORD_ONLY_DETECTION) {
client->mode = 0x01;
} else if (mode == LSM_MODE_USER_KEYWORD_DETECTION) {
client->mode = 0x03;
} else {
pr_err("%s: Incorrect detection mode %d\n", __func__, mode);
return -EINVAL;
}
client->mode |= detectfailure << 2;
client->kw_sensitivity = minkeyword;
client->user_sensitivity = minuser;
client->connect_to_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX;
rc = q6lsm_set_params(client);
if (rc < 0) {
pr_err("%s: Failed to set lsm config params\n", __func__);
return rc;
}
rc = q6lsm_send_cal(client);
if (rc < 0) {
pr_err("%s: Failed to send calibration data\n", __func__);
return rc;
}
q6lsm_add_hdr(client, &cmd.hdr, sizeof(cmd), true);
cmd.hdr.opcode = LSM_SESSION_CMD_REGISTER_SOUND_MODEL;
cmd.model_addr_lsw = client->sound_model.phys;
cmd.model_addr_msw = 0;
cmd.model_size = client->sound_model.size;
/* read updated mem_map_handle by q6lsm_mmapcallback */
rmb();
cmd.mem_map_handle = client->sound_model.mem_map_handle;
pr_debug("%s: lsw %x, size %d, handle %x\n", __func__,
cmd.model_addr_lsw, cmd.model_size, cmd.mem_map_handle);
rc = q6lsm_apr_send_pkt(client, client->apr, &cmd, true, NULL);
if (rc)
pr_err("%s: Failed cmd op[0x%x]rc[%d]\n", __func__,
cmd.hdr.opcode, rc);
else
pr_debug("%s: Register sound model succeeded\n", __func__);
return rc;
}
int q6lsm_deregister_sound_model(struct lsm_client *client)
{
int rc;
struct lsm_cmd_reg_snd_model cmd;
if (!client || !client->apr) {
pr_err("APR handle NULL\n");
return -EINVAL;
}
pr_debug("%s: session[%d]", __func__, client->session);
memset(&cmd, 0, sizeof(cmd));
q6lsm_add_hdr(client, &cmd.hdr, sizeof(cmd.hdr), false);
cmd.hdr.opcode = LSM_SESSION_CMD_DEREGISTER_SOUND_MODEL;
rc = q6lsm_apr_send_pkt(client, client->apr, &cmd.hdr, true, NULL);
if (rc < 0) {
pr_err("%s: Failed cmd opcode 0x%x, rc %d\n", __func__,
cmd.hdr.opcode, rc);
} else {
pr_debug("%s: Deregister sound model succeeded\n", __func__);
}
q6lsm_snd_model_buf_free(client);
return rc;
}
static void q6lsm_add_mmaphdr(struct lsm_client *client, struct apr_hdr *hdr,
u32 pkt_size, u32 cmd_flg, u32 token)
{
pr_debug("%s:pkt size=%d cmd_flg=%d session=%d\n", __func__, pkt_size,
cmd_flg, client->session);
hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
hdr->src_port = 0x00;
hdr->dest_port = client->session;
if (cmd_flg)
hdr->token = token;
hdr->pkt_size = pkt_size;
return;
}
static int q6lsm_memory_map_regions(struct lsm_client *client,
uint32_t dma_addr_p, uint32_t dma_buf_sz,
uint32_t *mmap_p)
{
struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
struct avs_shared_map_region_payload *mregions = NULL;
void *mmap_region_cmd = NULL;
void *payload = NULL;
int rc;
int cmd_size = 0;
pr_debug("%s: dma_addr_p 0x%x, dma_buf_sz %d, mmap_p 0x%p, session %d\n",
__func__, dma_addr_p, dma_buf_sz, mmap_p, client->session);
cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions) +
sizeof(struct avs_shared_map_region_payload);
mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
if (!mmap_region_cmd)
return -ENOMEM;
mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
q6lsm_add_mmaphdr(client, &mmap_regions->hdr, cmd_size, true,
(client->session << 8));
mmap_regions->hdr.opcode = LSM_SESSION_CMD_SHARED_MEM_MAP_REGIONS;
mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL;
mmap_regions->num_regions = 1;
mmap_regions->property_flag = 0x00;
payload = ((u8 *)mmap_region_cmd +
sizeof(struct avs_cmd_shared_mem_map_regions));
mregions = (struct avs_shared_map_region_payload *)payload;
mregions->shm_addr_lsw = dma_addr_p;
mregions->shm_addr_msw = 0;
mregions->mem_size_bytes = dma_buf_sz;
rc = q6lsm_apr_send_pkt(client, client->mmap_apr, mmap_region_cmd,
true, mmap_p);
if (rc)
pr_err("%s: Failed mmap_regions opcode 0x%x, rc %d\n",
__func__, mmap_regions->hdr.opcode, rc);
pr_debug("%s: leave %d\n", __func__, rc);
kfree(mmap_region_cmd);
return rc;
}
static int q6lsm_memory_unmap_regions(struct lsm_client *client,
uint32_t handle)
{
struct avs_cmd_shared_mem_unmap_regions unmap;
int rc = 0;
int cmd_size = 0;
cmd_size = sizeof(struct avs_cmd_shared_mem_unmap_regions);
q6lsm_add_mmaphdr(client, &unmap.hdr, cmd_size,
true, (client->session << 8));
unmap.hdr.opcode = LSM_SESSION_CMD_SHARED_MEM_UNMAP_REGIONS;
unmap.mem_map_handle = handle;
pr_debug("%s: unmap handle 0x%x\n", __func__, unmap.mem_map_handle);
rc = q6lsm_apr_send_pkt(client, client->mmap_apr, &unmap, true,
NULL);
if (rc)
pr_err("%s: Failed mmap_regions opcode 0x%x rc %d\n",
__func__, unmap.hdr.opcode, rc);
return rc;
}
static int q6lsm_send_cal(struct lsm_client *client)
{
int rc;
struct lsm_cmd_set_params params;
struct acdb_cal_block lsm_cal;
pr_debug("%s: enter\n", __func__);
memset(&lsm_cal, 0, sizeof(lsm_cal));
get_lsm_cal(&lsm_cal);
if (!lsm_cal.cal_size) {
pr_err("%s: Could not get LSM calibration data\n", __func__);
rc = -EINVAL;
goto bail;
}
/* Cache mmap address, only map once or if new addr */
if ((lsm_common.lsm_cal_addr != lsm_cal.cal_paddr) ||
(lsm_cal.cal_size > lsm_common.lsm_cal_size)) {
if (lsm_common.lsm_cal_addr != 0) {
rc = q6lsm_memory_unmap_regions(client,
lsm_common.mmap_handle_for_cal);
if (rc)
pr_warn("%s: Unmapping %x failed, %d\n",
__func__, lsm_common.lsm_cal_addr, rc);
}
rc = q6lsm_memory_map_regions(client, lsm_cal.cal_paddr,
LSM_CAL_SIZE,
&lsm_common.mmap_handle_for_cal);
if (rc < 0) {
pr_err("%s: Calibration data memory map failed, %d\n",
__func__, rc);
goto bail;
}
lsm_common.lsm_cal_addr = lsm_cal.cal_paddr;
lsm_common.lsm_cal_size = LSM_CAL_SIZE;
lsm_common.common_client.session = client->session;
}
q6lsm_add_hdr(client, ¶ms.hdr, sizeof(params), true);
params.hdr.opcode = LSM_SESSION_CMD_SET_PARAMS;
params.data_payload_addr_lsw = lsm_cal.cal_paddr;
params.data_payload_addr_msw = 0;
/* read updated mem_map_handle by q6lsm_mmapcallback */
rmb();
params.mem_map_handle = lsm_common.mmap_handle_for_cal;
params.data_payload_size = lsm_cal.cal_size;
rc = q6lsm_apr_send_pkt(client, client->apr, ¶ms, true, NULL);
if (rc)
pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
__func__, params.hdr.opcode, rc);
bail:
return rc;
}
int q6lsm_unmap_cal_blocks(void)
{
int result = 0;
int result2 = 0;
if (lsm_common.mmap_handle_for_cal == 0)
goto done;
if (lsm_common.common_client.mmap_apr == NULL) {
lsm_common.common_client.mmap_apr = q6lsm_mmap_apr_reg();
if (lsm_common.common_client.mmap_apr == NULL) {
pr_err("%s: q6lsm_mmap_apr_reg failed\n",
__func__);
result = -EPERM;
goto done;
}
}
result2 = q6lsm_memory_unmap_regions(
&lsm_common.common_client,
lsm_common.mmap_handle_for_cal);
if (result2 < 0) {
pr_err("%s: unmap failed, err %d\n",
__func__, result2);
result = result2;
} else {
lsm_common.mmap_handle_for_cal = 0;
}
result2 = q6lsm_mmap_apr_dereg();
if (result2 < 0) {
pr_err("%s: q6lsm_mmap_apr_dereg failed, err %d\n",
__func__, result2);
result = result2;
} else {
lsm_common.common_client.mmap_apr = NULL;
}
lsm_common.lsm_cal_addr = 0;
lsm_common.lsm_cal_size = 0;
done:
return result;
}
int q6lsm_snd_model_buf_free(struct lsm_client *client)
{
int rc;
pr_debug("%s: Session id %d\n", __func__, client->session);
mutex_lock(&client->cmd_lock);
rc = q6lsm_memory_unmap_regions(client,
client->sound_model.mem_map_handle);
if (rc < 0)
pr_err("%s CMD Memory_unmap_regions failed\n", __func__);
if (client->sound_model.data) {
ion_unmap_kernel(client->sound_model.client,
client->sound_model.handle);
ion_free(client->sound_model.client,
client->sound_model.handle);
ion_client_destroy(client->sound_model.client);
client->sound_model.data = NULL;
client->sound_model.phys = 0;
}
mutex_unlock(&client->cmd_lock);
return rc;
}
static struct lsm_client *q6lsm_get_lsm_client(int session_id)
{
unsigned long flags;
struct lsm_client *client = NULL;
if (session_id == LSM_CONTROL_SESSION) {
client = &lsm_common.common_client;
goto done;
}
spin_lock_irqsave(&lsm_session_lock, flags);
if (session_id < LSM_MIN_SESSION_ID || session_id > LSM_MAX_SESSION_ID)
pr_err("%s: Invalid session %d\n", __func__, session_id);
else if (!lsm_session[session_id])
pr_err("%s: Not an active session %d\n", __func__, session_id);
else
client = lsm_session[session_id];
spin_unlock_irqrestore(&lsm_session_lock, flags);
done:
return client;
}
/*
* q6lsm_mmapcallback : atomic context
*/
static int q6lsm_mmapcallback(struct apr_client_data *data, void *priv)
{
unsigned long flags;
uint32_t command;
uint32_t retcode;
uint32_t sid;
const uint32_t *payload = data->payload;
struct lsm_client *client = NULL;
if (data->opcode == RESET_EVENTS) {
pr_debug("%s: SSR event received 0x%x, event 0x%x, proc 0x%x\n",
__func__, data->opcode, data->reset_event,
data->reset_proc);
lsm_common.lsm_cal_addr = 0;
return 0;
}
command = payload[0];
retcode = payload[1];
pr_debug("%s: opcode 0x%x command 0x%x return code 0x%x\n", __func__,
data->opcode, command, retcode);
sid = (data->token >> 8) & 0x0F;
client = q6lsm_get_lsm_client(sid);
if (!client) {
pr_debug("%s: Session %d already freed\n", __func__, sid);
return 0;
}
switch (data->opcode) {
case LSM_SESSION_CMDRSP_SHARED_MEM_MAP_REGIONS:
if (atomic_read(&client->cmd_state) == CMD_STATE_WAIT_RESP) {
spin_lock_irqsave(&mmap_lock, flags);
*mmap_handle_p = command;
/* spin_unlock_irqrestore implies barrier */
spin_unlock_irqrestore(&mmap_lock, flags);
atomic_set(&client->cmd_state, CMD_STATE_CLEARED);
wake_up(&client->cmd_wait);
}
break;
case APR_BASIC_RSP_RESULT:
if (command == LSM_SESSION_CMD_SHARED_MEM_UNMAP_REGIONS) {
atomic_set(&client->cmd_state, CMD_STATE_CLEARED);
wake_up(&client->cmd_wait);
} else {
pr_warn("%s: Unexpected command 0x%x\n", __func__,
command);
}
break;
default:
pr_debug("%s: command 0x%x return code 0x%x\n",
__func__, command, retcode);
break;
}
if (client->cb)
client->cb(data->opcode, data->token,
data->payload, client->priv);
return 0;
}
int q6lsm_snd_model_buf_alloc(struct lsm_client *client, uint32_t len)
{
int rc = -EINVAL;
if (!client)
return rc;
mutex_lock(&client->cmd_lock);
if (!client->sound_model.data) {
client->sound_model.client =
msm_ion_client_create(UINT_MAX, "lsm_client");
if (IS_ERR_OR_NULL(client->sound_model.client)) {
pr_err("%s: ION create client for AUDIO failed\n",
__func__);
goto fail;
}
client->sound_model.handle =
ion_alloc(client->sound_model.client,
len, SZ_4K, (0x1 << ION_AUDIO_HEAP_ID), 0);
if (IS_ERR_OR_NULL(client->sound_model.handle)) {
pr_err("%s: ION memory allocation for AUDIO failed\n",
__func__);
goto fail;
}
rc = ion_phys(client->sound_model.client,
client->sound_model.handle,
(ion_phys_addr_t *)&client->sound_model.phys,
(size_t *)&len);
if (rc) {
pr_err("%s: ION get physical mem failed, rc%d\n",
__func__, rc);
goto fail;
}
client->sound_model.data =
ion_map_kernel(client->sound_model.client,
client->sound_model.handle);
if (IS_ERR_OR_NULL(client->sound_model.data)) {
pr_err("%s: ION memory mapping failed\n", __func__);
goto fail;
}
memset(client->sound_model.data, 0, len);
client->sound_model.size = len;
} else {
rc = -EBUSY;
goto fail;
}
mutex_unlock(&client->cmd_lock);
rc = q6lsm_memory_map_regions(client, client->sound_model.phys,
client->sound_model.size,
&client->sound_model.mem_map_handle);
if (rc < 0) {
pr_err("%s:CMD Memory_map_regions failed\n", __func__);
goto exit;
}
return 0;
fail:
mutex_unlock(&client->cmd_lock);
exit:
q6lsm_snd_model_buf_free(client);
return rc;
}
static int q6lsm_cmd(struct lsm_client *client, int opcode, bool wait)
{
struct apr_hdr hdr;
int rc;
pr_debug("%s: enter opcode %d wait %d\n", __func__, opcode, wait);
q6lsm_add_hdr(client, &hdr, sizeof(hdr), true);
switch (opcode) {
case LSM_SESSION_CMD_START:
case LSM_SESSION_CMD_STOP:
case LSM_SESSION_CMD_CLOSE_TX:
hdr.opcode = opcode;
break;
default:
pr_err("%s: Invalid opcode %d\n", __func__, opcode);
return -EINVAL;
}
rc = q6lsm_apr_send_pkt(client, client->apr, &hdr, wait, NULL);
if (rc)
pr_err("%s: Failed commmand 0x%x\n", __func__, hdr.opcode);
pr_debug("%s: leave %d\n", __func__, rc);
return rc;
}
int q6lsm_start(struct lsm_client *client, bool wait)
{
return q6lsm_cmd(client, LSM_SESSION_CMD_START, wait);
}
int q6lsm_stop(struct lsm_client *client, bool wait)
{
return q6lsm_cmd(client, LSM_SESSION_CMD_STOP, wait);
}
int q6lsm_close(struct lsm_client *client)
{
return q6lsm_cmd(client, LSM_SESSION_CMD_CLOSE_TX, true);
}
static int __init q6lsm_init(void)
{
pr_debug("%s\n", __func__);
spin_lock_init(&lsm_session_lock);
spin_lock_init(&mmap_lock);
mutex_init(&lsm_common.apr_lock);
lsm_common.common_client.session = LSM_CONTROL_SESSION;
init_waitqueue_head(&lsm_common.common_client.cmd_wait);
mutex_init(&lsm_common.common_client.cmd_lock);
atomic_set(&lsm_common.common_client.cmd_state, CMD_STATE_CLEARED);
return 0;
}
device_initcall(q6lsm_init);
| gpl-2.0 |
senlan008/linux-1 | sound/isa/sb/sb16.c | 1221 | 20226 | /*
* Driver for SoundBlaster 16/AWE32/AWE64 soundcards
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <asm/dma.h>
#include <linux/init.h>
#include <linux/pnp.h>
#include <linux/err.h>
#include <linux/isa.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/sb.h>
#include <sound/sb16_csp.h>
#include <sound/mpu401.h>
#include <sound/opl3.h>
#include <sound/emu8000.h>
#include <sound/seq_device.h>
#define SNDRV_LEGACY_FIND_FREE_IRQ
#define SNDRV_LEGACY_FIND_FREE_DMA
#include <sound/initval.h>
#ifdef SNDRV_SBAWE
#define PFX "sbawe: "
#else
#define PFX "sb16: "
#endif
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_LICENSE("GPL");
#ifndef SNDRV_SBAWE
MODULE_DESCRIPTION("Sound Blaster 16");
MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB 16},"
"{Creative Labs,SB Vibra16S},"
"{Creative Labs,SB Vibra16C},"
"{Creative Labs,SB Vibra16CL},"
"{Creative Labs,SB Vibra16X}}");
#else
MODULE_DESCRIPTION("Sound Blaster AWE");
MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB AWE 32},"
"{Creative Labs,SB AWE 64},"
"{Creative Labs,SB AWE 64 Gold}}");
#endif
#if 0
#define SNDRV_DEBUG_IRQ
#endif
#if defined(SNDRV_SBAWE) && (defined(CONFIG_SND_SEQUENCER) || (defined(MODULE) && defined(CONFIG_SND_SEQUENCER_MODULE)))
#define SNDRV_SBAWE_EMU8000
#endif
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */
#ifdef CONFIG_PNP
static bool isapnp[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};
#endif
static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0x220,0x240,0x260,0x280 */
static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0x330,0x300 */
static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
#ifdef SNDRV_SBAWE_EMU8000
static long awe_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
#endif
static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 5,7,9,10 */
static int dma8[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3 */
static int dma16[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 5,6,7 */
static int mic_agc[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};
#ifdef CONFIG_SND_SB16_CSP
static int csp[SNDRV_CARDS];
#endif
#ifdef SNDRV_SBAWE_EMU8000
static int seq_ports[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 4};
#endif
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for SoundBlaster 16 soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for SoundBlaster 16 soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable SoundBlaster 16 soundcard.");
#ifdef CONFIG_PNP
module_param_array(isapnp, bool, NULL, 0444);
MODULE_PARM_DESC(isapnp, "PnP detection for specified soundcard.");
#endif
module_param_array(port, long, NULL, 0444);
MODULE_PARM_DESC(port, "Port # for SB16 driver.");
module_param_array(mpu_port, long, NULL, 0444);
MODULE_PARM_DESC(mpu_port, "MPU-401 port # for SB16 driver.");
module_param_array(fm_port, long, NULL, 0444);
MODULE_PARM_DESC(fm_port, "FM port # for SB16 PnP driver.");
#ifdef SNDRV_SBAWE_EMU8000
module_param_array(awe_port, long, NULL, 0444);
MODULE_PARM_DESC(awe_port, "AWE port # for SB16 PnP driver.");
#endif
module_param_array(irq, int, NULL, 0444);
MODULE_PARM_DESC(irq, "IRQ # for SB16 driver.");
module_param_array(dma8, int, NULL, 0444);
MODULE_PARM_DESC(dma8, "8-bit DMA # for SB16 driver.");
module_param_array(dma16, int, NULL, 0444);
MODULE_PARM_DESC(dma16, "16-bit DMA # for SB16 driver.");
module_param_array(mic_agc, int, NULL, 0444);
MODULE_PARM_DESC(mic_agc, "Mic Auto-Gain-Control switch.");
#ifdef CONFIG_SND_SB16_CSP
module_param_array(csp, int, NULL, 0444);
MODULE_PARM_DESC(csp, "ASP/CSP chip support.");
#endif
#ifdef SNDRV_SBAWE_EMU8000
module_param_array(seq_ports, int, NULL, 0444);
MODULE_PARM_DESC(seq_ports, "Number of sequencer ports for WaveTable synth.");
#endif
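/*
 * Illustrative usage (an assumption, not taken from this file): loading the
 * ISA variant with explicit legacy resources might look like:
 *
 *	modprobe snd-sb16 port=0x220 irq=5 dma8=1 dma16=5 mpu_port=0x330
 */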
#ifdef CONFIG_PNP
static int isa_registered;
static int pnp_registered;
#endif
struct snd_card_sb16 {
struct resource *fm_res; /* used to block FM i/o region for legacy cards */
struct snd_sb *chip;
#ifdef CONFIG_PNP
int dev_no;
struct pnp_dev *dev;
#ifdef SNDRV_SBAWE_EMU8000
struct pnp_dev *devwt;
#endif
#endif
};
#ifdef CONFIG_PNP
static struct pnp_card_device_id snd_sb16_pnpids[] = {
#ifndef SNDRV_SBAWE
/* Sound Blaster 16 PnP */
{ .id = "CTL0024", .devs = { { "CTL0031" } } },
/* Sound Blaster 16 PnP */
{ .id = "CTL0025", .devs = { { "CTL0031" } } },
/* Sound Blaster 16 PnP */
{ .id = "CTL0026", .devs = { { "CTL0031" } } },
/* Sound Blaster 16 PnP */
{ .id = "CTL0027", .devs = { { "CTL0031" } } },
/* Sound Blaster 16 PnP */
{ .id = "CTL0028", .devs = { { "CTL0031" } } },
/* Sound Blaster 16 PnP */
{ .id = "CTL0029", .devs = { { "CTL0031" } } },
/* Sound Blaster 16 PnP */
{ .id = "CTL002a", .devs = { { "CTL0031" } } },
/* Sound Blaster 16 PnP */
/* Note: This card has also a CTL0051:StereoEnhance device!!! */
{ .id = "CTL002b", .devs = { { "CTL0031" } } },
/* Sound Blaster 16 PnP */
{ .id = "CTL002c", .devs = { { "CTL0031" } } },
/* Sound Blaster Vibra16S */
{ .id = "CTL0051", .devs = { { "CTL0001" } } },
/* Sound Blaster Vibra16C */
{ .id = "CTL0070", .devs = { { "CTL0001" } } },
/* Sound Blaster Vibra16CL - added by ctm@ardi.com */
{ .id = "CTL0080", .devs = { { "CTL0041" } } },
/* Sound Blaster 16 'value' PnP. It says model ct4130 on the pcb, */
/* but ct4131 on a sticker on the board.. */
{ .id = "CTL0086", .devs = { { "CTL0041" } } },
/* Sound Blaster Vibra16X */
{ .id = "CTL00f0", .devs = { { "CTL0043" } } },
/* Sound Blaster 16 (Virtual PC 2004) */
{ .id = "tBA03b0", .devs = { {.id="PNPb003" } } },
#else /* SNDRV_SBAWE defined */
/* Sound Blaster AWE 32 PnP */
{ .id = "CTL0035", .devs = { { "CTL0031" }, { "CTL0021" } } },
/* Sound Blaster AWE 32 PnP */
{ .id = "CTL0039", .devs = { { "CTL0031" }, { "CTL0021" } } },
/* Sound Blaster AWE 32 PnP */
{ .id = "CTL0042", .devs = { { "CTL0031" }, { "CTL0021" } } },
/* Sound Blaster AWE 32 PnP */
{ .id = "CTL0043", .devs = { { "CTL0031" }, { "CTL0021" } } },
/* Sound Blaster AWE 32 PnP */
/* Note: This card has also a CTL0051:StereoEnhance device!!! */
{ .id = "CTL0044", .devs = { { "CTL0031" }, { "CTL0021" } } },
/* Sound Blaster AWE 32 PnP */
/* Note: This card has also a CTL0051:StereoEnhance device!!! */
{ .id = "CTL0045", .devs = { { "CTL0031" }, { "CTL0021" } } },
/* Sound Blaster AWE 32 PnP */
{ .id = "CTL0046", .devs = { { "CTL0031" }, { "CTL0021" } } },
/* Sound Blaster AWE 32 PnP */
{ .id = "CTL0047", .devs = { { "CTL0031" }, { "CTL0021" } } },
/* Sound Blaster AWE 32 PnP */
{ .id = "CTL0048", .devs = { { "CTL0031" }, { "CTL0021" } } },
/* Sound Blaster AWE 32 PnP */
{ .id = "CTL0054", .devs = { { "CTL0031" }, { "CTL0021" } } },
/* Sound Blaster AWE 32 PnP */
{ .id = "CTL009a", .devs = { { "CTL0041" }, { "CTL0021" } } },
/* Sound Blaster AWE 32 PnP */
{ .id = "CTL009c", .devs = { { "CTL0041" }, { "CTL0021" } } },
/* Sound Blaster 32 PnP */
{ .id = "CTL009f", .devs = { { "CTL0041" }, { "CTL0021" } } },
/* Sound Blaster AWE 64 PnP */
{ .id = "CTL009d", .devs = { { "CTL0042" }, { "CTL0022" } } },
/* Sound Blaster AWE 64 PnP Gold */
{ .id = "CTL009e", .devs = { { "CTL0044" }, { "CTL0023" } } },
/* Sound Blaster AWE 64 PnP Gold */
{ .id = "CTL00b2", .devs = { { "CTL0044" }, { "CTL0023" } } },
/* Sound Blaster AWE 64 PnP */
{ .id = "CTL00c1", .devs = { { "CTL0042" }, { "CTL0022" } } },
/* Sound Blaster AWE 64 PnP */
{ .id = "CTL00c3", .devs = { { "CTL0045" }, { "CTL0022" } } },
/* Sound Blaster AWE 64 PnP */
{ .id = "CTL00c5", .devs = { { "CTL0045" }, { "CTL0022" } } },
/* Sound Blaster AWE 64 PnP */
{ .id = "CTL00c7", .devs = { { "CTL0045" }, { "CTL0022" } } },
/* Sound Blaster AWE 64 PnP */
{ .id = "CTL00e4", .devs = { { "CTL0045" }, { "CTL0022" } } },
/* Sound Blaster AWE 64 PnP */
{ .id = "CTL00e9", .devs = { { "CTL0045" }, { "CTL0022" } } },
/* Sound Blaster 16 PnP (AWE) */
{ .id = "CTL00ed", .devs = { { "CTL0041" }, { "CTL0070" } } },
/* Generic entries */
{ .id = "CTLXXXX" , .devs = { { "CTL0031" }, { "CTL0021" } } },
{ .id = "CTLXXXX" , .devs = { { "CTL0041" }, { "CTL0021" } } },
{ .id = "CTLXXXX" , .devs = { { "CTL0042" }, { "CTL0022" } } },
{ .id = "CTLXXXX" , .devs = { { "CTL0044" }, { "CTL0023" } } },
{ .id = "CTLXXXX" , .devs = { { "CTL0045" }, { "CTL0022" } } },
#endif /* SNDRV_SBAWE */
{ .id = "", }
};
MODULE_DEVICE_TABLE(pnp_card, snd_sb16_pnpids);
#endif /* CONFIG_PNP */
#ifdef SNDRV_SBAWE_EMU8000
#define DRIVER_NAME "snd-card-sbawe"
#else
#define DRIVER_NAME "snd-card-sb16"
#endif
#ifdef CONFIG_PNP
static int snd_card_sb16_pnp(int dev, struct snd_card_sb16 *acard,
struct pnp_card_link *card,
const struct pnp_card_device_id *id)
{
struct pnp_dev *pdev;
int err;
acard->dev = pnp_request_card_device(card, id->devs[0].id, NULL);
if (acard->dev == NULL)
return -ENODEV;
#ifdef SNDRV_SBAWE_EMU8000
acard->devwt = pnp_request_card_device(card, id->devs[1].id, acard->dev);
#endif
/* Audio initialization */
pdev = acard->dev;
err = pnp_activate_dev(pdev);
if (err < 0) {
snd_printk(KERN_ERR PFX "AUDIO pnp configure failure\n");
return err;
}
port[dev] = pnp_port_start(pdev, 0);
mpu_port[dev] = pnp_port_start(pdev, 1);
fm_port[dev] = pnp_port_start(pdev, 2);
dma8[dev] = pnp_dma(pdev, 0);
dma16[dev] = pnp_dma(pdev, 1);
irq[dev] = pnp_irq(pdev, 0);
snd_printdd("pnp SB16: port=0x%lx, mpu port=0x%lx, fm port=0x%lx\n",
port[dev], mpu_port[dev], fm_port[dev]);
snd_printdd("pnp SB16: dma1=%i, dma2=%i, irq=%i\n",
dma8[dev], dma16[dev], irq[dev]);
#ifdef SNDRV_SBAWE_EMU8000
/* WaveTable initialization */
pdev = acard->devwt;
if (pdev != NULL) {
err = pnp_activate_dev(pdev);
if (err < 0) {
goto __wt_error;
}
awe_port[dev] = pnp_port_start(pdev, 0);
snd_printdd("pnp SB16: wavetable port=0x%llx\n",
(unsigned long long)pnp_port_start(pdev, 0));
} else {
__wt_error:
if (pdev) {
pnp_release_card_device(pdev);
snd_printk(KERN_ERR PFX "WaveTable pnp configure failure\n");
}
acard->devwt = NULL;
awe_port[dev] = -1;
}
#endif
return 0;
}
#endif /* CONFIG_PNP */
static void snd_sb16_free(struct snd_card *card)
{
struct snd_card_sb16 *acard = card->private_data;
if (acard == NULL)
return;
release_and_free_resource(acard->fm_res);
}
#ifdef CONFIG_PNP
#define is_isapnp_selected(dev) isapnp[dev]
#else
#define is_isapnp_selected(dev) 0
#endif
static int snd_sb16_card_new(struct device *devptr, int dev,
struct snd_card **cardp)
{
struct snd_card *card;
int err;
err = snd_card_new(devptr, index[dev], id[dev], THIS_MODULE,
sizeof(struct snd_card_sb16), &card);
if (err < 0)
return err;
card->private_free = snd_sb16_free;
*cardp = card;
return 0;
}
static int snd_sb16_probe(struct snd_card *card, int dev)
{
int xirq, xdma8, xdma16;
struct snd_sb *chip;
struct snd_card_sb16 *acard = card->private_data;
struct snd_opl3 *opl3;
struct snd_hwdep *synth = NULL;
#ifdef CONFIG_SND_SB16_CSP
struct snd_hwdep *xcsp = NULL;
#endif
unsigned long flags;
int err;
xirq = irq[dev];
xdma8 = dma8[dev];
xdma16 = dma16[dev];
if ((err = snd_sbdsp_create(card,
port[dev],
xirq,
snd_sb16dsp_interrupt,
xdma8,
xdma16,
SB_HW_AUTO,
&chip)) < 0)
return err;
acard->chip = chip;
if (chip->hardware != SB_HW_16) {
snd_printk(KERN_ERR PFX "SB 16 chip was not detected at 0x%lx\n", port[dev]);
return -ENODEV;
}
chip->mpu_port = mpu_port[dev];
if (! is_isapnp_selected(dev) && (err = snd_sb16dsp_configure(chip)) < 0)
return err;
if ((err = snd_sb16dsp_pcm(chip, 0)) < 0)
return err;
strcpy(card->driver,
#ifdef SNDRV_SBAWE_EMU8000
awe_port[dev] > 0 ? "SB AWE" :
#endif
"SB16");
strcpy(card->shortname, chip->name);
sprintf(card->longname, "%s at 0x%lx, irq %i, dma ",
chip->name,
chip->port,
xirq);
if (xdma8 >= 0)
sprintf(card->longname + strlen(card->longname), "%d", xdma8);
if (xdma16 >= 0)
sprintf(card->longname + strlen(card->longname), "%s%d",
xdma8 >= 0 ? "&" : "", xdma16);
if (chip->mpu_port > 0 && chip->mpu_port != SNDRV_AUTO_PORT) {
if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_SB,
chip->mpu_port,
MPU401_INFO_IRQ_HOOK, -1,
&chip->rmidi)) < 0)
return err;
chip->rmidi_callback = snd_mpu401_uart_interrupt;
}
#ifdef SNDRV_SBAWE_EMU8000
if (awe_port[dev] == SNDRV_AUTO_PORT)
awe_port[dev] = 0; /* disable */
#endif
if (fm_port[dev] > 0 && fm_port[dev] != SNDRV_AUTO_PORT) {
if (snd_opl3_create(card, fm_port[dev], fm_port[dev] + 2,
OPL3_HW_OPL3,
acard->fm_res != NULL || fm_port[dev] == port[dev],
&opl3) < 0) {
snd_printk(KERN_ERR PFX "no OPL device at 0x%lx-0x%lx\n",
fm_port[dev], fm_port[dev] + 2);
} else {
#ifdef SNDRV_SBAWE_EMU8000
int seqdev = awe_port[dev] > 0 ? 2 : 1;
#else
int seqdev = 1;
#endif
if ((err = snd_opl3_hwdep_new(opl3, 0, seqdev, &synth)) < 0)
return err;
}
}
if ((err = snd_sbmixer_new(chip)) < 0)
return err;
#ifdef CONFIG_SND_SB16_CSP
/* CSP chip on SB16ASP/AWE32 */
if ((chip->hardware == SB_HW_16) && csp[dev]) {
snd_sb_csp_new(chip, synth != NULL ? 1 : 0, &xcsp);
if (xcsp) {
chip->csp = xcsp->private_data;
chip->hardware = SB_HW_16CSP;
} else {
snd_printk(KERN_INFO PFX "warning - CSP chip not detected on soundcard #%i\n", dev + 1);
}
}
#endif
#ifdef SNDRV_SBAWE_EMU8000
if (awe_port[dev] > 0) {
if ((err = snd_emu8000_new(card, 1, awe_port[dev],
seq_ports[dev], NULL)) < 0) {
snd_printk(KERN_ERR PFX "fatal error - EMU-8000 synthesizer not detected at 0x%lx\n", awe_port[dev]);
return err;
}
}
#endif
/* setup Mic AGC */
spin_lock_irqsave(&chip->mixer_lock, flags);
snd_sbmixer_write(chip, SB_DSP4_MIC_AGC,
(snd_sbmixer_read(chip, SB_DSP4_MIC_AGC) & 0x01) |
(mic_agc[dev] ? 0x00 : 0x01));
spin_unlock_irqrestore(&chip->mixer_lock, flags);
if ((err = snd_card_register(card)) < 0)
return err;
return 0;
}
#ifdef CONFIG_PM
static int snd_sb16_suspend(struct snd_card *card, pm_message_t state)
{
struct snd_card_sb16 *acard = card->private_data;
struct snd_sb *chip = acard->chip;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
snd_pcm_suspend_all(chip->pcm);
snd_sbmixer_suspend(chip);
return 0;
}
static int snd_sb16_resume(struct snd_card *card)
{
struct snd_card_sb16 *acard = card->private_data;
struct snd_sb *chip = acard->chip;
snd_sbdsp_reset(chip);
snd_sbmixer_resume(chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
#endif
static int snd_sb16_isa_probe1(int dev, struct device *pdev)
{
struct snd_card_sb16 *acard;
struct snd_card *card;
int err;
err = snd_sb16_card_new(pdev, dev, &card);
if (err < 0)
return err;
acard = card->private_data;
/* non-PnP FM port address is hardwired with base port address */
fm_port[dev] = port[dev];
/* block the 0x388 port to avoid PnP conflicts */
acard->fm_res = request_region(0x388, 4, "SoundBlaster FM");
#ifdef SNDRV_SBAWE_EMU8000
/* non-PnP AWE port address is hardwired with base port address */
awe_port[dev] = port[dev] + 0x400;
#endif
if ((err = snd_sb16_probe(card, dev)) < 0) {
snd_card_free(card);
return err;
}
dev_set_drvdata(pdev, card);
return 0;
}
static int snd_sb16_isa_match(struct device *pdev, unsigned int dev)
{
return enable[dev] && !is_isapnp_selected(dev);
}
static int snd_sb16_isa_probe(struct device *pdev, unsigned int dev)
{
int err;
static int possible_irqs[] = {5, 9, 10, 7, -1};
static int possible_dmas8[] = {1, 3, 0, -1};
static int possible_dmas16[] = {5, 6, 7, -1};
if (irq[dev] == SNDRV_AUTO_IRQ) {
if ((irq[dev] = snd_legacy_find_free_irq(possible_irqs)) < 0) {
snd_printk(KERN_ERR PFX "unable to find a free IRQ\n");
return -EBUSY;
}
}
if (dma8[dev] == SNDRV_AUTO_DMA) {
if ((dma8[dev] = snd_legacy_find_free_dma(possible_dmas8)) < 0) {
snd_printk(KERN_ERR PFX "unable to find a free 8-bit DMA\n");
return -EBUSY;
}
}
if (dma16[dev] == SNDRV_AUTO_DMA) {
if ((dma16[dev] = snd_legacy_find_free_dma(possible_dmas16)) < 0) {
snd_printk(KERN_ERR PFX "unable to find a free 16-bit DMA\n");
return -EBUSY;
}
}
if (port[dev] != SNDRV_AUTO_PORT)
return snd_sb16_isa_probe1(dev, pdev);
else {
static int possible_ports[] = {0x220, 0x240, 0x260, 0x280};
int i;
for (i = 0; i < ARRAY_SIZE(possible_ports); i++) {
port[dev] = possible_ports[i];
err = snd_sb16_isa_probe1(dev, pdev);
if (! err)
return 0;
}
return err;
}
}
static int snd_sb16_isa_remove(struct device *pdev, unsigned int dev)
{
snd_card_free(dev_get_drvdata(pdev));
return 0;
}
#ifdef CONFIG_PM
static int snd_sb16_isa_suspend(struct device *dev, unsigned int n,
pm_message_t state)
{
return snd_sb16_suspend(dev_get_drvdata(dev), state);
}
static int snd_sb16_isa_resume(struct device *dev, unsigned int n)
{
return snd_sb16_resume(dev_get_drvdata(dev));
}
#endif
#ifdef SNDRV_SBAWE
#define DEV_NAME "sbawe"
#else
#define DEV_NAME "sb16"
#endif
static struct isa_driver snd_sb16_isa_driver = {
.match = snd_sb16_isa_match,
.probe = snd_sb16_isa_probe,
.remove = snd_sb16_isa_remove,
#ifdef CONFIG_PM
.suspend = snd_sb16_isa_suspend,
.resume = snd_sb16_isa_resume,
#endif
.driver = {
.name = DEV_NAME
},
};
#ifdef CONFIG_PNP
static int snd_sb16_pnp_detect(struct pnp_card_link *pcard,
const struct pnp_card_device_id *pid)
{
static int dev;
struct snd_card *card;
int res;
for ( ; dev < SNDRV_CARDS; dev++) {
if (!enable[dev] || !isapnp[dev])
continue;
res = snd_sb16_card_new(&pcard->card->dev, dev, &card);
if (res < 0)
return res;
if ((res = snd_card_sb16_pnp(dev, card->private_data, pcard, pid)) < 0 ||
(res = snd_sb16_probe(card, dev)) < 0) {
snd_card_free(card);
return res;
}
pnp_set_card_drvdata(pcard, card);
dev++;
return 0;
}
return -ENODEV;
}
static void snd_sb16_pnp_remove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
}
#ifdef CONFIG_PM
static int snd_sb16_pnp_suspend(struct pnp_card_link *pcard, pm_message_t state)
{
return snd_sb16_suspend(pnp_get_card_drvdata(pcard), state);
}
static int snd_sb16_pnp_resume(struct pnp_card_link *pcard)
{
return snd_sb16_resume(pnp_get_card_drvdata(pcard));
}
#endif
static struct pnp_card_driver sb16_pnpc_driver = {
.flags = PNP_DRIVER_RES_DISABLE,
#ifdef SNDRV_SBAWE
.name = "sbawe",
#else
.name = "sb16",
#endif
.id_table = snd_sb16_pnpids,
.probe = snd_sb16_pnp_detect,
.remove = snd_sb16_pnp_remove,
#ifdef CONFIG_PM
.suspend = snd_sb16_pnp_suspend,
.resume = snd_sb16_pnp_resume,
#endif
};
#endif /* CONFIG_PNP */
static int __init alsa_card_sb16_init(void)
{
int err;
err = isa_register_driver(&snd_sb16_isa_driver, SNDRV_CARDS);
#ifdef CONFIG_PNP
if (!err)
isa_registered = 1;
err = pnp_register_card_driver(&sb16_pnpc_driver);
if (!err)
pnp_registered = 1;
if (isa_registered)
err = 0;
#endif
return err;
}
static void __exit alsa_card_sb16_exit(void)
{
#ifdef CONFIG_PNP
if (pnp_registered)
pnp_unregister_card_driver(&sb16_pnpc_driver);
if (isa_registered)
#endif
isa_unregister_driver(&snd_sb16_isa_driver);
}
module_init(alsa_card_sb16_init)
module_exit(alsa_card_sb16_exit)
| gpl-2.0 |
BenzoPlayer/kernel_asus_fugu | drivers/of/irq.c | 2245 | 14193 | /*
* Derived from arch/i386/kernel/irq.c
* Copyright (C) 1992 Linus Torvalds
* Adapted from arch/i386 by Gary Thomas
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
* Updated and modified by Cort Dougan <cort@fsmlabs.com>
* Copyright (C) 1996-2001 Cort Dougan
* Adapted for Power Macintosh by Paul Mackerras
* Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
 * This file contains the code used to translate IRQ descriptions in the
 * device tree into actual irq numbers handled by an interrupt controller
 * driver.
*/
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/string.h>
#include <linux/slab.h>
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
* @device: Device node of the device whose interrupt is to be mapped
* @index: Index of the interrupt to map
*
* This function is a wrapper that chains of_irq_map_one() and
 * irq_create_of_mapping() to make things easier for callers
*/
unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
struct of_irq oirq;
if (of_irq_map_one(dev, index, &oirq))
return 0;
return irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
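/*
 * Illustrative sketch (an assumption, not from this file): a platform driver
 * would typically map and then claim the first interrupt of its node:
 *
 *	unsigned int virq = irq_of_parse_and_map(np, 0);
 *	if (!virq)
 *		return -EINVAL;
 *	ret = request_irq(virq, my_handler, 0, "mydev", priv);
 *
 * where my_handler, "mydev" and priv are hypothetical caller-side names.
 */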
/**
* of_irq_find_parent - Given a device node, find its interrupt parent node
* @child: pointer to device node
*
* Returns a pointer to the interrupt parent node, or NULL if the interrupt
* parent could not be determined.
*/
struct device_node *of_irq_find_parent(struct device_node *child)
{
struct device_node *p;
const __be32 *parp;
if (!of_node_get(child))
return NULL;
do {
parp = of_get_property(child, "interrupt-parent", NULL);
if (parp == NULL)
p = of_get_parent(child);
else {
if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
p = of_node_get(of_irq_dflt_pic);
else
p = of_find_node_by_phandle(be32_to_cpup(parp));
}
of_node_put(child);
child = p;
} while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL);
return p;
}
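/*
 * Example device-tree fragment (illustrative only) showing the
 * "interrupt-parent" link this helper follows:
 *
 *	intc: interrupt-controller@1000 {
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *	};
 *
 *	serial@2000 {
 *		interrupt-parent = <&intc>;
 *		interrupts = <5 0>;
 *	};
 */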
/**
* of_irq_map_raw - Low level interrupt tree parsing
* @parent: the device interrupt parent
* @intspec: interrupt specifier ("interrupts" property of the device)
* @ointsize: size of the passed in interrupt specifier
* @addr: address specifier (start of "reg" property of the device)
* @out_irq: structure of_irq filled by this function
*
* Returns 0 on success and a negative number on error
*
 * This function is a low-level interrupt tree walking function. It
 * can be used to do a partial walk with synthesized reg and interrupts
 * properties, for example when resolving PCI interrupts when no device
 * node exists for the parent.
*/
int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
u32 ointsize, const __be32 *addr, struct of_irq *out_irq)
{
struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
const __be32 *tmp, *imap, *imask;
u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
int imaplen, match, i;
pr_debug("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n",
parent->full_name, be32_to_cpup(intspec),
be32_to_cpup(intspec + 1), ointsize);
ipar = of_node_get(parent);
/* First get the #interrupt-cells property of the current cursor
* that tells us how to interpret the passed-in intspec. If there
* is none, we are nice and just walk up the tree
*/
do {
tmp = of_get_property(ipar, "#interrupt-cells", NULL);
if (tmp != NULL) {
intsize = be32_to_cpu(*tmp);
break;
}
tnode = ipar;
ipar = of_irq_find_parent(ipar);
of_node_put(tnode);
} while (ipar);
if (ipar == NULL) {
pr_debug(" -> no parent found !\n");
goto fail;
}
pr_debug("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize);
if (ointsize != intsize)
return -EINVAL;
/* Look for this #address-cells. We have to implement the old linux
* trick of looking for the parent here as some device-trees rely on it
*/
old = of_node_get(ipar);
do {
tmp = of_get_property(old, "#address-cells", NULL);
tnode = of_get_parent(old);
of_node_put(old);
old = tnode;
} while (old && tmp == NULL);
of_node_put(old);
old = NULL;
addrsize = (tmp == NULL) ? 2 : be32_to_cpu(*tmp);
pr_debug(" -> addrsize=%d\n", addrsize);
/* Now start the actual "proper" walk of the interrupt tree */
while (ipar != NULL) {
/* Now check if cursor is an interrupt-controller and if it is
* then we are done
*/
if (of_get_property(ipar, "interrupt-controller", NULL) !=
NULL) {
pr_debug(" -> got it !\n");
for (i = 0; i < intsize; i++)
out_irq->specifier[i] =
of_read_number(intspec +i, 1);
out_irq->size = intsize;
out_irq->controller = ipar;
of_node_put(old);
return 0;
}
/* Now look for an interrupt-map */
imap = of_get_property(ipar, "interrupt-map", &imaplen);
/* No interrupt map, check for an interrupt parent */
if (imap == NULL) {
pr_debug(" -> no map, getting parent\n");
newpar = of_irq_find_parent(ipar);
goto skiplevel;
}
imaplen /= sizeof(u32);
/* Look for a mask */
imask = of_get_property(ipar, "interrupt-map-mask", NULL);
/* If we were passed no "reg" property and we attempt to parse
* an interrupt-map, then #address-cells must be 0.
* Fail if it's not.
*/
if (addr == NULL && addrsize != 0) {
pr_debug(" -> no reg passed in when needed !\n");
goto fail;
}
/* Parse interrupt-map */
match = 0;
while (imaplen > (addrsize + intsize + 1) && !match) {
/* Compare specifiers */
match = 1;
for (i = 0; i < addrsize && match; ++i) {
__be32 mask = imask ? imask[i]
: cpu_to_be32(0xffffffffu);
match = ((addr[i] ^ imap[i]) & mask) == 0;
}
for (; i < (addrsize + intsize) && match; ++i) {
__be32 mask = imask ? imask[i]
: cpu_to_be32(0xffffffffu);
match =
((intspec[i-addrsize] ^ imap[i]) & mask) == 0;
}
imap += addrsize + intsize;
imaplen -= addrsize + intsize;
pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);
/* Get the interrupt parent */
if (of_irq_workarounds & OF_IMAP_NO_PHANDLE)
newpar = of_node_get(of_irq_dflt_pic);
else
newpar = of_find_node_by_phandle(be32_to_cpup(imap));
imap++;
--imaplen;
/* Check if not found */
if (newpar == NULL) {
pr_debug(" -> imap parent not found !\n");
goto fail;
}
/* Get #interrupt-cells and #address-cells of new
* parent
*/
tmp = of_get_property(newpar, "#interrupt-cells", NULL);
if (tmp == NULL) {
pr_debug(" -> parent lacks #interrupt-cells!\n");
goto fail;
}
newintsize = be32_to_cpu(*tmp);
tmp = of_get_property(newpar, "#address-cells", NULL);
newaddrsize = (tmp == NULL) ? 0 : be32_to_cpu(*tmp);
pr_debug(" -> newintsize=%d, newaddrsize=%d\n",
newintsize, newaddrsize);
/* Check for malformed properties */
if (imaplen < (newaddrsize + newintsize))
goto fail;
imap += newaddrsize + newintsize;
imaplen -= newaddrsize + newintsize;
pr_debug(" -> imaplen=%d\n", imaplen);
}
if (!match)
goto fail;
of_node_put(old);
old = of_node_get(newpar);
addrsize = newaddrsize;
intsize = newintsize;
intspec = imap - intsize;
addr = intspec - addrsize;
skiplevel:
/* Iterate again with new parent */
pr_debug(" -> new parent: %s\n", of_node_full_name(newpar));
of_node_put(ipar);
ipar = newpar;
newpar = NULL;
}
fail:
of_node_put(ipar);
of_node_put(old);
of_node_put(newpar);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_irq_map_raw);
/**
* of_irq_map_one - Resolve an interrupt for a device
* @device: the device whose interrupt is to be resolved
* @index: index of the interrupt to resolve
* @out_irq: structure of_irq filled by this function
*
* This function resolves an interrupt, walking the tree, for a given
 * device-tree node. It's the high-level counterpart to of_irq_map_raw().
*/
int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq)
{
struct device_node *p;
const __be32 *intspec, *tmp, *addr;
u32 intsize, intlen;
int res = -EINVAL;
pr_debug("of_irq_map_one: dev=%s, index=%d\n", device->full_name, index);
/* OldWorld mac stuff is "special", handle out of line */
if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
return of_irq_map_oldworld(device, index, out_irq);
/* Get the interrupts property */
intspec = of_get_property(device, "interrupts", &intlen);
if (intspec == NULL)
return -EINVAL;
intlen /= sizeof(*intspec);
pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen);
/* Get the reg property (if any) */
addr = of_get_property(device, "reg", NULL);
/* Look for the interrupt parent. */
p = of_irq_find_parent(device);
if (p == NULL)
return -EINVAL;
/* Get size of interrupt specifier */
tmp = of_get_property(p, "#interrupt-cells", NULL);
if (tmp == NULL)
goto out;
intsize = be32_to_cpu(*tmp);
pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);
/* Check index */
if ((index + 1) * intsize > intlen)
goto out;
/* Get new specifier and map it */
res = of_irq_map_raw(p, intspec + index * intsize, intsize,
addr, out_irq);
out:
of_node_put(p);
return res;
}
EXPORT_SYMBOL_GPL(of_irq_map_one);
/**
* of_irq_to_resource - Decode a node's IRQ and return it as a resource
* @dev: pointer to device tree node
* @index: zero-based index of the irq
* @r: pointer to resource structure to return result into.
*/
int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
{
int irq = irq_of_parse_and_map(dev, index);
/* Only dereference the resource if both the
* resource and the irq are valid. */
if (r && irq) {
const char *name = NULL;
/*
		 * Get the optional "interrupt-names" property to add a name
		 * to the resource.
*/
of_property_read_string_index(dev, "interrupt-names", index,
&name);
r->start = r->end = irq;
r->flags = IORESOURCE_IRQ;
r->name = name ? name : dev->full_name;
}
return irq;
}
EXPORT_SYMBOL_GPL(of_irq_to_resource);
/**
* of_irq_count - Count the number of IRQs a node uses
* @dev: pointer to device tree node
*/
int of_irq_count(struct device_node *dev)
{
int nr = 0;
while (of_irq_to_resource(dev, nr, NULL))
nr++;
return nr;
}
/**
* of_irq_to_resource_table - Fill in resource table with node's IRQ info
* @dev: pointer to device tree node
* @res: array of resources to fill in
* @nr_irqs: the number of IRQs (and upper bound for num of @res elements)
*
* Returns the size of the filled in table (up to @nr_irqs).
*/
int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
int nr_irqs)
{
int i;
for (i = 0; i < nr_irqs; i++, res++)
if (!of_irq_to_resource(dev, i, res))
break;
return i;
}
EXPORT_SYMBOL_GPL(of_irq_to_resource_table);
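/*
 * Illustrative caller pattern (a sketch, assuming a sleeping context): size
 * the table with of_irq_count() and then fill it:
 *
 *	int n = of_irq_count(np);
 *	struct resource *res = kcalloc(n, sizeof(*res), GFP_KERNEL);
 *	if (res)
 *		n = of_irq_to_resource_table(np, res, n);
 */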
struct intc_desc {
struct list_head list;
struct device_node *dev;
struct device_node *interrupt_parent;
};
/**
* of_irq_init - Scan and init matching interrupt controllers in DT
* @matches: 0 terminated array of nodes to match and init function to call
*
* This function scans the device tree for matching interrupt controller nodes,
* and calls their initialization functions in order with parents first.
*/
void __init of_irq_init(const struct of_device_id *matches)
{
struct device_node *np, *parent = NULL;
struct intc_desc *desc, *temp_desc;
struct list_head intc_desc_list, intc_parent_list;
INIT_LIST_HEAD(&intc_desc_list);
INIT_LIST_HEAD(&intc_parent_list);
for_each_matching_node(np, matches) {
if (!of_find_property(np, "interrupt-controller", NULL))
continue;
/*
* Here, we allocate and populate an intc_desc with the node
* pointer, interrupt-parent device_node etc.
*/
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (WARN_ON(!desc))
goto err;
desc->dev = np;
desc->interrupt_parent = of_irq_find_parent(np);
if (desc->interrupt_parent == np)
desc->interrupt_parent = NULL;
list_add_tail(&desc->list, &intc_desc_list);
}
/*
* The root irq controller is the one without an interrupt-parent.
* That one goes first, followed by the controllers that reference it,
* followed by the ones that reference the 2nd level controllers, etc.
*/
while (!list_empty(&intc_desc_list)) {
/*
* Process all controllers with the current 'parent'.
* First pass will be looking for NULL as the parent.
* The assumption is that NULL parent means a root controller.
*/
list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) {
const struct of_device_id *match;
int ret;
of_irq_init_cb_t irq_init_cb;
if (desc->interrupt_parent != parent)
continue;
list_del(&desc->list);
match = of_match_node(matches, desc->dev);
if (WARN(!match->data,
"of_irq_init: no init function for %s\n",
match->compatible)) {
kfree(desc);
continue;
}
pr_debug("of_irq_init: init %s @ %p, parent %p\n",
match->compatible,
desc->dev, desc->interrupt_parent);
irq_init_cb = (of_irq_init_cb_t)match->data;
ret = irq_init_cb(desc->dev, desc->interrupt_parent);
if (ret) {
kfree(desc);
continue;
}
/*
* This one is now set up; add it to the parent list so
* its children can get processed in a subsequent pass.
*/
list_add_tail(&desc->list, &intc_parent_list);
}
/* Get the next pending parent that might have children */
		if (list_empty(&intc_parent_list)) {
			pr_err("of_irq_init: children remain, but no parents\n");
			break;
		}
		desc = list_first_entry(&intc_parent_list, typeof(*desc), list);
list_del(&desc->list);
parent = desc->dev;
kfree(desc);
}
list_for_each_entry_safe(desc, temp_desc, &intc_parent_list, list) {
list_del(&desc->list);
kfree(desc);
}
err:
list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) {
list_del(&desc->list);
kfree(desc);
}
}
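/*
 * Illustrative sketch (an assumption, not from this file): architecture code
 * typically calls of_irq_init() with a match table pairing compatible strings
 * with init callbacks, e.g.:
 *
 *	static const struct of_device_id irq_matches[] __initconst = {
 *		{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init },
 *		{ }
 *	};
 *	...
 *	of_irq_init(irq_matches);
 *
 * gic_of_init is shown only as a representative callback name.
 */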
| gpl-2.0 |
tecno789/kernel_lge_msm8226_g2m | drivers/media/video/cx25821/cx25821-core.c | 3269 | 39859 | /*
* Driver for the Conexant CX25821 PCIe bridge
*
* Copyright (C) 2009 Conexant Systems Inc.
* Authors <shu.lin@conexant.com>, <hiep.huynh@conexant.com>
* Based on Steven Toth <stoth@linuxtv.org> cx23885 driver
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/i2c.h>
#include <linux/slab.h>
#include "cx25821.h"
#include "cx25821-sram.h"
#include "cx25821-video.h"
MODULE_DESCRIPTION("Driver for Athena cards");
MODULE_AUTHOR("Shu Lin - Hiep Huynh");
MODULE_LICENSE("GPL");
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
static unsigned int card[] = {[0 ... (CX25821_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
static unsigned int cx25821_devcount;
DEFINE_MUTEX(cx25821_devlist_mutex);
EXPORT_SYMBOL(cx25821_devlist_mutex);
LIST_HEAD(cx25821_devlist);
EXPORT_SYMBOL(cx25821_devlist);
struct sram_channel cx25821_sram_channels[] = {
[SRAM_CH00] = {
.i = SRAM_CH00,
.name = "VID A",
.cmds_start = VID_A_DOWN_CMDS,
.ctrl_start = VID_A_IQ,
.cdt = VID_A_CDT,
.fifo_start = VID_A_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA1_PTR1,
.ptr2_reg = DMA1_PTR2,
.cnt1_reg = DMA1_CNT1,
.cnt2_reg = DMA1_CNT2,
.int_msk = VID_A_INT_MSK,
.int_stat = VID_A_INT_STAT,
.int_mstat = VID_A_INT_MSTAT,
.dma_ctl = VID_DST_A_DMA_CTL,
.gpcnt_ctl = VID_DST_A_GPCNT_CTL,
.gpcnt = VID_DST_A_GPCNT,
.vip_ctl = VID_DST_A_VIP_CTL,
.pix_frmt = VID_DST_A_PIX_FRMT,
},
[SRAM_CH01] = {
.i = SRAM_CH01,
.name = "VID B",
.cmds_start = VID_B_DOWN_CMDS,
.ctrl_start = VID_B_IQ,
.cdt = VID_B_CDT,
.fifo_start = VID_B_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA2_PTR1,
.ptr2_reg = DMA2_PTR2,
.cnt1_reg = DMA2_CNT1,
.cnt2_reg = DMA2_CNT2,
.int_msk = VID_B_INT_MSK,
.int_stat = VID_B_INT_STAT,
.int_mstat = VID_B_INT_MSTAT,
.dma_ctl = VID_DST_B_DMA_CTL,
.gpcnt_ctl = VID_DST_B_GPCNT_CTL,
.gpcnt = VID_DST_B_GPCNT,
.vip_ctl = VID_DST_B_VIP_CTL,
.pix_frmt = VID_DST_B_PIX_FRMT,
},
[SRAM_CH02] = {
.i = SRAM_CH02,
.name = "VID C",
.cmds_start = VID_C_DOWN_CMDS,
.ctrl_start = VID_C_IQ,
.cdt = VID_C_CDT,
.fifo_start = VID_C_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA3_PTR1,
.ptr2_reg = DMA3_PTR2,
.cnt1_reg = DMA3_CNT1,
.cnt2_reg = DMA3_CNT2,
.int_msk = VID_C_INT_MSK,
.int_stat = VID_C_INT_STAT,
.int_mstat = VID_C_INT_MSTAT,
.dma_ctl = VID_DST_C_DMA_CTL,
.gpcnt_ctl = VID_DST_C_GPCNT_CTL,
.gpcnt = VID_DST_C_GPCNT,
.vip_ctl = VID_DST_C_VIP_CTL,
.pix_frmt = VID_DST_C_PIX_FRMT,
},
[SRAM_CH03] = {
.i = SRAM_CH03,
.name = "VID D",
.cmds_start = VID_D_DOWN_CMDS,
.ctrl_start = VID_D_IQ,
.cdt = VID_D_CDT,
.fifo_start = VID_D_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA4_PTR1,
.ptr2_reg = DMA4_PTR2,
.cnt1_reg = DMA4_CNT1,
.cnt2_reg = DMA4_CNT2,
.int_msk = VID_D_INT_MSK,
.int_stat = VID_D_INT_STAT,
.int_mstat = VID_D_INT_MSTAT,
.dma_ctl = VID_DST_D_DMA_CTL,
.gpcnt_ctl = VID_DST_D_GPCNT_CTL,
.gpcnt = VID_DST_D_GPCNT,
.vip_ctl = VID_DST_D_VIP_CTL,
.pix_frmt = VID_DST_D_PIX_FRMT,
},
[SRAM_CH04] = {
.i = SRAM_CH04,
.name = "VID E",
.cmds_start = VID_E_DOWN_CMDS,
.ctrl_start = VID_E_IQ,
.cdt = VID_E_CDT,
.fifo_start = VID_E_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA5_PTR1,
.ptr2_reg = DMA5_PTR2,
.cnt1_reg = DMA5_CNT1,
.cnt2_reg = DMA5_CNT2,
.int_msk = VID_E_INT_MSK,
.int_stat = VID_E_INT_STAT,
.int_mstat = VID_E_INT_MSTAT,
.dma_ctl = VID_DST_E_DMA_CTL,
.gpcnt_ctl = VID_DST_E_GPCNT_CTL,
.gpcnt = VID_DST_E_GPCNT,
.vip_ctl = VID_DST_E_VIP_CTL,
.pix_frmt = VID_DST_E_PIX_FRMT,
},
[SRAM_CH05] = {
.i = SRAM_CH05,
.name = "VID F",
.cmds_start = VID_F_DOWN_CMDS,
.ctrl_start = VID_F_IQ,
.cdt = VID_F_CDT,
.fifo_start = VID_F_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA6_PTR1,
.ptr2_reg = DMA6_PTR2,
.cnt1_reg = DMA6_CNT1,
.cnt2_reg = DMA6_CNT2,
.int_msk = VID_F_INT_MSK,
.int_stat = VID_F_INT_STAT,
.int_mstat = VID_F_INT_MSTAT,
.dma_ctl = VID_DST_F_DMA_CTL,
.gpcnt_ctl = VID_DST_F_GPCNT_CTL,
.gpcnt = VID_DST_F_GPCNT,
.vip_ctl = VID_DST_F_VIP_CTL,
.pix_frmt = VID_DST_F_PIX_FRMT,
},
[SRAM_CH06] = {
.i = SRAM_CH06,
.name = "VID G",
.cmds_start = VID_G_DOWN_CMDS,
.ctrl_start = VID_G_IQ,
.cdt = VID_G_CDT,
.fifo_start = VID_G_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA7_PTR1,
.ptr2_reg = DMA7_PTR2,
.cnt1_reg = DMA7_CNT1,
.cnt2_reg = DMA7_CNT2,
.int_msk = VID_G_INT_MSK,
.int_stat = VID_G_INT_STAT,
.int_mstat = VID_G_INT_MSTAT,
.dma_ctl = VID_DST_G_DMA_CTL,
.gpcnt_ctl = VID_DST_G_GPCNT_CTL,
.gpcnt = VID_DST_G_GPCNT,
.vip_ctl = VID_DST_G_VIP_CTL,
.pix_frmt = VID_DST_G_PIX_FRMT,
},
[SRAM_CH07] = {
.i = SRAM_CH07,
.name = "VID H",
.cmds_start = VID_H_DOWN_CMDS,
.ctrl_start = VID_H_IQ,
.cdt = VID_H_CDT,
.fifo_start = VID_H_DOWN_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA8_PTR1,
.ptr2_reg = DMA8_PTR2,
.cnt1_reg = DMA8_CNT1,
.cnt2_reg = DMA8_CNT2,
.int_msk = VID_H_INT_MSK,
.int_stat = VID_H_INT_STAT,
.int_mstat = VID_H_INT_MSTAT,
.dma_ctl = VID_DST_H_DMA_CTL,
.gpcnt_ctl = VID_DST_H_GPCNT_CTL,
.gpcnt = VID_DST_H_GPCNT,
.vip_ctl = VID_DST_H_VIP_CTL,
.pix_frmt = VID_DST_H_PIX_FRMT,
},
[SRAM_CH08] = {
.name = "audio from",
.cmds_start = AUD_A_DOWN_CMDS,
.ctrl_start = AUD_A_IQ,
.cdt = AUD_A_CDT,
.fifo_start = AUD_A_DOWN_CLUSTER_1,
.fifo_size = AUDIO_CLUSTER_SIZE * 3,
.ptr1_reg = DMA17_PTR1,
.ptr2_reg = DMA17_PTR2,
.cnt1_reg = DMA17_CNT1,
.cnt2_reg = DMA17_CNT2,
},
[SRAM_CH09] = {
.i = SRAM_CH09,
.name = "VID Upstream I",
.cmds_start = VID_I_UP_CMDS,
.ctrl_start = VID_I_IQ,
.cdt = VID_I_CDT,
.fifo_start = VID_I_UP_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA15_PTR1,
.ptr2_reg = DMA15_PTR2,
.cnt1_reg = DMA15_CNT1,
.cnt2_reg = DMA15_CNT2,
.int_msk = VID_I_INT_MSK,
.int_stat = VID_I_INT_STAT,
.int_mstat = VID_I_INT_MSTAT,
.dma_ctl = VID_SRC_I_DMA_CTL,
.gpcnt_ctl = VID_SRC_I_GPCNT_CTL,
.gpcnt = VID_SRC_I_GPCNT,
.vid_fmt_ctl = VID_SRC_I_FMT_CTL,
.vid_active_ctl1 = VID_SRC_I_ACTIVE_CTL1,
.vid_active_ctl2 = VID_SRC_I_ACTIVE_CTL2,
.vid_cdt_size = VID_SRC_I_CDT_SZ,
.irq_bit = 8,
},
[SRAM_CH10] = {
.i = SRAM_CH10,
.name = "VID Upstream J",
.cmds_start = VID_J_UP_CMDS,
.ctrl_start = VID_J_IQ,
.cdt = VID_J_CDT,
.fifo_start = VID_J_UP_CLUSTER_1,
.fifo_size = (VID_CLUSTER_SIZE << 2),
.ptr1_reg = DMA16_PTR1,
.ptr2_reg = DMA16_PTR2,
.cnt1_reg = DMA16_CNT1,
.cnt2_reg = DMA16_CNT2,
.int_msk = VID_J_INT_MSK,
.int_stat = VID_J_INT_STAT,
.int_mstat = VID_J_INT_MSTAT,
.dma_ctl = VID_SRC_J_DMA_CTL,
.gpcnt_ctl = VID_SRC_J_GPCNT_CTL,
.gpcnt = VID_SRC_J_GPCNT,
.vid_fmt_ctl = VID_SRC_J_FMT_CTL,
.vid_active_ctl1 = VID_SRC_J_ACTIVE_CTL1,
.vid_active_ctl2 = VID_SRC_J_ACTIVE_CTL2,
.vid_cdt_size = VID_SRC_J_CDT_SZ,
.irq_bit = 9,
},
[SRAM_CH11] = {
.i = SRAM_CH11,
.name = "Audio Upstream Channel B",
.cmds_start = AUD_B_UP_CMDS,
.ctrl_start = AUD_B_IQ,
.cdt = AUD_B_CDT,
.fifo_start = AUD_B_UP_CLUSTER_1,
.fifo_size = (AUDIO_CLUSTER_SIZE * 3),
.ptr1_reg = DMA22_PTR1,
.ptr2_reg = DMA22_PTR2,
.cnt1_reg = DMA22_CNT1,
.cnt2_reg = DMA22_CNT2,
.int_msk = AUD_B_INT_MSK,
.int_stat = AUD_B_INT_STAT,
.int_mstat = AUD_B_INT_MSTAT,
.dma_ctl = AUD_INT_DMA_CTL,
.gpcnt_ctl = AUD_B_GPCNT_CTL,
.gpcnt = AUD_B_GPCNT,
.aud_length = AUD_B_LNGTH,
.aud_cfg = AUD_B_CFG,
.fld_aud_fifo_en = FLD_AUD_SRC_B_FIFO_EN,
.fld_aud_risc_en = FLD_AUD_SRC_B_RISC_EN,
.irq_bit = 11,
},
};
EXPORT_SYMBOL(cx25821_sram_channels);
struct sram_channel *channel0 = &cx25821_sram_channels[SRAM_CH00];
struct sram_channel *channel1 = &cx25821_sram_channels[SRAM_CH01];
struct sram_channel *channel2 = &cx25821_sram_channels[SRAM_CH02];
struct sram_channel *channel3 = &cx25821_sram_channels[SRAM_CH03];
struct sram_channel *channel4 = &cx25821_sram_channels[SRAM_CH04];
struct sram_channel *channel5 = &cx25821_sram_channels[SRAM_CH05];
struct sram_channel *channel6 = &cx25821_sram_channels[SRAM_CH06];
struct sram_channel *channel7 = &cx25821_sram_channels[SRAM_CH07];
struct sram_channel *channel9 = &cx25821_sram_channels[SRAM_CH09];
struct sram_channel *channel10 = &cx25821_sram_channels[SRAM_CH10];
struct sram_channel *channel11 = &cx25821_sram_channels[SRAM_CH11];
struct cx25821_dmaqueue mpegq;
static int cx25821_risc_decode(u32 risc)
{
static const char * const instr[16] = {
[RISC_SYNC >> 28] = "sync",
[RISC_WRITE >> 28] = "write",
[RISC_WRITEC >> 28] = "writec",
[RISC_READ >> 28] = "read",
[RISC_READC >> 28] = "readc",
[RISC_JUMP >> 28] = "jump",
[RISC_SKIP >> 28] = "skip",
[RISC_WRITERM >> 28] = "writerm",
[RISC_WRITECM >> 28] = "writecm",
[RISC_WRITECR >> 28] = "writecr",
};
static const int incr[16] = {
[RISC_WRITE >> 28] = 3,
[RISC_JUMP >> 28] = 3,
[RISC_SKIP >> 28] = 1,
[RISC_SYNC >> 28] = 1,
[RISC_WRITERM >> 28] = 3,
[RISC_WRITECM >> 28] = 3,
[RISC_WRITECR >> 28] = 4,
};
static const char * const bits[] = {
"12", "13", "14", "resync",
"cnt0", "cnt1", "18", "19",
"20", "21", "22", "23",
"irq1", "irq2", "eol", "sol",
};
int i;
pr_cont("0x%08x [ %s",
risc, instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--) {
if (risc & (1 << (i + 12)))
pr_cont(" %s", bits[i]);
}
pr_cont(" count=%d ]\n", risc & 0xfff);
return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
static inline int i2c_slave_did_ack(struct i2c_adapter *i2c_adap)
{
struct cx25821_i2c *bus = i2c_adap->algo_data;
struct cx25821_dev *dev = bus->dev;
return cx_read(bus->reg_stat) & 0x01;
}
void cx_i2c_read_print(struct cx25821_dev *dev, u32 reg, const char *reg_string)
{
int tmp = 0;
u32 value = 0;
value = cx25821_i2c_read(&dev->i2c_bus[0], reg, &tmp);
}
static void cx25821_registers_init(struct cx25821_dev *dev)
{
u32 tmp;
/* enable RUN_RISC in Pecos */
cx_write(DEV_CNTRL2, 0x20);
/* Set the master PCI interrupt masks to enable video, audio, MBIF,
* and GPIO interrupts
* I2C interrupt masking is handled by the I2C objects themselves. */
cx_write(PCI_INT_MSK, 0x2001FFFF);
tmp = cx_read(RDR_TLCTL0);
tmp &= ~FLD_CFG_RCB_CK_EN; /* Clear the RCB_CK_EN bit */
cx_write(RDR_TLCTL0, tmp);
/* PLL-A setting for the Audio Master Clock */
cx_write(PLL_A_INT_FRAC, 0x9807A58B);
/* PLL_A_POST = 0x1C, PLL_A_OUT_TO_PIN = 0x1 */
cx_write(PLL_A_POST_STAT_BIST, 0x8000019C);
/* clear reset bit [31] */
tmp = cx_read(PLL_A_INT_FRAC);
cx_write(PLL_A_INT_FRAC, tmp & 0x7FFFFFFF);
/* PLL-B setting for Mobilygen Host Bus Interface */
cx_write(PLL_B_INT_FRAC, 0x9883A86F);
/* PLL_B_POST = 0xD, PLL_B_OUT_TO_PIN = 0x0 */
cx_write(PLL_B_POST_STAT_BIST, 0x8000018D);
/* clear reset bit [31] */
tmp = cx_read(PLL_B_INT_FRAC);
cx_write(PLL_B_INT_FRAC, tmp & 0x7FFFFFFF);
/* PLL-C setting for video upstream channel */
cx_write(PLL_C_INT_FRAC, 0x96A0EA3F);
/* PLL_C_POST = 0x3, PLL_C_OUT_TO_PIN = 0x0 */
cx_write(PLL_C_POST_STAT_BIST, 0x80000103);
/* clear reset bit [31] */
tmp = cx_read(PLL_C_INT_FRAC);
cx_write(PLL_C_INT_FRAC, tmp & 0x7FFFFFFF);
/* PLL-D setting for audio upstream channel */
cx_write(PLL_D_INT_FRAC, 0x98757F5B);
/* PLL_D_POST = 0x13, PLL_D_OUT_TO_PIN = 0x0 */
cx_write(PLL_D_POST_STAT_BIST, 0x80000113);
/* clear reset bit [31] */
tmp = cx_read(PLL_D_INT_FRAC);
cx_write(PLL_D_INT_FRAC, tmp & 0x7FFFFFFF);
/* This selects the PLL C clock source for the video upstream channel
* I and J */
tmp = cx_read(VID_CH_CLK_SEL);
cx_write(VID_CH_CLK_SEL, (tmp & 0x00FFFFFF) | 0x24000000);
/* 656/VIP SRC Upstream Channel I & J and 7 - Host Bus Interface for
* channel A-C
* select 656/VIP DST for downstream Channel A - C */
tmp = cx_read(VID_CH_MODE_SEL);
/* cx_write( VID_CH_MODE_SEL, tmp | 0x1B0001FF); */
cx_write(VID_CH_MODE_SEL, tmp & 0xFFFFFE00);
/* enables 656 port I and J as output */
tmp = cx_read(CLK_RST);
/* use external ALT_PLL_REF pin as its reference clock instead */
tmp |= FLD_USE_ALT_PLL_REF;
cx_write(CLK_RST, tmp & ~(FLD_VID_I_CLK_NOE | FLD_VID_J_CLK_NOE));
mdelay(100);
}
int cx25821_sram_channel_setup(struct cx25821_dev *dev,
struct sram_channel *ch,
unsigned int bpl, u32 risc)
{
unsigned int i, lines;
u32 cdt;
if (ch->cmds_start == 0) {
cx_write(ch->ptr1_reg, 0);
cx_write(ch->ptr2_reg, 0);
cx_write(ch->cnt2_reg, 0);
cx_write(ch->cnt1_reg, 0);
return 0;
}
bpl = (bpl + 7) & ~7; /* alignment */
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
if (lines > 4)
lines = 4;
BUG_ON(lines < 2);
cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
cx_write(8 + 4, 8);
cx_write(8 + 8, 0);
/* write CDT */
for (i = 0; i < lines; i++) {
cx_write(cdt + 16 * i, ch->fifo_start + bpl * i);
cx_write(cdt + 16 * i + 4, 0);
cx_write(cdt + 16 * i + 8, 0);
cx_write(cdt + 16 * i + 12, 0);
}
/* init the first cdt buffer */
for (i = 0; i < 128; i++)
cx_write(ch->fifo_start + 4 * i, i);
/* write CMDS */
if (ch->jumponly)
cx_write(ch->cmds_start + 0, 8);
else
cx_write(ch->cmds_start + 0, risc);
cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
cx_write(ch->cmds_start + 8, cdt);
cx_write(ch->cmds_start + 12, (lines * 16) >> 3);
cx_write(ch->cmds_start + 16, ch->ctrl_start);
if (ch->jumponly)
cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
else
cx_write(ch->cmds_start + 20, 64 >> 2);
for (i = 24; i < 80; i += 4)
cx_write(ch->cmds_start + i, 0);
/* fill registers */
cx_write(ch->ptr1_reg, ch->fifo_start);
cx_write(ch->ptr2_reg, cdt);
cx_write(ch->cnt2_reg, (lines * 16) >> 3);
cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
return 0;
}
EXPORT_SYMBOL(cx25821_sram_channel_setup);
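/*
 * Note on the alignment step above (worked example for clarity): the
 * expression (bpl + 7) & ~7 rounds the bytes-per-line value up to the next
 * multiple of 8, so bpl = 1440 is unchanged while bpl = 1442 becomes 1448.
 */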
int cx25821_sram_channel_setup_audio(struct cx25821_dev *dev,
struct sram_channel *ch,
unsigned int bpl, u32 risc)
{
unsigned int i, lines;
u32 cdt;
if (ch->cmds_start == 0) {
cx_write(ch->ptr1_reg, 0);
cx_write(ch->ptr2_reg, 0);
cx_write(ch->cnt2_reg, 0);
cx_write(ch->cnt1_reg, 0);
return 0;
}
bpl = (bpl + 7) & ~7; /* alignment */
cdt = ch->cdt;
lines = ch->fifo_size / bpl;
if (lines > 3)
lines = 3; /* for AUDIO */
BUG_ON(lines < 2);
cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
cx_write(8 + 4, 8);
cx_write(8 + 8, 0);
/* write CDT */
for (i = 0; i < lines; i++) {
cx_write(cdt + 16 * i, ch->fifo_start + bpl * i);
cx_write(cdt + 16 * i + 4, 0);
cx_write(cdt + 16 * i + 8, 0);
cx_write(cdt + 16 * i + 12, 0);
}
/* write CMDS */
if (ch->jumponly)
cx_write(ch->cmds_start + 0, 8);
else
cx_write(ch->cmds_start + 0, risc);
cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
cx_write(ch->cmds_start + 8, cdt);
cx_write(ch->cmds_start + 12, (lines * 16) >> 3);
cx_write(ch->cmds_start + 16, ch->ctrl_start);
/* IQ size */
if (ch->jumponly)
cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
else
cx_write(ch->cmds_start + 20, 64 >> 2);
/* zero out */
for (i = 24; i < 80; i += 4)
cx_write(ch->cmds_start + i, 0);
/* fill registers */
cx_write(ch->ptr1_reg, ch->fifo_start);
cx_write(ch->ptr2_reg, cdt);
cx_write(ch->cnt2_reg, (lines * 16) >> 3);
cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
return 0;
}
EXPORT_SYMBOL(cx25821_sram_channel_setup_audio);
void cx25821_sram_channel_dump(struct cx25821_dev *dev, struct sram_channel *ch)
{
	static const char * const name[] = {
"init risc lo",
"init risc hi",
"cdt base",
"cdt size",
"iq base",
"iq size",
"risc pc lo",
"risc pc hi",
"iq wr ptr",
"iq rd ptr",
"cdt current",
"pci target lo",
"pci target hi",
"line / byte",
};
u32 risc;
unsigned int i, j, n;
pr_warn("%s: %s - dma channel status dump\n", dev->name, ch->name);
for (i = 0; i < ARRAY_SIZE(name); i++)
pr_warn("cmds + 0x%2x: %-15s: 0x%08x\n",
i * 4, name[i], cx_read(ch->cmds_start + 4 * i));
j = i * 4;
for (i = 0; i < 4;) {
risc = cx_read(ch->cmds_start + 4 * (i + 14));
pr_warn("cmds + 0x%2x: risc%d: ", j + i * 4, i);
i += cx25821_risc_decode(risc);
}
for (i = 0; i < (64 >> 2); i += n) {
risc = cx_read(ch->ctrl_start + 4 * i);
/* No consideration for bits 63-32 */
pr_warn("ctrl + 0x%2x (0x%08x): iq %x: ",
i * 4, ch->ctrl_start + 4 * i, i);
n = cx25821_risc_decode(risc);
for (j = 1; j < n; j++) {
risc = cx_read(ch->ctrl_start + 4 * (i + j));
pr_warn("ctrl + 0x%2x : iq %x: 0x%08x [ arg #%d ]\n",
4 * (i + j), i + j, risc, j);
}
}
pr_warn(" : fifo: 0x%08x -> 0x%x\n",
ch->fifo_start, ch->fifo_start + ch->fifo_size);
pr_warn(" : ctrl: 0x%08x -> 0x%x\n",
ch->ctrl_start, ch->ctrl_start + 6 * 16);
pr_warn(" : ptr1_reg: 0x%08x\n",
cx_read(ch->ptr1_reg));
pr_warn(" : ptr2_reg: 0x%08x\n",
cx_read(ch->ptr2_reg));
pr_warn(" : cnt1_reg: 0x%08x\n",
cx_read(ch->cnt1_reg));
pr_warn(" : cnt2_reg: 0x%08x\n",
cx_read(ch->cnt2_reg));
}
EXPORT_SYMBOL(cx25821_sram_channel_dump);
void cx25821_sram_channel_dump_audio(struct cx25821_dev *dev,
struct sram_channel *ch)
{
static const char * const name[] = {
"init risc lo",
"init risc hi",
"cdt base",
"cdt size",
"iq base",
"iq size",
"risc pc lo",
"risc pc hi",
"iq wr ptr",
"iq rd ptr",
"cdt current",
"pci target lo",
"pci target hi",
"line / byte",
};
u32 risc, value, tmp;
unsigned int i, j, n;
pr_info("\n%s: %s - dma Audio channel status dump\n",
dev->name, ch->name);
for (i = 0; i < ARRAY_SIZE(name); i++)
pr_info("%s: cmds + 0x%2x: %-15s: 0x%08x\n",
dev->name, i * 4, name[i],
cx_read(ch->cmds_start + 4 * i));
j = i * 4;
for (i = 0; i < 4;) {
risc = cx_read(ch->cmds_start + 4 * (i + 14));
pr_warn("cmds + 0x%2x: risc%d: ", j + i * 4, i);
i += cx25821_risc_decode(risc);
}
for (i = 0; i < (64 >> 2); i += n) {
risc = cx_read(ch->ctrl_start + 4 * i);
/* No consideration for bits 63-32 */
pr_warn("ctrl + 0x%2x (0x%08x): iq %x: ",
i * 4, ch->ctrl_start + 4 * i, i);
n = cx25821_risc_decode(risc);
for (j = 1; j < n; j++) {
risc = cx_read(ch->ctrl_start + 4 * (i + j));
pr_warn("ctrl + 0x%2x : iq %x: 0x%08x [ arg #%d ]\n",
4 * (i + j), i + j, risc, j);
}
}
pr_warn(" : fifo: 0x%08x -> 0x%x\n",
ch->fifo_start, ch->fifo_start + ch->fifo_size);
pr_warn(" : ctrl: 0x%08x -> 0x%x\n",
ch->ctrl_start, ch->ctrl_start + 6 * 16);
pr_warn(" : ptr1_reg: 0x%08x\n",
cx_read(ch->ptr1_reg));
pr_warn(" : ptr2_reg: 0x%08x\n",
cx_read(ch->ptr2_reg));
pr_warn(" : cnt1_reg: 0x%08x\n",
cx_read(ch->cnt1_reg));
pr_warn(" : cnt2_reg: 0x%08x\n",
cx_read(ch->cnt2_reg));
for (i = 0; i < 4; i++) {
risc = cx_read(ch->cmds_start + 56 + (i * 4));
pr_warn("instruction %d = 0x%x\n", i, risc);
}
/* read data from the first cdt buffer */
risc = cx_read(AUD_A_CDT);
pr_warn("\nread cdt loc=0x%x\n", risc);
for (i = 0; i < 8; i++) {
n = cx_read(risc + i * 4);
pr_cont("0x%x ", n);
}
pr_cont("\n\n");
value = cx_read(CLK_RST);
CX25821_INFO(" CLK_RST = 0x%x\n\n", value);
value = cx_read(PLL_A_POST_STAT_BIST);
CX25821_INFO(" PLL_A_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_A_INT_FRAC);
CX25821_INFO(" PLL_A_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_B_POST_STAT_BIST);
CX25821_INFO(" PLL_B_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_B_INT_FRAC);
CX25821_INFO(" PLL_B_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_C_POST_STAT_BIST);
CX25821_INFO(" PLL_C_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_C_INT_FRAC);
CX25821_INFO(" PLL_C_INT_FRAC = 0x%x\n\n", value);
value = cx_read(PLL_D_POST_STAT_BIST);
CX25821_INFO(" PLL_D_POST_STAT_BIST = 0x%x\n\n", value);
value = cx_read(PLL_D_INT_FRAC);
CX25821_INFO(" PLL_D_INT_FRAC = 0x%x\n\n", value);
value = cx25821_i2c_read(&dev->i2c_bus[0], AFE_AB_DIAG_CTRL, &tmp);
CX25821_INFO(" AFE_AB_DIAG_CTRL (0x10900090) = 0x%x\n\n", value);
}
EXPORT_SYMBOL(cx25821_sram_channel_dump_audio);
static void cx25821_shutdown(struct cx25821_dev *dev)
{
int i;
/* disable RISC controller */
cx_write(DEV_CNTRL2, 0);
/* Disable Video A/B activity */
for (i = 0; i < VID_CHANNEL_NUM; i++) {
cx_write(dev->channels[i].sram_channels->dma_ctl, 0);
cx_write(dev->channels[i].sram_channels->int_msk, 0);
}
for (i = VID_UPSTREAM_SRAM_CHANNEL_I;
i <= VID_UPSTREAM_SRAM_CHANNEL_J; i++) {
cx_write(dev->channels[i].sram_channels->dma_ctl, 0);
cx_write(dev->channels[i].sram_channels->int_msk, 0);
}
/* Disable Audio activity */
cx_write(AUD_INT_DMA_CTL, 0);
/* Disable Serial port */
cx_write(UART_CTL, 0);
/* Disable Interrupts */
cx_write(PCI_INT_MSK, 0);
cx_write(AUD_A_INT_MSK, 0);
}
void cx25821_set_pixel_format(struct cx25821_dev *dev, int channel_select,
u32 format)
{
if (channel_select <= 7 && channel_select >= 0) {
cx_write(dev->channels[channel_select].sram_channels->pix_frmt,
format);
dev->channels[channel_select].pixel_formats = format;
}
}
static void cx25821_set_vip_mode(struct cx25821_dev *dev,
struct sram_channel *ch)
{
cx_write(ch->pix_frmt, PIXEL_FRMT_422);
cx_write(ch->vip_ctl, PIXEL_ENGINE_VIP1);
}
static void cx25821_initialize(struct cx25821_dev *dev)
{
int i;
dprintk(1, "%s()\n", __func__);
cx25821_shutdown(dev);
cx_write(PCI_INT_STAT, 0xffffffff);
for (i = 0; i < VID_CHANNEL_NUM; i++)
cx_write(dev->channels[i].sram_channels->int_stat, 0xffffffff);
cx_write(AUD_A_INT_STAT, 0xffffffff);
cx_write(AUD_B_INT_STAT, 0xffffffff);
cx_write(AUD_C_INT_STAT, 0xffffffff);
cx_write(AUD_D_INT_STAT, 0xffffffff);
cx_write(AUD_E_INT_STAT, 0xffffffff);
cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
cx_write(PAD_CTRL, 0x12); /* for I2C */
cx25821_registers_init(dev); /* init Pecos registers */
mdelay(100);
for (i = 0; i < VID_CHANNEL_NUM; i++) {
cx25821_set_vip_mode(dev, dev->channels[i].sram_channels);
cx25821_sram_channel_setup(dev, dev->channels[i].sram_channels,
1440, 0);
dev->channels[i].pixel_formats = PIXEL_FRMT_422;
dev->channels[i].use_cif_resolution = FALSE;
}
/* Probably only affect Downstream */
for (i = VID_UPSTREAM_SRAM_CHANNEL_I;
i <= VID_UPSTREAM_SRAM_CHANNEL_J; i++) {
cx25821_set_vip_mode(dev, dev->channels[i].sram_channels);
}
cx25821_sram_channel_setup_audio(dev,
dev->channels[SRAM_CH08].sram_channels, 128, 0);
cx25821_gpio_init(dev);
}
static int cx25821_get_resources(struct cx25821_dev *dev)
{
if (request_mem_region(pci_resource_start(dev->pci, 0),
pci_resource_len(dev->pci, 0), dev->name))
return 0;
pr_err("%s: can't get MMIO memory @ 0x%llx\n",
dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
return -EBUSY;
}
static void cx25821_dev_checkrevision(struct cx25821_dev *dev)
{
dev->hwrevision = cx_read(RDR_CFG2) & 0xff;
pr_info("%s(): Hardware revision = 0x%02x\n",
__func__, dev->hwrevision);
}
static void cx25821_iounmap(struct cx25821_dev *dev)
{
if (dev == NULL)
return;
/* Releasing IO memory */
if (dev->lmmio != NULL) {
CX25821_INFO("Releasing lmmio.\n");
iounmap(dev->lmmio);
dev->lmmio = NULL;
}
}
static int cx25821_dev_setup(struct cx25821_dev *dev)
{
int io_size = 0, i;
pr_info("\n***********************************\n");
pr_info("cx25821 set up\n");
pr_info("***********************************\n\n");
mutex_init(&dev->lock);
atomic_inc(&dev->refcount);
dev->nr = ++cx25821_devcount;
sprintf(dev->name, "cx25821[%d]", dev->nr);
mutex_lock(&cx25821_devlist_mutex);
list_add_tail(&dev->devlist, &cx25821_devlist);
mutex_unlock(&cx25821_devlist_mutex);
strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
if (dev->pci->device != 0x8210) {
pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
__func__, dev->pci->device);
return -1;
} else {
pr_info("Athena Hardware device = 0x%02x\n", dev->pci->device);
}
/* Apply a sensible clock frequency for the PCIe bridge */
dev->clk_freq = 28000000;
for (i = 0; i < MAX_VID_CHANNEL_NUM; i++)
dev->channels[i].sram_channels = &cx25821_sram_channels[i];
if (dev->nr > 1)
CX25821_INFO("dev->nr > 1!");
/* board config */
dev->board = 1; /* card[dev->nr]; */
dev->_max_num_decoders = MAX_DECODERS;
dev->pci_bus = dev->pci->bus->number;
dev->pci_slot = PCI_SLOT(dev->pci->devfn);
dev->pci_irqmask = 0x001f00;
/* External Master 1 Bus */
dev->i2c_bus[0].nr = 0;
dev->i2c_bus[0].dev = dev;
dev->i2c_bus[0].reg_stat = I2C1_STAT;
dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
dev->i2c_bus[0].reg_addr = I2C1_ADDR;
dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
dev->i2c_bus[0].i2c_period = (0x07 << 24); /* 1.95MHz */
if (cx25821_get_resources(dev) < 0) {
pr_err("%s: No more PCIe resources for subsystem: %04x:%04x\n",
dev->name, dev->pci->subsystem_vendor,
dev->pci->subsystem_device);
cx25821_devcount--;
return -EBUSY;
}
/* PCIe stuff */
dev->base_io_addr = pci_resource_start(dev->pci, 0);
io_size = pci_resource_len(dev->pci, 0);
if (!dev->base_io_addr) {
CX25821_ERR("No PCI Memory resources, exiting!\n");
return -ENODEV;
}
dev->lmmio = ioremap(dev->base_io_addr, pci_resource_len(dev->pci, 0));
if (!dev->lmmio) {
CX25821_ERR("ioremap failed, maybe increasing __VMALLOC_RESERVE in page.h\n");
cx25821_iounmap(dev);
return -ENOMEM;
}
dev->bmmio = (u8 __iomem *) dev->lmmio;
pr_info("%s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
dev->name, dev->pci->subsystem_vendor,
dev->pci->subsystem_device, cx25821_boards[dev->board].name,
dev->board, card[dev->nr] == dev->board ?
"insmod option" : "autodetected");
/* init hardware */
cx25821_initialize(dev);
cx25821_i2c_register(&dev->i2c_bus[0]);
/* cx25821_i2c_register(&dev->i2c_bus[1]);
* cx25821_i2c_register(&dev->i2c_bus[2]); */
CX25821_INFO("i2c register! bus->i2c_rc = %d\n",
dev->i2c_bus[0].i2c_rc);
cx25821_card_setup(dev);
if (medusa_video_init(dev) < 0)
CX25821_ERR("%s(): Failed to initialize medusa!\n", __func__);
cx25821_video_register(dev);
/* register IOCTL device */
dev->ioctl_dev = cx25821_vdev_init(dev, dev->pci,
&cx25821_videoioctl_template, "video");
if (video_register_device(dev->ioctl_dev, VFL_TYPE_GRABBER,
			  VIDEO_IOCTL_CH) < 0) {
cx25821_videoioctl_unregister(dev);
pr_err("%s(): Failed to register video adapter for IOCTL, so unregistering videoioctl device\n",
__func__);
}
cx25821_dev_checkrevision(dev);
CX25821_INFO("setup done!\n");
return 0;
}
void cx25821_start_upstream_video_ch1(struct cx25821_dev *dev,
struct upstream_user_struct *up_data)
{
dev->_isNTSC = !strcmp(dev->vid_stdname, "NTSC") ? 1 : 0;
dev->tvnorm = !dev->_isNTSC ? V4L2_STD_PAL_BG : V4L2_STD_NTSC_M;
medusa_set_videostandard(dev);
cx25821_vidupstream_init_ch1(dev, dev->channel_select,
dev->pixel_format);
}
void cx25821_start_upstream_video_ch2(struct cx25821_dev *dev,
struct upstream_user_struct *up_data)
{
dev->_isNTSC_ch2 = !strcmp(dev->vid_stdname_ch2, "NTSC") ? 1 : 0;
dev->tvnorm = !dev->_isNTSC_ch2 ? V4L2_STD_PAL_BG : V4L2_STD_NTSC_M;
medusa_set_videostandard(dev);
cx25821_vidupstream_init_ch2(dev, dev->channel_select_ch2,
dev->pixel_format_ch2);
}
void cx25821_start_upstream_audio(struct cx25821_dev *dev,
struct upstream_user_struct *up_data)
{
cx25821_audio_upstream_init(dev, AUDIO_UPSTREAM_SRAM_CHANNEL_B);
}
void cx25821_dev_unregister(struct cx25821_dev *dev)
{
int i;
if (!dev->base_io_addr)
return;
cx25821_free_mem_upstream_ch1(dev);
cx25821_free_mem_upstream_ch2(dev);
cx25821_free_mem_upstream_audio(dev);
release_mem_region(dev->base_io_addr, pci_resource_len(dev->pci, 0));
if (!atomic_dec_and_test(&dev->refcount))
return;
for (i = 0; i < VID_CHANNEL_NUM; i++)
cx25821_video_unregister(dev, i);
for (i = VID_UPSTREAM_SRAM_CHANNEL_I;
i <= AUDIO_UPSTREAM_SRAM_CHANNEL_B; i++) {
cx25821_video_unregister(dev, i);
}
cx25821_videoioctl_unregister(dev);
cx25821_i2c_unregister(&dev->i2c_bus[0]);
cx25821_iounmap(dev);
}
EXPORT_SYMBOL(cx25821_dev_unregister);
static __le32 *cx25821_risc_field(__le32 *rp, struct scatterlist *sglist,
unsigned int offset, u32 sync_line,
unsigned int bpl, unsigned int padding,
unsigned int lines)
{
struct scatterlist *sg;
unsigned int line, todo;
/* sync instruction */
if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
/* scan lines */
sg = sglist;
for (line = 0; line < lines; line++) {
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
sg++;
}
if (bpl <= sg_dma_len(sg) - offset) {
/* fits into current chunk */
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_SOL | RISC_EOL |
bpl);
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += bpl;
} else {
/* scanline needs to be split */
todo = bpl;
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_SOL |
(sg_dma_len(sg) - offset));
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= (sg_dma_len(sg) - offset);
offset = 0;
sg++;
while (todo > sg_dma_len(sg)) {
*(rp++) = cpu_to_le32(RISC_WRITE |
sg_dma_len(sg));
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= sg_dma_len(sg);
sg++;
}
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += todo;
}
offset += padding;
}
return rp;
}
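/*
 * Note on the instruction stream built above: every scan line becomes a
 * WRITE carrying RISC_SOL on its first chunk and RISC_EOL on its last.
 * A line that fits in one scatter-gather chunk gets both flags on a
 * single WRITE; a line crossing chunk boundaries is split, with the
 * middle chunks carrying neither flag, so the hardware still sees
 * exactly one logical line per SOL/EOL pair.
 */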
int cx25821_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
struct scatterlist *sglist, unsigned int top_offset,
unsigned int bottom_offset, unsigned int bpl,
unsigned int padding, unsigned int lines)
{
u32 instructions;
u32 fields;
__le32 *rp;
int rc;
fields = 0;
if (UNSET != top_offset)
fields++;
if (UNSET != bottom_offset)
fields++;
/* estimate risc mem: worst case is one write per page border +
one write per scan line + syncs + jump (all 2 dwords). Padding
can cause next bpl to start close to a page border. First DMA
region may be smaller than PAGE_SIZE */
/* write and jump need an extra dword */
instructions = fields * (1 + ((bpl + padding) * lines) / PAGE_SIZE +
lines);
instructions += 2;
rc = btcx_riscmem_alloc(pci, risc, instructions * 12);
if (rc < 0)
return rc;
/* write risc instructions */
rp = risc->cpu;
if (UNSET != top_offset) {
rp = cx25821_risc_field(rp, sglist, top_offset, 0, bpl, padding,
lines);
}
if (UNSET != bottom_offset) {
rp = cx25821_risc_field(rp, sglist, bottom_offset, 0x200, bpl,
padding, lines);
}
/* save pointer to jmp instruction address */
risc->jmp = rp;
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
return 0;
}
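/*
 * Worked example of the sizing estimate above (illustrative numbers,
 * not from this driver): one field, bpl = 1440, padding = 0,
 * lines = 240, PAGE_SIZE = 4096:
 *
 *   instructions = 1 * (1 + (1440 * 240) / 4096 + 240) + 2
 *                = 1 * (1 + 84 + 240) + 2 = 327
 *
 * At 12 bytes (three dwords) per instruction, btcx_riscmem_alloc()
 * reserves 327 * 12 = 3924 bytes for the RISC program.
 */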
static __le32 *cx25821_risc_field_audio(__le32 *rp, struct scatterlist *sglist,
unsigned int offset, u32 sync_line,
unsigned int bpl, unsigned int padding,
unsigned int lines, unsigned int lpi)
{
struct scatterlist *sg;
unsigned int line, todo, sol;
/* sync instruction */
if (sync_line != NO_SYNC_LINE)
*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
/* scan lines */
sg = sglist;
for (line = 0; line < lines; line++) {
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
sg++;
}
if (lpi && line > 0 && !(line % lpi))
sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
else
sol = RISC_SOL;
if (bpl <= sg_dma_len(sg) - offset) {
/* fits into current chunk */
*(rp++) = cpu_to_le32(RISC_WRITE | sol | RISC_EOL |
bpl);
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += bpl;
} else {
/* scanline needs to be split */
todo = bpl;
*(rp++) = cpu_to_le32(RISC_WRITE | sol |
(sg_dma_len(sg) - offset));
*(rp++) = cpu_to_le32(sg_dma_address(sg) + offset);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= (sg_dma_len(sg) - offset);
offset = 0;
sg++;
while (todo > sg_dma_len(sg)) {
*(rp++) = cpu_to_le32(RISC_WRITE |
sg_dma_len(sg));
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= sg_dma_len(sg);
sg++;
}
*(rp++) = cpu_to_le32(RISC_WRITE | RISC_EOL | todo);
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
offset += todo;
}
offset += padding;
}
return rp;
}
int cx25821_risc_databuffer_audio(struct pci_dev *pci,
struct btcx_riscmem *risc,
struct scatterlist *sglist,
unsigned int bpl,
unsigned int lines, unsigned int lpi)
{
u32 instructions;
__le32 *rp;
int rc;
/* estimate risc mem: worst case is one write per page border +
one write per scan line + syncs + jump (all 2 dwords). Here
there is no padding and no sync. First DMA region may be smaller
than PAGE_SIZE */
/* Jump and write need an extra dword */
instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
instructions += 1;
rc = btcx_riscmem_alloc(pci, risc, instructions * 12);
if (rc < 0)
return rc;
/* write risc instructions */
rp = risc->cpu;
rp = cx25821_risc_field_audio(rp, sglist, 0, NO_SYNC_LINE, bpl, 0,
lines, lpi);
/* save pointer to jmp instruction address */
risc->jmp = rp;
BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
return 0;
}
EXPORT_SYMBOL(cx25821_risc_databuffer_audio);
int cx25821_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
u32 reg, u32 mask, u32 value)
{
__le32 *rp;
int rc;
rc = btcx_riscmem_alloc(pci, risc, 4 * 16);
if (rc < 0)
return rc;
/* write risc instructions */
rp = risc->cpu;
*(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ1);
*(rp++) = cpu_to_le32(reg);
*(rp++) = cpu_to_le32(value);
*(rp++) = cpu_to_le32(mask);
*(rp++) = cpu_to_le32(RISC_JUMP);
*(rp++) = cpu_to_le32(risc->dma);
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
return 0;
}
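/*
 * The program built above is the classic "stopper" idiom: a WRITECR
 * instruction that applies (value, mask) to reg and raises IRQ1,
 * followed by a JUMP back to the program's own bus address (risc->dma),
 * parking the RISC engine in a harmless self-loop until it is pointed
 * at a real program again.
 */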
void cx25821_free_buffer(struct videobuf_queue *q, struct cx25821_buffer *buf)
{
struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
BUG_ON(in_interrupt());
videobuf_waiton(q, &buf->vb, 0, 0);
videobuf_dma_unmap(q->dev, dma);
videobuf_dma_free(dma);
btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
static irqreturn_t cx25821_irq(int irq, void *dev_id)
{
struct cx25821_dev *dev = dev_id;
u32 pci_status, pci_mask;
u32 vid_status;
int i, handled = 0;
u32 mask[8] = { 1, 2, 4, 8, 16, 32, 64, 128 };
pci_status = cx_read(PCI_INT_STAT);
pci_mask = cx_read(PCI_INT_MSK);
if (pci_status == 0)
goto out;
for (i = 0; i < VID_CHANNEL_NUM; i++) {
if (pci_status & mask[i]) {
vid_status = cx_read(dev->channels[i].
sram_channels->int_stat);
if (vid_status)
handled += cx25821_video_irq(dev, i,
vid_status);
cx_write(PCI_INT_STAT, mask[i]);
}
}
out:
return IRQ_RETVAL(handled);
}
void cx25821_print_irqbits(char *name, char *tag, char **strings,
int len, u32 bits, u32 mask)
{
unsigned int i;
printk(KERN_DEBUG pr_fmt("%s: %s [0x%x]"), name, tag, bits);
for (i = 0; i < len; i++) {
if (!(bits & (1 << i)))
continue;
if (strings[i])
pr_cont(" %s", strings[i]);
else
pr_cont(" %d", i);
if (!(mask & (1 << i)))
continue;
pr_cont("*");
}
pr_cont("\n");
}
EXPORT_SYMBOL(cx25821_print_irqbits);
struct cx25821_dev *cx25821_dev_get(struct pci_dev *pci)
{
struct cx25821_dev *dev = pci_get_drvdata(pci);
return dev;
}
EXPORT_SYMBOL(cx25821_dev_get);
static int __devinit cx25821_initdev(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
struct cx25821_dev *dev;
int err = 0;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (NULL == dev)
return -ENOMEM;
err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
if (err < 0)
goto fail_free;
/* pci init */
dev->pci = pci_dev;
if (pci_enable_device(pci_dev)) {
err = -EIO;
pr_info("pci enable failed!\n");
goto fail_unregister_device;
}
pr_info("Athena pci enable !\n");
err = cx25821_dev_setup(dev);
if (err) {
if (err == -EBUSY)
goto fail_unregister_device;
else
goto fail_unregister_pci;
}
/* print pci info */
pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
dev->name, pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
dev->pci_lat, (unsigned long long)dev->base_io_addr);
pci_set_master(pci_dev);
if (!pci_dma_supported(pci_dev, 0xffffffff)) {
pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
err = -EIO;
goto fail_irq;
}
err = request_irq(pci_dev->irq, cx25821_irq,
IRQF_SHARED, dev->name, dev);
if (err < 0) {
pr_err("%s: can't get IRQ %d\n", dev->name, pci_dev->irq);
goto fail_irq;
}
return 0;
fail_irq:
pr_info("cx25821_initdev() can't get IRQ !\n");
cx25821_dev_unregister(dev);
fail_unregister_pci:
pci_disable_device(pci_dev);
fail_unregister_device:
v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
kfree(dev);
return err;
}
static void __devexit cx25821_finidev(struct pci_dev *pci_dev)
{
struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
struct cx25821_dev *dev = get_cx25821(v4l2_dev);
cx25821_shutdown(dev);
pci_disable_device(pci_dev);
/* unregister stuff */
if (pci_dev->irq)
free_irq(pci_dev->irq, dev);
mutex_lock(&cx25821_devlist_mutex);
list_del(&dev->devlist);
mutex_unlock(&cx25821_devlist_mutex);
cx25821_dev_unregister(dev);
v4l2_device_unregister(v4l2_dev);
kfree(dev);
}
static DEFINE_PCI_DEVICE_TABLE(cx25821_pci_tbl) = {
{
/* CX25821 Athena */
.vendor = 0x14f1,
.device = 0x8210,
.subvendor = 0x14f1,
.subdevice = 0x0920,
}, {
/* CX25821 No Brand */
.vendor = 0x14f1,
.device = 0x8210,
.subvendor = 0x0000,
.subdevice = 0x0000,
}, {
/* --- end of list --- */
}
};
MODULE_DEVICE_TABLE(pci, cx25821_pci_tbl);
static struct pci_driver cx25821_pci_driver = {
.name = "cx25821",
.id_table = cx25821_pci_tbl,
.probe = cx25821_initdev,
.remove = __devexit_p(cx25821_finidev),
/* TODO */
.suspend = NULL,
.resume = NULL,
};
static int __init cx25821_init(void)
{
pr_info("driver version %d.%d.%d loaded\n",
(CX25821_VERSION_CODE >> 16) & 0xff,
(CX25821_VERSION_CODE >> 8) & 0xff,
CX25821_VERSION_CODE & 0xff);
return pci_register_driver(&cx25821_pci_driver);
}
static void __exit cx25821_fini(void)
{
pci_unregister_driver(&cx25821_pci_driver);
}
module_init(cx25821_init);
module_exit(cx25821_fini);
| gpl-2.0 |
ktoonsez/KTNOTE4 | drivers/mmc/core/sdio_ops.c | 4293 | 4965 | /*
* linux/drivers/mmc/sdio_ops.c
*
* Copyright 2006-2007 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include "core.h"
#include "sdio_ops.h"
int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
struct mmc_command cmd = {0};
int i, err = 0;
BUG_ON(!host);
cmd.opcode = SD_IO_SEND_OP_COND;
cmd.arg = ocr;
cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR;
for (i = 100; i; i--) {
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
break;
/* if we're just probing, do a single pass */
if (ocr == 0)
break;
/* otherwise wait until reset completes */
if (mmc_host_is_spi(host)) {
/*
* Both R1_SPI_IDLE and MMC_CARD_BUSY indicate
* an initialized card under SPI, but some cards
* (Marvell's) only behave when looking at this
* one.
*/
if (cmd.resp[1] & MMC_CARD_BUSY)
break;
} else {
if (cmd.resp[0] & MMC_CARD_BUSY)
break;
}
err = -ETIMEDOUT;
mmc_delay(10);
}
if (rocr)
*rocr = cmd.resp[mmc_host_is_spi(host) ? 1 : 0];
return err;
}
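/*
 * Minimal usage sketch (illustrative, not part of this file): probing
 * with ocr == 0 makes the loop above take a single pass and report the
 * card's supported voltage window in *rocr; a second call with a real
 * voltage mask then starts card initialization.
 */
#if 0
static int sdio_probe_example(struct mmc_host *host)
{
	u32 ocr;
	int err;

	/* query only: ocr == 0, the loop exits after one CMD5 */
	err = mmc_send_io_op_cond(host, 0, &ocr);
	if (err)
		return err;

	/* start initialization with the voltages both sides support */
	return mmc_send_io_op_cond(host, ocr & host->ocr_avail, NULL);
}
#endif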
static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,
unsigned addr, u8 in, u8 *out)
{
struct mmc_command cmd = {0};
int err;
BUG_ON(!host);
BUG_ON(fn > 7);
/* sanity check */
if (addr & ~0x1FFFF)
return -EINVAL;
cmd.opcode = SD_IO_RW_DIRECT;
cmd.arg = write ? 0x80000000 : 0x00000000;
cmd.arg |= fn << 28;
cmd.arg |= (write && out) ? 0x08000000 : 0x00000000;
cmd.arg |= addr << 9;
cmd.arg |= in;
cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
return err;
if (mmc_host_is_spi(host)) {
/* host driver already reported errors */
} else {
if (cmd.resp[0] & R5_ERROR)
return -EIO;
if (cmd.resp[0] & R5_FUNCTION_NUMBER)
return -EINVAL;
if (cmd.resp[0] & R5_OUT_OF_RANGE)
return -ERANGE;
}
if (out) {
if (mmc_host_is_spi(host))
*out = (cmd.resp[0] >> 8) & 0xFF;
else
*out = cmd.resp[0] & 0xFF;
}
return 0;
}
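/*
 * Worked example of the CMD52 argument packing above (illustrative
 * values): writing 0xAB to register 0x10 of function 1 with
 * read-after-write (out != NULL):
 *
 *   arg = 0x80000000      R/W flag (write)
 *       | (1 << 28)       function number
 *       | 0x08000000      RAW (read the register back after writing)
 *       | (0x10 << 9)     register address
 *       | 0xAB            write data
 *       = 0x980020AB
 */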
int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
unsigned addr, u8 in, u8 *out)
{
BUG_ON(!card);
return mmc_io_rw_direct_host(card->host, write, fn, addr, in, out);
}
int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)
{
struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
struct scatterlist sg, *sg_ptr;
struct sg_table sgtable;
unsigned int nents, left_size, i;
unsigned int seg_size = card->host->max_seg_size;
BUG_ON(!card);
BUG_ON(fn > 7);
WARN_ON(blksz == 0);
/* sanity check */
if (addr & ~0x1FFFF)
return -EINVAL;
mrq.cmd = &cmd;
mrq.data = &data;
cmd.opcode = SD_IO_RW_EXTENDED;
cmd.arg = write ? 0x80000000 : 0x00000000;
cmd.arg |= fn << 28;
cmd.arg |= incr_addr ? 0x04000000 : 0x00000000;
cmd.arg |= addr << 9;
if (blocks == 0)
cmd.arg |= (blksz == 512) ? 0 : blksz; /* byte mode */
else
cmd.arg |= 0x08000000 | blocks; /* block mode */
cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
data.blksz = blksz;
/* Code in host drivers/fwk assumes that "blocks" always is >=1 */
data.blocks = blocks ? blocks : 1;
data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
left_size = data.blksz * data.blocks;
nents = (left_size - 1) / seg_size + 1;
if (nents > 1) {
if (sg_alloc_table(&sgtable, nents, GFP_KERNEL))
return -ENOMEM;
data.sg = sgtable.sgl;
data.sg_len = nents;
for_each_sg(data.sg, sg_ptr, data.sg_len, i) {
sg_set_page(sg_ptr, virt_to_page(buf + (i * seg_size)),
min(seg_size, left_size),
offset_in_page(buf + (i * seg_size)));
left_size = left_size - seg_size;
}
} else {
data.sg = &sg;
data.sg_len = 1;
sg_init_one(&sg, buf, left_size);
}
mmc_set_data_timeout(&data, card);
mmc_wait_for_req(card->host, &mrq);
if (nents > 1)
sg_free_table(&sgtable);
if (cmd.error)
return cmd.error;
if (data.error)
return data.error;
if (mmc_host_is_spi(card->host)) {
/* host driver already reported errors */
} else {
if (cmd.resp[0] & R5_ERROR)
return -EIO;
if (cmd.resp[0] & R5_FUNCTION_NUMBER)
return -EINVAL;
if (cmd.resp[0] & R5_OUT_OF_RANGE)
return -ERANGE;
}
return 0;
}
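/*
 * Transfer-size encoding used above (illustrative numbers): a 64-byte
 * read uses byte mode, blocks == 0 and blksz == 64, so the low 9 bits
 * of the argument carry 64. A 4 KiB read with blksz == 512 uses block
 * mode: bit 27 (0x08000000) is set and the low 9 bits carry
 * blocks == 8. In byte mode, blksz == 512 is encoded as 0, the SDIO
 * spec's special case for "512 bytes".
 */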
int sdio_reset(struct mmc_host *host)
{
int ret;
u8 abort;
/* SDIO Simplified Specification V2.0, 4.4 Reset for SDIO */
ret = mmc_io_rw_direct_host(host, 0, 0, SDIO_CCCR_ABORT, 0, &abort);
if (ret)
abort = 0x08;
else
abort |= 0x08;
ret = mmc_io_rw_direct_host(host, 1, 0, SDIO_CCCR_ABORT, abort, NULL);
return ret;
}
| gpl-2.0 |
rwaterspf1/android_kernel_lge_hammerhead | drivers/usb/host/ohci-sa1111.c | 4805 | 6108 | /*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
* (C) Copyright 2002 Hewlett-Packard Company
*
* SA1111 Bus Glue
*
* Written by Christopher Hoover <ch@hpl.hp.com>
* Based on fragments of previous driver by Russell King et al.
*
* This file is licenced under the GPL.
*/
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <mach/assabet.h>
#include <asm/hardware/sa1111.h>
#ifndef CONFIG_SA1111
#error "This file is SA-1111 bus glue. CONFIG_SA1111 must be defined."
#endif
#define USB_STATUS 0x0118
#define USB_RESET 0x011c
#define USB_IRQTEST 0x0120
#define USB_RESET_FORCEIFRESET (1 << 0)
#define USB_RESET_FORCEHCRESET (1 << 1)
#define USB_RESET_CLKGENRESET (1 << 2)
#define USB_RESET_SIMSCALEDOWN (1 << 3)
#define USB_RESET_USBINTTEST (1 << 4)
#define USB_RESET_SLEEPSTBYEN (1 << 5)
#define USB_RESET_PWRSENSELOW (1 << 6)
#define USB_RESET_PWRCTRLLOW (1 << 7)
#define USB_STATUS_IRQHCIRMTWKUP (1 << 7)
#define USB_STATUS_IRQHCIBUFFACC (1 << 8)
#define USB_STATUS_NIRQHCIM (1 << 9)
#define USB_STATUS_NHCIMFCLR (1 << 10)
#define USB_STATUS_USBPWRSENSE (1 << 11)
#if 0
static void dump_hci_status(struct usb_hcd *hcd, const char *label)
{
unsigned long status = sa1111_readl(hcd->regs + USB_STATUS);
dbg("%s USB_STATUS = { %s%s%s%s%s}", label,
((status & USB_STATUS_IRQHCIRMTWKUP) ? "IRQHCIRMTWKUP " : ""),
((status & USB_STATUS_IRQHCIBUFFACC) ? "IRQHCIBUFFACC " : ""),
((status & USB_STATUS_NIRQHCIM) ? "" : "IRQHCIM "),
((status & USB_STATUS_NHCIMFCLR) ? "" : "HCIMFCLR "),
((status & USB_STATUS_USBPWRSENSE) ? "USBPWRSENSE " : ""));
}
#endif
static int ohci_sa1111_reset(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
ohci_hcd_init(ohci);
return ohci_init(ohci);
}
static int __devinit ohci_sa1111_start(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int ret;
ret = ohci_run(ohci);
if (ret < 0) {
ohci_err(ohci, "can't start\n");
ohci_stop(hcd);
}
return ret;
}
static const struct hc_driver ohci_sa1111_hc_driver = {
.description = hcd_name,
.product_desc = "SA-1111 OHCI",
.hcd_priv_size = sizeof(struct ohci_hcd),
/*
* generic hardware linkage
*/
.irq = ohci_irq,
.flags = HCD_USB11 | HCD_MEMORY,
/*
* basic lifecycle operations
*/
.reset = ohci_sa1111_reset,
.start = ohci_sa1111_start,
.stop = ohci_stop,
.shutdown = ohci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ohci_urb_enqueue,
.urb_dequeue = ohci_urb_dequeue,
.endpoint_disable = ohci_endpoint_disable,
/*
* scheduling support
*/
.get_frame_number = ohci_get_frame,
/*
* root hub support
*/
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
#endif
.start_port_reset = ohci_start_port_reset,
};
static int sa1111_start_hc(struct sa1111_dev *dev)
{
unsigned int usb_rst = 0;
int ret;
dev_dbg(&dev->dev, "starting SA-1111 OHCI USB Controller\n");
if (machine_is_xp860() ||
machine_has_neponset() ||
machine_is_pfs168() ||
machine_is_badge4())
usb_rst = USB_RESET_PWRSENSELOW | USB_RESET_PWRCTRLLOW;
/*
* Configure the power sense and control lines. Place the USB
* host controller in reset.
*/
sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
dev->mapbase + USB_RESET);
/*
* Now, carefully enable the USB clock, and take
* the USB host controller out of reset.
*/
ret = sa1111_enable_device(dev);
if (ret == 0) {
udelay(11);
sa1111_writel(usb_rst, dev->mapbase + USB_RESET);
}
return ret;
}
static void sa1111_stop_hc(struct sa1111_dev *dev)
{
unsigned int usb_rst;
dev_dbg(&dev->dev, "stopping SA-1111 OHCI USB Controller\n");
/*
* Put the USB host controller into reset.
*/
usb_rst = sa1111_readl(dev->mapbase + USB_RESET);
sa1111_writel(usb_rst | USB_RESET_FORCEIFRESET | USB_RESET_FORCEHCRESET,
dev->mapbase + USB_RESET);
/*
* Stop the USB clock.
*/
sa1111_disable_device(dev);
}
/**
* ohci_hcd_sa1111_probe - initialize SA-1111-based HCDs
*
* Allocates basic resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it.
*/
static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
{
struct usb_hcd *hcd;
int ret;
if (usb_disabled())
return -ENODEV;
hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111");
if (!hcd)
return -ENOMEM;
hcd->rsrc_start = dev->res.start;
hcd->rsrc_len = resource_size(&dev->res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
dbg("request_mem_region failed");
ret = -EBUSY;
goto err1;
}
hcd->regs = dev->mapbase;
ret = sa1111_start_hc(dev);
if (ret)
goto err2;
ret = usb_add_hcd(hcd, dev->irq[1], 0);
if (ret == 0)
return ret;
sa1111_stop_hc(dev);
err2:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
usb_put_hcd(hcd);
return ret;
}
/**
* ohci_hcd_sa1111_remove - shutdown processing for SA-1111-based HCDs
* @dev: USB Host Controller being removed
*
* Reverses the effect of ohci_hcd_sa1111_probe(), first invoking
* the HCD's stop() method.
*/
static int ohci_hcd_sa1111_remove(struct sa1111_dev *dev)
{
struct usb_hcd *hcd = sa1111_get_drvdata(dev);
usb_remove_hcd(hcd);
sa1111_stop_hc(dev);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
return 0;
}
static void ohci_hcd_sa1111_shutdown(struct sa1111_dev *dev)
{
struct usb_hcd *hcd = sa1111_get_drvdata(dev);
if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
hcd->driver->shutdown(hcd);
sa1111_stop_hc(dev);
}
}
static struct sa1111_driver ohci_hcd_sa1111_driver = {
.drv = {
.name = "sa1111-ohci",
.owner = THIS_MODULE,
},
.devid = SA1111_DEVID_USB,
.probe = ohci_hcd_sa1111_probe,
.remove = ohci_hcd_sa1111_remove,
.shutdown = ohci_hcd_sa1111_shutdown,
};
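/*
 * Registration sketch (illustrative): this bus-glue file is built as
 * part of ohci-hcd, and hooking the driver up to the SA-1111 bus
 * amounts to something like:
 */
#if 0
static int __init ohci_hcd_sa1111_init(void)
{
	return sa1111_driver_register(&ohci_hcd_sa1111_driver);
}
module_init(ohci_hcd_sa1111_init);
#endif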
| gpl-2.0 |
wyldstallyns/M4CH3T3_kernel_m8 | drivers/net/ethernet/mellanox/mlx4/en_rx.c | 4805 | 27070 | /*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include "mlx4_en.h"
static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
struct page_frag *skb_frags,
struct mlx4_en_rx_alloc *ring_alloc,
int i)
{
struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
struct page *page;
dma_addr_t dma;
if (page_alloc->offset == frag_info->last_offset) {
/* Allocate new page */
page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
if (!page)
return -ENOMEM;
skb_frags[i].page = page_alloc->page;
skb_frags[i].offset = page_alloc->offset;
page_alloc->page = page;
page_alloc->offset = frag_info->frag_align;
} else {
page = page_alloc->page;
get_page(page);
skb_frags[i].page = page;
skb_frags[i].offset = page_alloc->offset;
page_alloc->offset += frag_info->frag_stride;
}
dma = dma_map_single(priv->ddev, page_address(skb_frags[i].page) +
skb_frags[i].offset, frag_info->frag_size,
PCI_DMA_FROMDEVICE);
rx_desc->data[i].addr = cpu_to_be64(dma);
return 0;
}
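/*
 * The allocator above hands out frag_stride-sized slices of one
 * high-order page per fragment slot. Reaching last_offset means the
 * current page is exhausted: a fresh page is allocated and swapped in,
 * and the old page's final slice goes to the skb without get_page(),
 * transferring the original allocation reference. Otherwise get_page()
 * takes an extra reference for the slice handed out and the offset
 * advances by one stride.
 */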
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
struct mlx4_en_rx_alloc *page_alloc;
int i;
for (i = 0; i < priv->num_frags; i++) {
page_alloc = &ring->page_alloc[i];
page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
MLX4_EN_ALLOC_ORDER);
if (!page_alloc->page)
goto out;
page_alloc->offset = priv->frag_info[i].frag_align;
en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
i, page_alloc->page);
}
return 0;
out:
while (i--) {
page_alloc = &ring->page_alloc[i];
put_page(page_alloc->page);
page_alloc->page = NULL;
}
return -ENOMEM;
}
static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
struct mlx4_en_rx_alloc *page_alloc;
int i;
for (i = 0; i < priv->num_frags; i++) {
page_alloc = &ring->page_alloc[i];
en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
i, page_count(page_alloc->page));
put_page(page_alloc->page);
page_alloc->page = NULL;
}
}
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
struct skb_frag_struct *skb_frags = ring->rx_info +
(index << priv->log_rx_info);
int possible_frags;
int i;
/* Set size and memtype fields */
for (i = 0; i < priv->num_frags; i++) {
skb_frag_size_set(&skb_frags[i], priv->frag_info[i].frag_size);
rx_desc->data[i].byte_count =
cpu_to_be32(priv->frag_info[i].frag_size);
rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
}
/* If the number of used fragments does not fill up the ring stride,
* remaining (unused) fragments must be padded with null address/size
* and a special memory key */
possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
for (i = priv->num_frags; i < possible_frags; i++) {
rx_desc->data[i].byte_count = 0;
rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
rx_desc->data[i].addr = 0;
}
}
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
struct page_frag *skb_frags = ring->rx_info +
(index << priv->log_rx_info);
int i;
for (i = 0; i < priv->num_frags; i++)
if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
goto err;
return 0;
err:
while (i--) {
dma_addr_t dma = be64_to_cpu(rx_desc->data[i].addr);
pci_unmap_single(priv->mdev->pdev, dma, skb_frags[i].size,
PCI_DMA_FROMDEVICE);
put_page(skb_frags[i].page);
}
return -ENOMEM;
}
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}
static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
int index)
{
struct page_frag *skb_frags;
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
dma_addr_t dma;
int nr;
skb_frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
dma = be64_to_cpu(rx_desc->data[nr].addr);
en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
dma_unmap_single(priv->ddev, dma, skb_frags[nr].size,
PCI_DMA_FROMDEVICE);
put_page(skb_frags[nr].page);
}
}
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
struct mlx4_en_rx_ring *ring;
int ring_ind;
int buf_ind;
int new_size;
for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = &priv->rx_ring[ring_ind];
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->actual_size)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
en_err(priv, "Failed to allocate "
"enough rx buffers\n");
return -ENOMEM;
} else {
new_size = rounddown_pow_of_two(ring->actual_size);
en_warn(priv, "Only %d buffers allocated "
"reducing ring size to %d",
ring->actual_size, new_size);
goto reduce_rings;
}
}
ring->actual_size++;
ring->prod++;
}
}
return 0;
reduce_rings:
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = &priv->rx_ring[ring_ind];
while (ring->actual_size > new_size) {
ring->actual_size--;
ring->prod--;
mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
}
}
return 0;
}
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
int index;
en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
ring->cons, ring->prod);
/* Unmap and free Rx buffers */
BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
while (ring->cons != ring->prod) {
index = ring->cons & ring->size_mask;
en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
mlx4_en_free_rx_desc(priv, ring, index);
++ring->cons;
}
}
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
int err;
int tmp;
ring->prod = 0;
ring->cons = 0;
ring->size = size;
ring->size_mask = size - 1;
ring->stride = stride;
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
sizeof(struct skb_frag_struct));
ring->rx_info = vmalloc(tmp);
if (!ring->rx_info)
return -ENOMEM;
en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
ring->rx_info, tmp);
err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
ring->buf_size, 2 * PAGE_SIZE);
if (err)
goto err_ring;
err = mlx4_en_map_buffer(&ring->wqres.buf);
if (err) {
en_err(priv, "Failed to map RX buffer\n");
goto err_hwq;
}
ring->buf = ring->wqres.buf.direct.buf;
return 0;
err_hwq:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_ring:
vfree(ring->rx_info);
ring->rx_info = NULL;
return err;
}
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
struct mlx4_en_rx_ring *ring;
int i;
int ring_ind;
int err;
int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * priv->num_frags);
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = &priv->rx_ring[ring_ind];
ring->prod = 0;
ring->cons = 0;
ring->actual_size = 0;
ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
ring->stride = stride;
if (ring->stride <= TXBB_SIZE)
ring->buf += TXBB_SIZE;
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride;
memset(ring->buf, 0, ring->buf_size);
mlx4_en_update_rx_prod_db(ring);
/* Initialize all descriptors */
for (i = 0; i < ring->size; i++)
mlx4_en_init_rx_desc(priv, ring, i);
/* Initialize page allocators */
err = mlx4_en_init_allocator(priv, ring);
if (err) {
en_err(priv, "Failed initializing ring allocator\n");
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
ring_ind--;
goto err_allocator;
}
}
err = mlx4_en_fill_rx_buffers(priv);
if (err)
goto err_buffers;
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
ring = &priv->rx_ring[ring_ind];
ring->size_mask = ring->actual_size - 1;
mlx4_en_update_rx_prod_db(ring);
}
return 0;
err_buffers:
for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
ring_ind = priv->rx_ring_num - 1;
err_allocator:
while (ring_ind >= 0) {
if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
ring_ind--;
}
return err;
}
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
mlx4_en_destroy_allocator(priv, ring);
}
/* Unmap a completed descriptor and free unused pages */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
struct page_frag *skb_frags,
struct sk_buff *skb,
struct mlx4_en_rx_alloc *page_alloc,
int length)
{
struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
struct mlx4_en_frag_info *frag_info;
int nr;
dma_addr_t dma;
/* Collect used fragments while replacing them in the HW descriptors */
for (nr = 0; nr < priv->num_frags; nr++) {
frag_info = &priv->frag_info[nr];
if (length <= frag_info->frag_prefix_size)
break;
/* Save page reference in skb */
__skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
skb->truesize += frag_info->frag_stride;
dma = be64_to_cpu(rx_desc->data[nr].addr);
/* Allocate a replacement page */
if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
goto fail;
/* Unmap buffer */
dma_unmap_single(priv->ddev, dma, skb_frag_size(&skb_frags_rx[nr]),
PCI_DMA_FROMDEVICE);
}
/* Adjust size of last fragment to match actual length */
if (nr > 0)
skb_frag_size_set(&skb_frags_rx[nr - 1],
length - priv->frag_info[nr - 1].frag_prefix_size);
return nr;
fail:
/* Drop all accumulated fragments (which have already been replaced in
* the descriptor) of this packet; remaining fragments are reused... */
while (nr > 0) {
nr--;
__skb_frag_unref(&skb_frags_rx[nr]);
}
return 0;
}
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
struct page_frag *skb_frags,
struct mlx4_en_rx_alloc *page_alloc,
unsigned int length)
{
struct sk_buff *skb;
void *va;
int used_frags;
dma_addr_t dma;
skb = netdev_alloc_skb(priv->dev, SMALL_PACKET_SIZE + NET_IP_ALIGN);
if (!skb) {
en_dbg(RX_ERR, priv, "Failed allocating skb\n");
return NULL;
}
skb_reserve(skb, NET_IP_ALIGN);
skb->len = length;
/* Get pointer to first fragment so we could copy the headers into the
* (linear part of the) skb */
va = page_address(skb_frags[0].page) + skb_frags[0].offset;
if (length <= SMALL_PACKET_SIZE) {
/* We are copying all relevant data to the skb - temporarily
* synch buffers for the copy */
dma = be64_to_cpu(rx_desc->data[0].addr);
dma_sync_single_for_cpu(priv->ddev, dma, length,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, va, length);
dma_sync_single_for_device(priv->ddev, dma, length,
DMA_FROM_DEVICE);
skb->tail += length;
} else {
/* Move relevant fragments to skb */
used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
skb, page_alloc, length);
if (unlikely(!used_frags)) {
kfree_skb(skb);
return NULL;
}
skb_shinfo(skb)->nr_frags = used_frags;
/* Copy headers into the skb linear buffer */
memcpy(skb->data, va, HEADER_COPY_SIZE);
skb->tail += HEADER_COPY_SIZE;
/* Skip headers in first fragment */
skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
/* Adjust size of first fragment */
skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE);
skb->data_len = length - HEADER_COPY_SIZE;
}
return skb;
}
static void validate_loopback(struct mlx4_en_priv *priv, struct sk_buff *skb)
{
int i;
int offset = ETH_HLEN;
for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++, offset++) {
if (*(skb->data + offset) != (unsigned char) (i & 0xff))
goto out_loopback;
}
/* Loopback found */
priv->loopback_ok = 1;
out_loopback:
dev_kfree_skb_any(skb);
}
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
struct page_frag *skb_frags;
struct mlx4_en_rx_desc *rx_desc;
struct sk_buff *skb;
int index;
int nr;
unsigned int length;
int polled = 0;
int ip_summed;
struct ethhdr *ethh;
u64 s_mac;
if (!priv->port_up)
return 0;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
* reading 'cqe->index' */
index = cq->mcq.cons_index & ring->size_mask;
cqe = &cq->buf[index];
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cq->mcq.cons_index & cq->size)) {
skb_frags = ring->rx_info + (index << priv->log_rx_info);
rx_desc = ring->buf + (index << ring->log_stride);
/*
* make sure we read the CQE after we read the ownership bit
*/
rmb();
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
en_err(priv, "CQE completed in error - vendor "
"syndrom:%d syndrom:%d\n",
((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
((struct mlx4_err_cqe *) cqe)->syndrome);
goto next;
}
if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
goto next;
}
/* Get a pointer to the first fragment (we don't have an skb yet) and
 * cast it to an ethhdr struct */
ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
skb_frags[0].offset);
s_mac = mlx4_en_mac_to_u64(ethh->h_source);
/* If source MAC is equal to our own MAC and not performing
* the selftest or flb disabled - drop the packet */
if (s_mac == priv->mac &&
(!(dev->features & NETIF_F_LOOPBACK) ||
!priv->validate_loopback))
goto next;
/*
* Packet is OK - process it.
*/
length = be32_to_cpu(cqe->byte_cnt);
length -= ring->fcs_del;
ring->bytes += length;
ring->packets++;
if (likely(dev->features & NETIF_F_RXCSUM)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
(cqe->checksum == cpu_to_be16(0xffff))) {
ring->csum_ok++;
/* This packet is eligible for LRO if it is:
* - DIX Ethernet (type interpretation)
* - TCP/IP (v4)
* - without IP options
* - not an IP fragment */
if (dev->features & NETIF_F_GRO) {
struct sk_buff *gro_skb = napi_get_frags(&cq->napi);
if (!gro_skb)
goto next;
nr = mlx4_en_complete_rx_desc(
priv, rx_desc,
skb_frags, gro_skb,
ring->page_alloc, length);
if (!nr)
goto next;
skb_shinfo(gro_skb)->nr_frags = nr;
gro_skb->len = length;
gro_skb->data_len = length;
gro_skb->ip_summed = CHECKSUM_UNNECESSARY;
if (cqe->vlan_my_qpn &
cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) {
u16 vid = be16_to_cpu(cqe->sl_vid);
__vlan_hwaccel_put_tag(gro_skb, vid);
}
if (dev->features & NETIF_F_RXHASH)
gro_skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
skb_record_rx_queue(gro_skb, cq->ring);
napi_gro_frags(&cq->napi);
goto next;
}
/* LRO not possible, complete processing here */
ip_summed = CHECKSUM_UNNECESSARY;
} else {
ip_summed = CHECKSUM_NONE;
ring->csum_none++;
}
} else {
ip_summed = CHECKSUM_NONE;
ring->csum_none++;
}
skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
ring->page_alloc, length);
if (!skb) {
priv->stats.rx_dropped++;
goto next;
}
if (unlikely(priv->validate_loopback)) {
validate_loopback(priv, skb);
goto next;
}
skb->ip_summed = ip_summed;
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, cq->ring);
if (dev->features & NETIF_F_RXHASH)
skb->rxhash = be32_to_cpu(cqe->immed_rss_invalid);
if (be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_VLAN_PRESENT_MASK)
__vlan_hwaccel_put_tag(skb, be16_to_cpu(cqe->sl_vid));
/* Push it up the stack */
netif_receive_skb(skb);
next:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
cqe = &cq->buf[index];
if (++polled == budget) {
/* We are here because we reached the NAPI budget -
* flush only pending LRO sessions */
goto out;
}
}
out:
AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
mlx4_cq_set_ci(&cq->mcq);
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
ring->cons = cq->mcq.cons_index;
ring->prod += polled; /* Polled descriptors were reallocated in place */
mlx4_en_update_rx_prod_db(ring);
return polled;
}
void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
if (priv->port_up)
napi_schedule(&cq->napi);
else
mlx4_en_arm_cq(priv, cq);
}
/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
struct net_device *dev = cq->dev;
struct mlx4_en_priv *priv = netdev_priv(dev);
int done;
done = mlx4_en_process_rx_cq(dev, cq, budget);
/* If we used up all the quota - we're probably not done yet... */
if (done == budget)
INC_PERF_COUNTER(priv->pstats.napi_quota);
else {
/* Done for now */
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
}
return done;
}
/* Calculate the last offset position that accommodates a full fragment
* (assuming fragment size = stride - align) */
static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
{
u16 res = MLX4_EN_ALLOC_SIZE % stride;
u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
"res:%d offset:%d\n", stride, align, res, offset);
return offset;
}
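/*
 * Worked example (assuming, for illustration, that MLX4_EN_ALLOC_SIZE
 * is 16384 bytes; the real value depends on MLX4_EN_ALLOC_ORDER). For
 * stride = 1536 and align = 0:
 *
 *   res    = 16384 % 1536            = 1024
 *   offset = 16384 - 1536 - 1024 + 0 = 13824   (= 9 * 1536)
 *
 * which is the last stride-aligned offset from which a full fragment
 * still fits inside the allocation.
 */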
static int frag_sizes[] = {
FRAG_SZ0,
FRAG_SZ1,
FRAG_SZ2,
FRAG_SZ3
};
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
int buf_size = 0;
int i = 0;
while (buf_size < eff_mtu) {
priv->frag_info[i].frag_size =
(eff_mtu > buf_size + frag_sizes[i]) ?
frag_sizes[i] : eff_mtu - buf_size;
priv->frag_info[i].frag_prefix_size = buf_size;
if (!i) {
priv->frag_info[i].frag_align = NET_IP_ALIGN;
priv->frag_info[i].frag_stride =
ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
} else {
priv->frag_info[i].frag_align = 0;
priv->frag_info[i].frag_stride =
ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
}
priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
priv, priv->frag_info[i].frag_stride,
priv->frag_info[i].frag_align);
buf_size += priv->frag_info[i].frag_size;
i++;
}
priv->num_frags = i;
priv->rx_skb_size = eff_mtu;
priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
"num_frags:%d):\n", eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
en_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d "
"stride:%d last_offset:%d\n", i,
priv->frag_info[i].frag_size,
priv->frag_info[i].frag_prefix_size,
priv->frag_info[i].frag_align,
priv->frag_info[i].frag_stride,
priv->frag_info[i].last_offset);
}
}
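/*
 * Worked example of the fragmentation loop above. For a standard
 * 1500-byte MTU, eff_mtu = 1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 8
 * (LLC/SNAP) = 1526 bytes. Assuming, for illustration, FRAG_SZ0 = 512
 * and FRAG_SZ1 = 1024 (the actual values are defined elsewhere), the
 * frame splits into two fragments: frag 0 takes 512 bytes (prefix 0),
 * frag 1 takes the remaining 1014 bytes (prefix 512), and
 * num_frags = 2.
 */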
/* RSS related functions */
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
struct mlx4_en_rx_ring *ring,
enum mlx4_qp_state *state,
struct mlx4_qp *qp)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_qp_context *context;
int err = 0;
context = kmalloc(sizeof(*context), GFP_KERNEL);
if (!context) {
en_err(priv, "Failed to allocate qp context\n");
return -ENOMEM;
}
err = mlx4_qp_alloc(mdev->dev, qpn, qp);
if (err) {
en_err(priv, "Failed to allocate qp #%x\n", qpn);
goto out;
}
qp->event = mlx4_en_sqp_event;
memset(context, 0, sizeof(*context));
mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
qpn, ring->cqn, context);
context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
/* Cancel FCS removal if FW allows */
if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
context->param3 |= cpu_to_be32(1 << 29);
ring->fcs_del = ETH_FCS_LEN;
} else {
	ring->fcs_del = 0;
}
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
if (err) {
mlx4_qp_remove(mdev->dev, qp);
mlx4_qp_free(mdev->dev, qp);
}
mlx4_en_update_rx_prod_db(ring);
out:
kfree(context);
return err;
}
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
struct mlx4_qp_context context;
struct mlx4_rss_context *rss_context;
int rss_rings;
void *ptr;
u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
MLX4_RSS_TCP_IPV6);
int i, qpn;
int err = 0;
int good_qps = 0;
static const u32 rsskey[10] = { 0xD181C62C, 0xF7F4DB5B, 0x1983A2FC,
0x943E1ADB, 0xD9389E6B, 0xD1039C2C, 0xA74499AD,
0x593D56D9, 0xF3253C06, 0x2ADC1FFC};
en_dbg(DRV, priv, "Configuring rss steering\n");
err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
priv->rx_ring_num,
&rss_map->base_qpn);
if (err) {
en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
return err;
}
for (i = 0; i < priv->rx_ring_num; i++) {
qpn = rss_map->base_qpn + i;
err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
&rss_map->state[i],
&rss_map->qps[i]);
if (err)
goto rss_err;
++good_qps;
}
/* Configure RSS indirection qp */
err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
goto rss_err;
}
rss_map->indir_qp.event = mlx4_en_sqp_event;
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
priv->rx_ring[0].cqn, &context);
if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
rss_rings = priv->rx_ring_num;
else
rss_rings = priv->prof->rss_rings;
ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
rss_context = ptr;
rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
if (priv->mdev->profile.udp_rss) {
rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
rss_context->base_qpn_udp = rss_context->default_qpn;
}
rss_context->flags = rss_mask;
rss_context->hash_fn = MLX4_RSS_HASH_TOP;
for (i = 0; i < 10; i++)
rss_context->rss_key[i] = cpu_to_be32(rsskey[i]);
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
&rss_map->indir_qp, &rss_map->indir_state);
if (err)
goto indir_err;
return 0;
indir_err:
mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
rss_err:
for (i = 0; i < good_qps; i++) {
mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
}
mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
return err;
}
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
int i;
mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
for (i = 0; i < priv->rx_ring_num; i++) {
mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
}
mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}
| gpl-2.0 |
HaoZeke/kernel | arch/mips/ath79/dev-leds-gpio.c | 4805 | 1177 | /*
* Atheros AR71XX/AR724X/AR913X common GPIO LEDs support
*
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include "dev-leds-gpio.h"
void __init ath79_register_leds_gpio(int id,
unsigned num_leds,
struct gpio_led *leds)
{
struct platform_device *pdev;
struct gpio_led_platform_data pdata;
struct gpio_led *p;
int err;
p = kmemdup(leds, num_leds * sizeof(*p), GFP_KERNEL);
if (!p)
return;
pdev = platform_device_alloc("leds-gpio", id);
if (!pdev)
goto err_free_leds;
memset(&pdata, 0, sizeof(pdata));
pdata.num_leds = num_leds;
pdata.leds = p;
err = platform_device_add_data(pdev, &pdata, sizeof(pdata));
if (err)
goto err_put_pdev;
err = platform_device_add(pdev);
if (err)
goto err_put_pdev;
return;
err_put_pdev:
platform_device_put(pdev);
err_free_leds:
kfree(p);
}
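/*
 * Usage sketch (hypothetical board code, not part of this file): a
 * board setup file describes its LEDs in a gpio_led array and registers
 * them once at init time.
 */
#if 0
static struct gpio_led example_board_leds[] __initdata = {
	{
		.name		= "example:green:status",
		.gpio		= 1,
		.active_low	= 1,
	}, {
		.name		= "example:red:wlan",
		.gpio		= 2,
		.active_low	= 1,
	},
};

static void __init example_board_setup(void)
{
	ath79_register_leds_gpio(-1, ARRAY_SIZE(example_board_leds),
				 example_board_leds);
}
#endif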
| gpl-2.0 |
arpitjain9819/FIRE-AND-FLAMES | drivers/mtd/maps/intel_vr_nor.c | 4805 | 7271 | /*
* drivers/mtd/maps/intel_vr_nor.c
*
* An MTD map driver for a NOR flash bank on the Expansion Bus of the Intel
* Vermilion Range chipset.
*
* The Vermilion Range Expansion Bus supports four chip selects, each of which
* has 64MiB of address space. The 2nd BAR of the Expansion Bus PCI Device
* is a 256MiB memory region containing the address spaces for all four of the
* chip selects, with start addresses hardcoded on 64MiB boundaries.
*
* This map driver only supports NOR flash on chip select 0. The buswidth
* (either 8 bits or 16 bits) is determined by reading the Expansion Bus Timing
* and Control Register for Chip Select 0 (EXP_TIMING_CS0). This driver does
* not modify the value in the EXP_TIMING_CS0 register except to enable writing
* and disable boot acceleration. The timing parameters in the register are
* assumed to have been properly initialized by the BIOS. The reset default
* timing parameters are maximally conservative (slow), so access to the flash
* will be slower than it should be if the BIOS has not initialized the timing
* parameters.
*
* Author: Andy Lowe <alowe@mvista.com>
*
* 2006 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/flashchip.h>
#define DRV_NAME "vr_nor"
struct vr_nor_mtd {
void __iomem *csr_base;
struct map_info map;
struct mtd_info *info;
struct pci_dev *dev;
};
/* Expansion Bus Configuration and Status Registers are in BAR 0 */
#define EXP_CSR_MBAR 0
/* Expansion Bus Memory Window is BAR 1 */
#define EXP_WIN_MBAR 1
/* Maximum address space for Chip Select 0 is 64MiB */
#define CS0_SIZE 0x04000000
/* Chip Select 0 is at offset 0 in the Memory Window */
#define CS0_START 0x0
/* Chip Select 0 Timing Register is at offset 0 in CSR */
#define EXP_TIMING_CS0 0x00
#define TIMING_CS_EN (1 << 31) /* Chip Select Enable */
#define TIMING_BOOT_ACCEL_DIS (1 << 8) /* Boot Acceleration Disable */
#define TIMING_WR_EN (1 << 1) /* Write Enable */
#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
#define TIMING_MASK 0x3FFF0000
static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
{
mtd_device_unregister(p->info);
}
static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
{
/* register the flash bank */
/* partition the flash bank */
return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
}
static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
{
map_destroy(p->info);
}
static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
{
static const char *probe_types[] =
{ "cfi_probe", "jedec_probe", NULL };
const char **type;
for (type = probe_types; !p->info && *type; type++)
p->info = do_map_probe(*type, &p->map);
if (!p->info)
return -ENODEV;
p->info->owner = THIS_MODULE;
return 0;
}
static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
{
unsigned int exp_timing_cs0;
/* write-protect the flash bank */
exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
exp_timing_cs0 &= ~TIMING_WR_EN;
writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
/* unmap the flash window */
iounmap(p->map.virt);
/* unmap the csr window */
iounmap(p->csr_base);
}
/*
* Initialize the map_info structure and map the flash.
* Returns 0 on success, nonzero otherwise.
*/
static int __devinit vr_nor_init_maps(struct vr_nor_mtd *p)
{
unsigned long csr_phys, csr_len;
unsigned long win_phys, win_len;
unsigned int exp_timing_cs0;
int err;
csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR);
csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR);
win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR);
win_len = pci_resource_len(p->dev, EXP_WIN_MBAR);
if (!csr_phys || !csr_len || !win_phys || !win_len)
return -ENODEV;
if (win_len < (CS0_START + CS0_SIZE))
return -ENXIO;
p->csr_base = ioremap_nocache(csr_phys, csr_len);
if (!p->csr_base)
return -ENOMEM;
exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
if (!(exp_timing_cs0 & TIMING_CS_EN)) {
dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
"is disabled.\n");
err = -ENODEV;
goto release;
}
if ((exp_timing_cs0 & TIMING_MASK) == TIMING_MASK) {
dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
"is configured for maximally slow access times.\n");
}
p->map.name = DRV_NAME;
p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
p->map.phys = win_phys + CS0_START;
p->map.size = CS0_SIZE;
p->map.virt = ioremap_nocache(p->map.phys, p->map.size);
if (!p->map.virt) {
err = -ENOMEM;
goto release;
}
simple_map_init(&p->map);
/* Enable writes to flash bank */
exp_timing_cs0 |= TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN;
writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
return 0;
release:
iounmap(p->csr_base);
return err;
}
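/*
 * Sanity of the window check above: CS0_START + CS0_SIZE is
 * 0x0 + 0x04000000 = 64 MiB. A correctly configured Expansion Bus
 * memory BAR (256 MiB covering all four chip selects) always passes;
 * an undersized BAR fails early with -ENXIO before anything is mapped.
 */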
static struct pci_device_id vr_nor_pci_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)},
{0,}
};
static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
{
struct vr_nor_mtd *p = pci_get_drvdata(dev);
pci_set_drvdata(dev, NULL);
vr_nor_destroy_partitions(p);
vr_nor_destroy_mtd_setup(p);
vr_nor_destroy_maps(p);
kfree(p);
pci_release_regions(dev);
pci_disable_device(dev);
}
static int __devinit
vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct vr_nor_mtd *p = NULL;
unsigned int exp_timing_cs0;
int err;
err = pci_enable_device(dev);
if (err)
goto out;
err = pci_request_regions(dev, DRV_NAME);
if (err)
goto disable_dev;
p = kzalloc(sizeof(*p), GFP_KERNEL);
err = -ENOMEM;
if (!p)
goto release;
p->dev = dev;
err = vr_nor_init_maps(p);
if (err)
goto release;
err = vr_nor_mtd_setup(p);
if (err)
goto destroy_maps;
err = vr_nor_init_partitions(p);
if (err)
goto destroy_mtd_setup;
pci_set_drvdata(dev, p);
return 0;
destroy_mtd_setup:
map_destroy(p->info);
destroy_maps:
/* write-protect the flash bank */
exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
exp_timing_cs0 &= ~TIMING_WR_EN;
writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
/* unmap the flash window */
iounmap(p->map.virt);
/* unmap the csr window */
iounmap(p->csr_base);
release:
kfree(p);
pci_release_regions(dev);
disable_dev:
pci_disable_device(dev);
out:
return err;
}
static struct pci_driver vr_nor_pci_driver = {
.name = DRV_NAME,
.probe = vr_nor_pci_probe,
.remove = __devexit_p(vr_nor_pci_remove),
.id_table = vr_nor_pci_ids,
};
static int __init vr_nor_mtd_init(void)
{
return pci_register_driver(&vr_nor_pci_driver);
}
static void __exit vr_nor_mtd_exit(void)
{
pci_unregister_driver(&vr_nor_pci_driver);
}
module_init(vr_nor_mtd_init);
module_exit(vr_nor_mtd_exit);
MODULE_AUTHOR("Andy Lowe");
MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, vr_nor_pci_ids);
| gpl-2.0 |
arco/samsung-kernel-msm7x30 | drivers/mtd/maps/intel_vr_nor.c | 4805 | 7271 | /*
* drivers/mtd/maps/intel_vr_nor.c
*
* An MTD map driver for a NOR flash bank on the Expansion Bus of the Intel
* Vermilion Range chipset.
*
* The Vermilion Range Expansion Bus supports four chip selects, each of which
* has 64MiB of address space. The 2nd BAR of the Expansion Bus PCI Device
* is a 256MiB memory region containing the address spaces for all four of the
* chip selects, with start addresses hardcoded on 64MiB boundaries.
*
* This map driver only supports NOR flash on chip select 0. The buswidth
* (either 8 bits or 16 bits) is determined by reading the Expansion Bus Timing
* and Control Register for Chip Select 0 (EXP_TIMING_CS0). This driver does
* not modify the value in the EXP_TIMING_CS0 register except to enable writing
* and disable boot acceleration. The timing parameters in the register are
* assumed to have been properly initialized by the BIOS. The reset default
* timing parameters are maximally conservative (slow), so access to the flash
* will be slower than it should be if the BIOS has not initialized the timing
* parameters.
*
* Author: Andy Lowe <alowe@mvista.com>
*
* 2006 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/flashchip.h>
#define DRV_NAME "vr_nor"
struct vr_nor_mtd {
void __iomem *csr_base;
struct map_info map;
struct mtd_info *info;
struct pci_dev *dev;
};
/* Expansion Bus Configuration and Status Registers are in BAR 0 */
#define EXP_CSR_MBAR 0
/* Expansion Bus Memory Window is BAR 1 */
#define EXP_WIN_MBAR 1
/* Maximum address space for Chip Select 0 is 64MiB */
#define CS0_SIZE 0x04000000
/* Chip Select 0 is at offset 0 in the Memory Window */
#define CS0_START 0x0
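/*
 * Illustrative only (an assumption, not part of the original driver):
 * the header comment says the four chip-select windows are hardcoded on
 * 64MiB boundaries inside the 256MiB memory window, so chip select n's
 * offset would be n * 64MiB. Only CS0 (offset 0) is used by this driver,
 * so the helper below is hypothetical.
 */
#define CS_WINDOW_START(n) ((n) * CS0_SIZE) /* hypothetical helper */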
/* Chip Select 0 Timing Register is at offset 0 in CSR */
#define EXP_TIMING_CS0 0x00
#define TIMING_CS_EN (1 << 31) /* Chip Select Enable */
#define TIMING_BOOT_ACCEL_DIS (1 << 8) /* Boot Acceleration Disable */
#define TIMING_WR_EN (1 << 1) /* Write Enable */
#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
#define TIMING_MASK 0x3FFF0000
static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
{
mtd_device_unregister(p->info);
}
static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
{
/* register the flash bank, parsing out any partitions found on it */
return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0);
}
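/*
 * Note (an added sketch, not in the original file): with NULL parser
 * types and no fallback partition array, mtd_device_parse_register()
 * tries the MTD core's default partition parsers and, if none match,
 * registers the whole bank as a single MTD device.
 */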
static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
{
map_destroy(p->info);
}
static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
{
static const char *probe_types[] =
{ "cfi_probe", "jedec_probe", NULL };
const char **type;
for (type = probe_types; !p->info && *type; type++)
p->info = do_map_probe(*type, &p->map);
if (!p->info)
return -ENODEV;
p->info->owner = THIS_MODULE;
return 0;
}
static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
{
unsigned int exp_timing_cs0;
/* write-protect the flash bank */
exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
exp_timing_cs0 &= ~TIMING_WR_EN;
writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
/* unmap the flash window */
iounmap(p->map.virt);
/* unmap the csr window */
iounmap(p->csr_base);
}
/*
* Initialize the map_info structure and map the flash.
* Returns 0 on success, nonzero otherwise.
*/
static int __devinit vr_nor_init_maps(struct vr_nor_mtd *p)
{
unsigned long csr_phys, csr_len;
unsigned long win_phys, win_len;
unsigned int exp_timing_cs0;
int err;
csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR);
csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR);
win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR);
win_len = pci_resource_len(p->dev, EXP_WIN_MBAR);
if (!csr_phys || !csr_len || !win_phys || !win_len)
return -ENODEV;
if (win_len < (CS0_START + CS0_SIZE))
return -ENXIO;
p->csr_base = ioremap_nocache(csr_phys, csr_len);
if (!p->csr_base)
return -ENOMEM;
exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
if (!(exp_timing_cs0 & TIMING_CS_EN)) {
dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
"is disabled.\n");
err = -ENODEV;
goto release;
}
if ((exp_timing_cs0 & TIMING_MASK) == TIMING_MASK) {
dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
"is configured for maximally slow access times.\n");
}
p->map.name = DRV_NAME;
p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
p->map.phys = win_phys + CS0_START;
p->map.size = CS0_SIZE;
p->map.virt = ioremap_nocache(p->map.phys, p->map.size);
if (!p->map.virt) {
err = -ENOMEM;
goto release;
}
simple_map_init(&p->map);
/* Enable writes to flash bank */
exp_timing_cs0 |= TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN;
writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
return 0;
release:
iounmap(p->csr_base);
return err;
}
static struct pci_device_id vr_nor_pci_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)},
{0,}
};
static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
{
struct vr_nor_mtd *p = pci_get_drvdata(dev);
pci_set_drvdata(dev, NULL);
vr_nor_destroy_partitions(p);
vr_nor_destroy_mtd_setup(p);
vr_nor_destroy_maps(p);
kfree(p);
pci_release_regions(dev);
pci_disable_device(dev);
}
static int __devinit
vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct vr_nor_mtd *p = NULL;
unsigned int exp_timing_cs0;
int err;
err = pci_enable_device(dev);
if (err)
goto out;
err = pci_request_regions(dev, DRV_NAME);
if (err)
goto disable_dev;
p = kzalloc(sizeof(*p), GFP_KERNEL);
err = -ENOMEM;
if (!p)
goto release;
p->dev = dev;
err = vr_nor_init_maps(p);
if (err)
goto release;
err = vr_nor_mtd_setup(p);
if (err)
goto destroy_maps;
err = vr_nor_init_partitions(p);
if (err)
goto destroy_mtd_setup;
pci_set_drvdata(dev, p);
return 0;
destroy_mtd_setup:
map_destroy(p->info);
destroy_maps:
/* write-protect the flash bank */
exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
exp_timing_cs0 &= ~TIMING_WR_EN;
writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);
/* unmap the flash window */
iounmap(p->map.virt);
/* unmap the csr window */
iounmap(p->csr_base);
release:
kfree(p);
pci_release_regions(dev);
disable_dev:
pci_disable_device(dev);
out:
return err;
}
static struct pci_driver vr_nor_pci_driver = {
.name = DRV_NAME,
.probe = vr_nor_pci_probe,
.remove = __devexit_p(vr_nor_pci_remove),
.id_table = vr_nor_pci_ids,
};
static int __init vr_nor_mtd_init(void)
{
return pci_register_driver(&vr_nor_pci_driver);
}
static void __exit vr_nor_mtd_exit(void)
{
pci_unregister_driver(&vr_nor_pci_driver);
}
module_init(vr_nor_mtd_init);
module_exit(vr_nor_mtd_exit);
MODULE_AUTHOR("Andy Lowe");
MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, vr_nor_pci_ids);
| gpl-2.0 |
izzaeroth/FunkNA-KERNEL | net/netfilter/ipvs/ip_vs_dh.c | 5061 | 6264 | /*
* IPVS: Destination Hashing scheduling module
*
* Authors: Wensong Zhang <wensong@gnuchina.org>
*
* Inspired by the consistent hashing scheduler patch from
* Thomas Proell <proellt@gmx.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes:
*
*/
/*
 * The dh algorithm selects a server by the hash key of the destination
 * IP address. The pseudo code is as follows:
 *
 * n <- servernode[dest_ip];
 * if (n is dead) OR
 * (n is overloaded) OR (n.weight <= 0) then
 * return NULL;
 *
 * return n;
 *
 * Note that servernode is a 256-bucket hash table that maps the hash
 * index derived from the packet destination IP address to the current
 * server array. If the dh scheduler is used in a cache cluster, it is
 * good to combine it with the cache_bypass feature. When the statically
 * assigned server is dead or overloaded, the load balancer can bypass
 * the cache server and send requests to the original server directly.
 *
 */
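/*
 * A minimal sketch (not part of the original file) of how the pseudo
 * code above maps onto ip_vs_dh_schedule() further below:
 *
 * n = ip_vs_dh_get(svc->af, tbl, &iph.daddr); // servernode[dest_ip]
 * if (!n || !(n->flags & IP_VS_DEST_F_AVAILABLE) ||
 *     atomic_read(&n->weight) <= 0 || is_overloaded(n))
 *         return NULL;
 * return n;
 */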
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/ip_vs.h>
/*
* IPVS DH bucket
*/
struct ip_vs_dh_bucket {
struct ip_vs_dest *dest; /* real server (cache) */
};
/*
* for IPVS DH entry hash table
*/
#ifndef CONFIG_IP_VS_DH_TAB_BITS
#define CONFIG_IP_VS_DH_TAB_BITS 8
#endif
#define IP_VS_DH_TAB_BITS CONFIG_IP_VS_DH_TAB_BITS
#define IP_VS_DH_TAB_SIZE (1 << IP_VS_DH_TAB_BITS)
#define IP_VS_DH_TAB_MASK (IP_VS_DH_TAB_SIZE - 1)
/*
* Returns hash value for IPVS DH entry
*/
static inline unsigned ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr)
{
__be32 addr_fold = addr->ip;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
addr_fold = addr->ip6[0]^addr->ip6[1]^
addr->ip6[2]^addr->ip6[3];
#endif
return (ntohl(addr_fold)*2654435761UL) & IP_VS_DH_TAB_MASK;
}
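/*
 * Usage sketch under stated assumptions (not part of the original
 * module; the helper name is hypothetical): previewing the bucket chosen
 * for an IPv4 destination already in network byte order. The multiplier
 * 2654435761 is Knuth's golden-ratio constant, which spreads sequential
 * addresses across the buckets before IP_VS_DH_TAB_MASK keeps the low
 * 8 bits.
 */
static inline unsigned example_dh_bucket(__be32 daddr)
{
return (ntohl(daddr) * 2654435761UL) & IP_VS_DH_TAB_MASK;
}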
/*
* Get ip_vs_dest associated with supplied parameters.
*/
static inline struct ip_vs_dest *
ip_vs_dh_get(int af, struct ip_vs_dh_bucket *tbl,
const union nf_inet_addr *addr)
{
return (tbl[ip_vs_dh_hashkey(af, addr)]).dest;
}
/*
* Assign all the hash buckets of the specified table with the service.
*/
static int
ip_vs_dh_assign(struct ip_vs_dh_bucket *tbl, struct ip_vs_service *svc)
{
int i;
struct ip_vs_dh_bucket *b;
struct list_head *p;
struct ip_vs_dest *dest;
b = tbl;
p = &svc->destinations;
for (i=0; i<IP_VS_DH_TAB_SIZE; i++) {
if (list_empty(p)) {
b->dest = NULL;
} else {
/* skip the list head when wrapping around */
if (p == &svc->destinations)
p = p->next;
dest = list_entry(p, struct ip_vs_dest, n_list);
atomic_inc(&dest->refcnt);
b->dest = dest;
p = p->next;
}
b++;
}
return 0;
}
/*
* Flush all the hash buckets of the specified table.
*/
static void ip_vs_dh_flush(struct ip_vs_dh_bucket *tbl)
{
int i;
struct ip_vs_dh_bucket *b;
b = tbl;
for (i=0; i<IP_VS_DH_TAB_SIZE; i++) {
if (b->dest) {
atomic_dec(&b->dest->refcnt);
b->dest = NULL;
}
b++;
}
}
static int ip_vs_dh_init_svc(struct ip_vs_service *svc)
{
struct ip_vs_dh_bucket *tbl;
/* allocate the DH table for this service */
tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE,
GFP_ATOMIC);
if (tbl == NULL)
return -ENOMEM;
svc->sched_data = tbl;
IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) allocated for "
"current service\n",
sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE);
/* assign the hash buckets with the updated service */
ip_vs_dh_assign(tbl, svc);
return 0;
}
static int ip_vs_dh_done_svc(struct ip_vs_service *svc)
{
struct ip_vs_dh_bucket *tbl = svc->sched_data;
/* got to clean up hash buckets here */
ip_vs_dh_flush(tbl);
/* release the table itself */
kfree(svc->sched_data);
IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) released\n",
sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE);
return 0;
}
static int ip_vs_dh_update_svc(struct ip_vs_service *svc)
{
struct ip_vs_dh_bucket *tbl = svc->sched_data;
/* got to clean up hash buckets here */
ip_vs_dh_flush(tbl);
/* assign the hash buckets with the updated service */
ip_vs_dh_assign(tbl, svc);
return 0;
}
/*
* If the dest flags is set with IP_VS_DEST_F_OVERLOAD,
* consider that the server is overloaded here.
*/
static inline int is_overloaded(struct ip_vs_dest *dest)
{
return dest->flags & IP_VS_DEST_F_OVERLOAD;
}
/*
* Destination hashing scheduling
*/
static struct ip_vs_dest *
ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
struct ip_vs_dest *dest;
struct ip_vs_dh_bucket *tbl;
struct ip_vs_iphdr iph;
ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
tbl = (struct ip_vs_dh_bucket *)svc->sched_data;
dest = ip_vs_dh_get(svc->af, tbl, &iph.daddr);
if (!dest
|| !(dest->flags & IP_VS_DEST_F_AVAILABLE)
|| atomic_read(&dest->weight) <= 0
|| is_overloaded(dest)) {
return NULL;
}
IP_VS_DBG_BUF(6, "DH: destination IP address %s --> server %s:%d\n",
IP_VS_DBG_ADDR(svc->af, &iph.daddr),
IP_VS_DBG_ADDR(svc->af, &dest->addr),
ntohs(dest->port));
return dest;
}
/*
* IPVS DH Scheduler structure
*/
static struct ip_vs_scheduler ip_vs_dh_scheduler =
{
.name = "dh",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
.n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
.init_service = ip_vs_dh_init_svc,
.done_service = ip_vs_dh_done_svc,
.update_service = ip_vs_dh_update_svc,
.schedule = ip_vs_dh_schedule,
};
static int __init ip_vs_dh_init(void)
{
return register_ip_vs_scheduler(&ip_vs_dh_scheduler);
}
static void __exit ip_vs_dh_cleanup(void)
{
unregister_ip_vs_scheduler(&ip_vs_dh_scheduler);
}
module_init(ip_vs_dh_init);
module_exit(ip_vs_dh_cleanup);
MODULE_LICENSE("GPL");
| gpl-2.0 |
DooMLoRD/android_kernel_sony_apq8064 | net/rds/iw.c | 5317 | 8988 | /*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "rds.h"
#include "iw.h"
unsigned int fastreg_pool_size = RDS_FASTREG_POOL_SIZE;
unsigned int fastreg_message_size = RDS_FASTREG_SIZE + 1; /* +1 allows for unaligned MRs */
module_param(fastreg_pool_size, int, 0444);
MODULE_PARM_DESC(fastreg_pool_size, " Max number of fastreg MRs per device");
module_param(fastreg_message_size, int, 0444);
MODULE_PARM_DESC(fastreg_message_size, " Max size of a RDMA transfer (fastreg MRs)");
struct list_head rds_iw_devices;
/* NOTE: if also grabbing iwdev lock, grab this first */
DEFINE_SPINLOCK(iw_nodev_conns_lock);
LIST_HEAD(iw_nodev_conns);
static void rds_iw_add_one(struct ib_device *device)
{
struct rds_iw_device *rds_iwdev;
struct ib_device_attr *dev_attr;
/* Only handle iwarp devices */
if (device->node_type != RDMA_NODE_RNIC)
return;
dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
if (!dev_attr)
return;
if (ib_query_device(device, dev_attr)) {
rdsdebug("Query device failed for %s\n", device->name);
goto free_attr;
}
rds_iwdev = kmalloc(sizeof *rds_iwdev, GFP_KERNEL);
if (!rds_iwdev)
goto free_attr;
spin_lock_init(&rds_iwdev->spinlock);
rds_iwdev->dma_local_lkey = !!(dev_attr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY);
rds_iwdev->max_wrs = dev_attr->max_qp_wr;
rds_iwdev->max_sge = min(dev_attr->max_sge, RDS_IW_MAX_SGE);
rds_iwdev->dev = device;
rds_iwdev->pd = ib_alloc_pd(device);
if (IS_ERR(rds_iwdev->pd))
goto free_dev;
if (!rds_iwdev->dma_local_lkey) {
rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd,
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(rds_iwdev->mr))
goto err_pd;
} else
rds_iwdev->mr = NULL;
rds_iwdev->mr_pool = rds_iw_create_mr_pool(rds_iwdev);
if (IS_ERR(rds_iwdev->mr_pool)) {
rds_iwdev->mr_pool = NULL;
goto err_mr;
}
INIT_LIST_HEAD(&rds_iwdev->cm_id_list);
INIT_LIST_HEAD(&rds_iwdev->conn_list);
list_add_tail(&rds_iwdev->list, &rds_iw_devices);
ib_set_client_data(device, &rds_iw_client, rds_iwdev);
goto free_attr;
err_mr:
if (rds_iwdev->mr)
ib_dereg_mr(rds_iwdev->mr);
err_pd:
ib_dealloc_pd(rds_iwdev->pd);
free_dev:
kfree(rds_iwdev);
free_attr:
kfree(dev_attr);
}
static void rds_iw_remove_one(struct ib_device *device)
{
struct rds_iw_device *rds_iwdev;
struct rds_iw_cm_id *i_cm_id, *next;
rds_iwdev = ib_get_client_data(device, &rds_iw_client);
if (!rds_iwdev)
return;
spin_lock_irq(&rds_iwdev->spinlock);
list_for_each_entry_safe(i_cm_id, next, &rds_iwdev->cm_id_list, list) {
list_del(&i_cm_id->list);
kfree(i_cm_id);
}
spin_unlock_irq(&rds_iwdev->spinlock);
rds_iw_destroy_conns(rds_iwdev);
if (rds_iwdev->mr_pool)
rds_iw_destroy_mr_pool(rds_iwdev->mr_pool);
if (rds_iwdev->mr)
ib_dereg_mr(rds_iwdev->mr);
while (ib_dealloc_pd(rds_iwdev->pd)) {
rdsdebug("Failed to dealloc pd %p\n", rds_iwdev->pd);
msleep(1);
}
list_del(&rds_iwdev->list);
kfree(rds_iwdev);
}
struct ib_client rds_iw_client = {
.name = "rds_iw",
.add = rds_iw_add_one,
.remove = rds_iw_remove_one
};
static int rds_iw_conn_info_visitor(struct rds_connection *conn,
void *buffer)
{
struct rds_info_rdma_connection *iinfo = buffer;
struct rds_iw_connection *ic;
/* We will only ever look at IB transports */
if (conn->c_trans != &rds_iw_transport)
return 0;
iinfo->src_addr = conn->c_laddr;
iinfo->dst_addr = conn->c_faddr;
memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
if (rds_conn_state(conn) == RDS_CONN_UP) {
struct rds_iw_device *rds_iwdev;
struct rdma_dev_addr *dev_addr;
ic = conn->c_transport_data;
dev_addr = &ic->i_cm_id->route.addr.dev_addr;
rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
iinfo->max_send_wr = ic->i_send_ring.w_nr;
iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
iinfo->max_send_sge = rds_iwdev->max_sge;
rds_iw_get_mr_info(rds_iwdev, iinfo);
}
return 1;
}
static void rds_iw_ic_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
rds_for_each_conn_info(sock, len, iter, lens,
rds_iw_conn_info_visitor,
sizeof(struct rds_info_rdma_connection));
}
/*
* Early RDS/IB was built to only bind to an address if there is an IPoIB
* device with that address set.
*
* If it were me, I'd advocate for something more flexible. Sending and
* receiving should be device-agnostic. Transports would try and maintain
* connections between peers who have messages queued. Userspace would be
* allowed to influence which paths have priority. We could call userspace
* asserting this policy "routing".
*/
static int rds_iw_laddr_check(__be32 addr)
{
int ret;
struct rdma_cm_id *cm_id;
struct sockaddr_in sin;
/* Create a CMA ID and try to bind it. This catches both
* IB and iWARP capable NICs.
*/
cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = addr;
/* rdma_bind_addr will only succeed for IB & iWARP devices */
ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
/* due to this, we will claim to support IB devices unless we
check node_type. */
if (ret || cm_id->device->node_type != RDMA_NODE_RNIC)
ret = -EADDRNOTAVAIL;
rdsdebug("addr %pI4 ret %d node type %d\n",
&addr, ret,
cm_id->device ? cm_id->device->node_type : -1);
rdma_destroy_id(cm_id);
return ret;
}
void rds_iw_exit(void)
{
rds_info_deregister_func(RDS_INFO_IWARP_CONNECTIONS, rds_iw_ic_info);
rds_iw_destroy_nodev_conns();
ib_unregister_client(&rds_iw_client);
rds_iw_sysctl_exit();
rds_iw_recv_exit();
rds_trans_unregister(&rds_iw_transport);
}
struct rds_transport rds_iw_transport = {
.laddr_check = rds_iw_laddr_check,
.xmit_complete = rds_iw_xmit_complete,
.xmit = rds_iw_xmit,
.xmit_rdma = rds_iw_xmit_rdma,
.recv = rds_iw_recv,
.conn_alloc = rds_iw_conn_alloc,
.conn_free = rds_iw_conn_free,
.conn_connect = rds_iw_conn_connect,
.conn_shutdown = rds_iw_conn_shutdown,
.inc_copy_to_user = rds_iw_inc_copy_to_user,
.inc_free = rds_iw_inc_free,
.cm_initiate_connect = rds_iw_cm_initiate_connect,
.cm_handle_connect = rds_iw_cm_handle_connect,
.cm_connect_complete = rds_iw_cm_connect_complete,
.stats_info_copy = rds_iw_stats_info_copy,
.exit = rds_iw_exit,
.get_mr = rds_iw_get_mr,
.sync_mr = rds_iw_sync_mr,
.free_mr = rds_iw_free_mr,
.flush_mrs = rds_iw_flush_mrs,
.t_owner = THIS_MODULE,
.t_name = "iwarp",
.t_type = RDS_TRANS_IWARP,
.t_prefer_loopback = 1,
};
int rds_iw_init(void)
{
int ret;
INIT_LIST_HEAD(&rds_iw_devices);
ret = ib_register_client(&rds_iw_client);
if (ret)
goto out;
ret = rds_iw_sysctl_init();
if (ret)
goto out_ibreg;
ret = rds_iw_recv_init();
if (ret)
goto out_sysctl;
ret = rds_trans_register(&rds_iw_transport);
if (ret)
goto out_recv;
rds_info_register_func(RDS_INFO_IWARP_CONNECTIONS, rds_iw_ic_info);
goto out;
out_recv:
rds_iw_recv_exit();
out_sysctl:
rds_iw_sysctl_exit();
out_ibreg:
ib_unregister_client(&rds_iw_client);
out:
return ret;
}
MODULE_LICENSE("GPL");
| gpl-2.0 |
zparallax/amplitude_aosp_12_1 | arch/frv/kernel/pm-mb93093.c | 13765 | 1429 | /*
* FR-V MB93093 Power Management Routines
*
* Copyright (c) 2004 Red Hat, Inc.
*
* Written by: msalter@redhat.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License.
*
*/
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/mb86943a.h>
#include "local.h"
static unsigned long imask;
/*
* Setup interrupt masks, etc to enable wakeup by power switch
*/
static void mb93093_power_switch_setup(void)
{
/* mask all but FPGA interrupt sources. */
imask = *(volatile unsigned long *)0xfeff9820;
*(volatile unsigned long *)0xfeff9820 = ~(1 << (IRQ_XIRQ2_LEVEL + 16)) & 0xfffe0000;
}
/*
* Cleanup interrupt masks, etc after wakeup by power switch
*/
static void mb93093_power_switch_cleanup(void)
{
*(volatile unsigned long *)0xfeff9820 = imask;
}
/*
* Return non-zero if wakeup irq was caused by power switch
*/
static int mb93093_power_switch_check(void)
{
return 1;
}
/*
* Initialize power interface
*/
static int __init mb93093_pm_init(void)
{
__power_switch_wake_setup = mb93093_power_switch_setup;
__power_switch_wake_check = mb93093_power_switch_check;
__power_switch_wake_cleanup = mb93093_power_switch_cleanup;
return 0;
}
__initcall(mb93093_pm_init);
| gpl-2.0 |
ArtVandelae/steamos_kernel | arch/frv/kernel/pm-mb93093.c | 13765 | 1429 | /*
* FR-V MB93093 Power Management Routines
*
* Copyright (c) 2004 Red Hat, Inc.
*
* Written by: msalter@redhat.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License.
*
*/
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/mb86943a.h>
#include "local.h"
static unsigned long imask;
/*
* Setup interrupt masks, etc to enable wakeup by power switch
*/
static void mb93093_power_switch_setup(void)
{
/* mask all but FPGA interrupt sources. */
imask = *(volatile unsigned long *)0xfeff9820;
*(volatile unsigned long *)0xfeff9820 = ~(1 << (IRQ_XIRQ2_LEVEL + 16)) & 0xfffe0000;
}
/*
* Cleanup interrupt masks, etc after wakeup by power switch
*/
static void mb93093_power_switch_cleanup(void)
{
*(volatile unsigned long *)0xfeff9820 = imask;
}
/*
* Return non-zero if wakeup irq was caused by power switch
*/
static int mb93093_power_switch_check(void)
{
return 1;
}
/*
* Initialize power interface
*/
static int __init mb93093_pm_init(void)
{
__power_switch_wake_setup = mb93093_power_switch_setup;
__power_switch_wake_check = mb93093_power_switch_check;
__power_switch_wake_cleanup = mb93093_power_switch_cleanup;
return 0;
}
__initcall(mb93093_pm_init);
| gpl-2.0 |
jameskdev/android_kernel_sky_ef30s | net/ipv4/xfrm4_mode_transport.c | 14277 | 2135 | /*
* xfrm4_mode_transport.c - Transport mode encapsulation for IPv4.
*
* Copyright (c) 2004-2006 Herbert Xu <herbert@gondor.apana.org.au>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/stringify.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/xfrm.h>
/* Add encapsulation header.
*
* The IP header will be moved forward to make space for the encapsulation
* header.
*/
static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
struct iphdr *iph = ip_hdr(skb);
int ihl = iph->ihl * 4;
skb_set_network_header(skb, -x->props.header_len);
skb->mac_header = skb->network_header +
offsetof(struct iphdr, protocol);
skb->transport_header = skb->network_header + ihl;
__skb_pull(skb, ihl);
memmove(skb_network_header(skb), iph, ihl);
return 0;
}
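/*
 * Illustrative buffer layout (a sketch, not from the original file) for
 * the output path above, with H = x->props.header_len and an IP header
 * of ihl bytes:
 *
 * before: [ headroom ............ | IP hdr | payload ]
 * after:  [ headroom | IP hdr | H-byte gap | payload ]
 *
 * The IP header is copied H bytes toward the head of the buffer, and the
 * gap left behind is where the encapsulation header (e.g. ESP/AH) is
 * then written.
 */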
/* Remove encapsulation header.
*
* The IP header will be moved over the top of the encapsulation header.
*
* On entry, skb->h shall point to where the IP header should be and skb->nh
* shall be set to where the IP header currently is. skb->data shall point
* to the start of the payload.
*/
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ihl = skb->data - skb_transport_header(skb);
if (skb->transport_header != skb->network_header) {
memmove(skb_transport_header(skb),
skb_network_header(skb), ihl);
skb->network_header = skb->transport_header;
}
ip_hdr(skb)->tot_len = htons(skb->len + ihl);
skb_reset_transport_header(skb);
return 0;
}
static struct xfrm_mode xfrm4_transport_mode = {
.input = xfrm4_transport_input,
.output = xfrm4_transport_output,
.owner = THIS_MODULE,
.encap = XFRM_MODE_TRANSPORT,
};
static int __init xfrm4_transport_init(void)
{
return xfrm_register_mode(&xfrm4_transport_mode, AF_INET);
}
static void __exit xfrm4_transport_exit(void)
{
int err;
err = xfrm_unregister_mode(&xfrm4_transport_mode, AF_INET);
BUG_ON(err);
}
module_init(xfrm4_transport_init);
module_exit(xfrm4_transport_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_MODE(AF_INET, XFRM_MODE_TRANSPORT);
| gpl-2.0 |
dsb9938/GT-N8013-JB-Kernel | net/llc/llc_pdu.c | 15045 | 10888 | /*
* llc_pdu.c - access to PDU internals
*
* Copyright (c) 1997 by Procom Technology, Inc.
* 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program can be redistributed or modified under the terms of the
* GNU General Public License as published by the Free Software Foundation.
* This program is distributed without any warranty or implied warranty
* of merchantability or fitness for a particular purpose.
*
* See the GNU General Public License for more details.
*/
#include <linux/netdevice.h>
#include <net/llc_pdu.h>
static void llc_pdu_decode_pdu_type(struct sk_buff *skb, u8 *type);
static u8 llc_pdu_get_pf_bit(struct llc_pdu_sn *pdu);
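/* Set the command/response bit, the low-order bit of the SSAP field. */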
void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 pdu_type)
{
llc_pdu_un_hdr(skb)->ssap |= pdu_type;
}
/**
 * llc_pdu_set_pf_bit - sets poll/final bit in LLC header
 * @skb: input frame in which the p/f bit must be set.
 * @bit_value: poll/final bit (0 or 1).
 *
 * This function sets the poll/final bit in the LLC header (based on the
 * type of PDU). In I or S PDUs, the p/f bit is the rightmost bit of the
 * fourth header byte; in U PDUs it is the fifth bit of the third byte.
 */
void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value)
{
u8 pdu_type;
struct llc_pdu_sn *pdu;
llc_pdu_decode_pdu_type(skb, &pdu_type);
pdu = llc_pdu_sn_hdr(skb);
switch (pdu_type) {
case LLC_PDU_TYPE_I:
case LLC_PDU_TYPE_S:
pdu->ctrl_2 = (pdu->ctrl_2 & 0xFE) | bit_value;
break;
case LLC_PDU_TYPE_U:
/* use '=' rather than '|=' so a zero bit_value actually clears the bit */
pdu->ctrl_1 = (pdu->ctrl_1 & 0xEF) | ((bit_value & 1) << 4);
break;
}
}
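/*
 * Bit placement assumed above, as a sketch (the masks come from
 * <net/llc_pdu.h>, not this file): LLC_S_PF_BIT_MASK is 0x01, i.e. bit 0
 * of ctrl_2 for I/S PDUs, while LLC_U_PF_BIT_MASK is 0x10, i.e. bit 4 of
 * ctrl_1 for U PDUs - hence the shift by 4 when setting or reading it.
 */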
/**
 * llc_pdu_decode_pf_bit - extracts poll/final bit from LLC header
 * @skb: input skb from which the p/f bit must be extracted
 * @pf_bit: poll/final bit (0 or 1)
 *
 * This function extracts the poll/final bit from the LLC header (based on
 * the type of PDU). In I or S PDUs, the p/f bit is the rightmost bit of
 * the fourth header byte; in U PDUs it is the fifth bit of the third byte.
 */
void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit)
{
u8 pdu_type;
struct llc_pdu_sn *pdu;
llc_pdu_decode_pdu_type(skb, &pdu_type);
pdu = llc_pdu_sn_hdr(skb);
switch (pdu_type) {
case LLC_PDU_TYPE_I:
case LLC_PDU_TYPE_S:
*pf_bit = pdu->ctrl_2 & LLC_S_PF_BIT_MASK;
break;
case LLC_PDU_TYPE_U:
*pf_bit = (pdu->ctrl_1 & LLC_U_PF_BIT_MASK) >> 4;
break;
}
}
/**
* llc_pdu_init_as_disc_cmd - Builds DISC PDU
* @skb: Address of the skb to build
* @p_bit: The P bit to set in the PDU
*
* Builds a pdu frame as a DISC command.
*/
void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit)
{
struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
pdu->ctrl_1 = LLC_PDU_TYPE_U;
pdu->ctrl_1 |= LLC_2_PDU_CMD_DISC;
pdu->ctrl_1 |= ((p_bit & 1) << 4) & LLC_U_PF_BIT_MASK;
}
/**
* llc_pdu_init_as_i_cmd - builds I pdu
* @skb: Address of the skb to build
* @p_bit: The P bit to set in the PDU
* @ns: The sequence number of the data PDU
* @nr: The seq. number of the expected I PDU from the remote
*
* Builds a pdu frame as an I command.
*/
void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr)
{
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
pdu->ctrl_1 = LLC_PDU_TYPE_I;
pdu->ctrl_2 = 0;
pdu->ctrl_2 |= (p_bit & LLC_I_PF_BIT_MASK); /* p/f bit */
pdu->ctrl_1 |= (ns << 1) & 0xFE; /* set N(S) in bits 2..8 */
pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */
}
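/*
 * Sketch of the bit numbering used in these comments (added, not
 * original): bits are counted 1..16 across ctrl_1 and ctrl_2, so
 * "bits 2..8" is the upper 7 bits of ctrl_1 (N(S), hence the << 1 and
 * & 0xFE) and "bits 10..16" is the upper 7 bits of ctrl_2 (N(R)).
 */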
/**
* llc_pdu_init_as_rej_cmd - builds REJ PDU
* @skb: Address of the skb to build
* @p_bit: The P bit to set in the PDU
* @nr: The seq. number of the expected I PDU from the remote
*
* Builds a pdu frame as a REJ command.
*/
void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr)
{
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
pdu->ctrl_1 = LLC_PDU_TYPE_S;
pdu->ctrl_1 |= LLC_2_PDU_CMD_REJ;
pdu->ctrl_2 = 0;
pdu->ctrl_2 |= p_bit & LLC_S_PF_BIT_MASK;
pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */
pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */
}
/**
* llc_pdu_init_as_rnr_cmd - builds RNR pdu
* @skb: Address of the skb to build
* @p_bit: The P bit to set in the PDU
* @nr: The seq. number of the expected I PDU from the remote
*
* Builds a pdu frame as an RNR command.
*/
void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr)
{
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
pdu->ctrl_1 = LLC_PDU_TYPE_S;
pdu->ctrl_1 |= LLC_2_PDU_CMD_RNR;
pdu->ctrl_2 = 0;
pdu->ctrl_2 |= p_bit & LLC_S_PF_BIT_MASK;
pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */
pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */
}
/**
* llc_pdu_init_as_rr_cmd - Builds RR pdu
* @skb: Address of the skb to build
* @p_bit: The P bit to set in the PDU
* @nr: The seq. number of the expected I PDU from the remote
*
* Builds a pdu frame as an RR command.
*/
void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr)
{
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
pdu->ctrl_1 = LLC_PDU_TYPE_S;
pdu->ctrl_1 |= LLC_2_PDU_CMD_RR;
pdu->ctrl_2 = p_bit & LLC_S_PF_BIT_MASK;
pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */
pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */
}
/**
* llc_pdu_init_as_sabme_cmd - builds SABME pdu
* @skb: Address of the skb to build
* @p_bit: The P bit to set in the PDU
*
* Builds a pdu frame as an SABME command.
*/
void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit)
{
struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
pdu->ctrl_1 = LLC_PDU_TYPE_U;
pdu->ctrl_1 |= LLC_2_PDU_CMD_SABME;
pdu->ctrl_1 |= ((p_bit & 1) << 4) & LLC_U_PF_BIT_MASK;
}
/**
* llc_pdu_init_as_dm_rsp - builds DM response pdu
* @skb: Address of the skb to build
* @f_bit: The F bit to set in the PDU
*
* Builds a pdu frame as a DM response.
*/
void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit)
{
struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
pdu->ctrl_1 = LLC_PDU_TYPE_U;
pdu->ctrl_1 |= LLC_2_PDU_RSP_DM;
pdu->ctrl_1 |= ((f_bit & 1) << 4) & LLC_U_PF_BIT_MASK;
}
/**
* llc_pdu_init_as_frmr_rsp - builds FRMR response PDU
* @skb: Address of the frame to build
* @prev_pdu: The rejected PDU frame
* @f_bit: The F bit to set in the PDU
* @vs: tx state variable value for the data link conn at the rejecting LLC
* @vr: rx state variable value for the data link conn at the rejecting LLC
* @vzyxw: completely described in the IEEE Std 802.2 document (Pg 55)
*
* Builds a pdu frame as a FRMR response.
*/
void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, struct llc_pdu_sn *prev_pdu,
u8 f_bit, u8 vs, u8 vr, u8 vzyxw)
{
struct llc_frmr_info *frmr_info;
u8 prev_pf = 0;
u8 *ctrl;
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
pdu->ctrl_1 = LLC_PDU_TYPE_U;
pdu->ctrl_1 |= LLC_2_PDU_RSP_FRMR;
pdu->ctrl_1 |= ((f_bit & 1) << 4) & LLC_U_PF_BIT_MASK;
frmr_info = (struct llc_frmr_info *)&pdu->ctrl_2;
ctrl = (u8 *)&prev_pdu->ctrl_1;
FRMR_INFO_SET_REJ_CNTRL(frmr_info,ctrl);
FRMR_INFO_SET_Vs(frmr_info, vs);
FRMR_INFO_SET_Vr(frmr_info, vr);
prev_pf = llc_pdu_get_pf_bit(prev_pdu);
FRMR_INFO_SET_C_R_BIT(frmr_info, prev_pf);
FRMR_INFO_SET_INVALID_PDU_CTRL_IND(frmr_info, vzyxw);
FRMR_INFO_SET_INVALID_PDU_INFO_IND(frmr_info, vzyxw);
FRMR_INFO_SET_PDU_INFO_2LONG_IND(frmr_info, vzyxw);
FRMR_INFO_SET_PDU_INVALID_Nr_IND(frmr_info, vzyxw);
FRMR_INFO_SET_PDU_INVALID_Ns_IND(frmr_info, vzyxw);
skb_put(skb, sizeof(struct llc_frmr_info));
}
/**
* llc_pdu_init_as_rr_rsp - builds RR response pdu
* @skb: Address of the skb to build
* @f_bit: The F bit to set in the PDU
* @nr: The seq. number of the expected data PDU from the remote
*
* Builds a pdu frame as an RR response.
*/
void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr)
{
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
pdu->ctrl_1 = LLC_PDU_TYPE_S;
pdu->ctrl_1 |= LLC_2_PDU_RSP_RR;
pdu->ctrl_2 = 0;
pdu->ctrl_2 |= f_bit & LLC_S_PF_BIT_MASK;
pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */
pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */
}
/**
* llc_pdu_init_as_rej_rsp - builds REJ response pdu
* @skb: Address of the skb to build
* @f_bit: The F bit to set in the PDU
* @nr: The seq. number of the expected data PDU from the remote
*
* Builds a pdu frame as a REJ response.
*/
void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr)
{
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
pdu->ctrl_1 = LLC_PDU_TYPE_S;
pdu->ctrl_1 |= LLC_2_PDU_RSP_REJ;
pdu->ctrl_2 = 0;
pdu->ctrl_2 |= f_bit & LLC_S_PF_BIT_MASK;
pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */
pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */
}
/**
* llc_pdu_init_as_rnr_rsp - builds RNR response pdu
* @skb: Address of the frame to build
* @f_bit: The F bit to set in the PDU
* @nr: The seq. number of the expected data PDU from the remote
*
* Builds a pdu frame as an RNR response.
*/
void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr)
{
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
pdu->ctrl_1 = LLC_PDU_TYPE_S;
pdu->ctrl_1 |= LLC_2_PDU_RSP_RNR;
pdu->ctrl_2 = 0;
pdu->ctrl_2 |= f_bit & LLC_S_PF_BIT_MASK;
pdu->ctrl_1 &= 0x0F; /* setting bits 5..8 to zero(reserved) */
pdu->ctrl_2 |= (nr << 1) & 0xFE; /* set N(R) in bits 10..16 */
}
/**
* llc_pdu_init_as_ua_rsp - builds UA response pdu
* @skb: Address of the frame to build
* @f_bit: The F bit to set in the PDU
*
* Builds a pdu frame as a UA response.
*/
void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit)
{
struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
pdu->ctrl_1 = LLC_PDU_TYPE_U;
pdu->ctrl_1 |= LLC_2_PDU_RSP_UA;
pdu->ctrl_1 |= ((f_bit & 1) << 4) & LLC_U_PF_BIT_MASK;
}
/**
* llc_pdu_decode_pdu_type - determines PDU type
* @skb: input skb whose PDU type must be determined.
* @type: type of PDU (output argument).
*
* This function determines the type of PDU (I, S or U).
*/
static void llc_pdu_decode_pdu_type(struct sk_buff *skb, u8 *type)
{
struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
if (pdu->ctrl_1 & 1) {
if ((pdu->ctrl_1 & LLC_PDU_TYPE_U) == LLC_PDU_TYPE_U)
*type = LLC_PDU_TYPE_U;
else
*type = LLC_PDU_TYPE_S;
} else
*type = LLC_PDU_TYPE_I;
}
/**
* llc_pdu_get_pf_bit - extracts p/f bit of input PDU
* @pdu: pointer to LLC header.
*
* This function extracts the p/f bit of the input PDU. It first examines
* the type of PDU and then extracts the p/f bit. Returns the p/f bit.
*/
static u8 llc_pdu_get_pf_bit(struct llc_pdu_sn *pdu)
{
u8 pdu_type;
u8 pf_bit = 0;
if (pdu->ctrl_1 & 1) {
if ((pdu->ctrl_1 & LLC_PDU_TYPE_U) == LLC_PDU_TYPE_U)
pdu_type = LLC_PDU_TYPE_U;
else
pdu_type = LLC_PDU_TYPE_S;
} else
pdu_type = LLC_PDU_TYPE_I;
switch (pdu_type) {
case LLC_PDU_TYPE_I:
case LLC_PDU_TYPE_S:
pf_bit = pdu->ctrl_2 & LLC_S_PF_BIT_MASK;
break;
case LLC_PDU_TYPE_U:
pf_bit = (pdu->ctrl_1 & LLC_U_PF_BIT_MASK) >> 4;
break;
}
return pf_bit;
}
| gpl-2.0 |
poondog/venom-vivo | drivers/gpu/drm/i915/intel_display.c | 198 | 226144 | /*
* Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
typedef struct {
/* given values */
int n;
int m1, m2;
int p1, p2;
/* derived values */
int dot;
int vco;
int m;
int p;
} intel_clock_t;
typedef struct {
int min, max;
} intel_range_t;
typedef struct {
int dot_limit;
int p2_slow, p2_fast;
} intel_p2_t;
#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
intel_p2_t p2;
bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
int, int, intel_clock_t *);
};
/* FDI */
#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
if (IS_GEN5(dev)) {
struct drm_i915_private *dev_priv = dev->dev_private;
return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
} else
return 27;
}
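/*
 * Sketch of the units (added, not in the original): the return value is
 * in multiples of 100MHz, so the non-GEN5 fallback of 27 means 2.7GHz,
 * matching IRONLAKE_FDI_FREQ (2700000 kHz) defined above.
 */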
static const intel_limit_t intel_limits_i8xx_dvo = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
.n = { .min = 3, .max = 16 },
.m = { .min = 96, .max = 140 },
.m1 = { .min = 18, .max = 26 },
.m2 = { .min = 6, .max = 16 },
.p = { .min = 4, .max = 128 },
.p1 = { .min = 2, .max = 33 },
.p2 = { .dot_limit = 165000,
.p2_slow = 4, .p2_fast = 2 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_i8xx_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
.n = { .min = 3, .max = 16 },
.m = { .min = 96, .max = 140 },
.m1 = { .min = 18, .max = 26 },
.m2 = { .min = 6, .max = 16 },
.p = { .min = 4, .max = 128 },
.p1 = { .min = 1, .max = 6 },
.p2 = { .dot_limit = 165000,
.p2_slow = 14, .p2_fast = 7 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_i9xx_sdvo = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
.m = { .min = 70, .max = 120 },
.m1 = { .min = 10, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 5, .max = 80 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_i9xx_lvds = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
.m = { .min = 70, .max = 120 },
.m1 = { .min = 10, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 7, .max = 98 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
.p2_slow = 14, .p2_fast = 7 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_sdvo = {
.dot = { .min = 25000, .max = 270000 },
.vco = { .min = 1750000, .max = 3500000},
.n = { .min = 1, .max = 4 },
.m = { .min = 104, .max = 138 },
.m1 = { .min = 17, .max = 23 },
.m2 = { .min = 5, .max = 11 },
.p = { .min = 10, .max = 30 },
.p1 = { .min = 1, .max = 3},
.p2 = { .dot_limit = 270000,
.p2_slow = 10,
.p2_fast = 10
},
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_hdmi = {
.dot = { .min = 22000, .max = 400000 },
.vco = { .min = 1750000, .max = 3500000},
.n = { .min = 1, .max = 4 },
.m = { .min = 104, .max = 138 },
.m1 = { .min = 16, .max = 23 },
.m2 = { .min = 5, .max = 11 },
.p = { .min = 5, .max = 80 },
.p1 = { .min = 1, .max = 8},
.p2 = { .dot_limit = 165000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
.dot = { .min = 20000, .max = 115000 },
.vco = { .min = 1750000, .max = 3500000 },
.n = { .min = 1, .max = 3 },
.m = { .min = 104, .max = 138 },
.m1 = { .min = 17, .max = 23 },
.m2 = { .min = 5, .max = 11 },
.p = { .min = 28, .max = 112 },
.p1 = { .min = 2, .max = 8 },
.p2 = { .dot_limit = 0,
.p2_slow = 14, .p2_fast = 14
},
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
.dot = { .min = 80000, .max = 224000 },
.vco = { .min = 1750000, .max = 3500000 },
.n = { .min = 1, .max = 3 },
.m = { .min = 104, .max = 138 },
.m1 = { .min = 17, .max = 23 },
.m2 = { .min = 5, .max = 11 },
.p = { .min = 14, .max = 42 },
.p1 = { .min = 2, .max = 6 },
.p2 = { .dot_limit = 0,
.p2_slow = 7, .p2_fast = 7
},
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_display_port = {
.dot = { .min = 161670, .max = 227000 },
.vco = { .min = 1750000, .max = 3500000},
.n = { .min = 1, .max = 2 },
.m = { .min = 97, .max = 108 },
.m1 = { .min = 0x10, .max = 0x12 },
.m2 = { .min = 0x05, .max = 0x06 },
.p = { .min = 10, .max = 20 },
.p1 = { .min = 1, .max = 2},
.p2 = { .dot_limit = 0,
.p2_slow = 10, .p2_fast = 10 },
.find_pll = intel_find_pll_g4x_dp,
};
static const intel_limit_t intel_limits_pineview_sdvo = {
.dot = { .min = 20000, .max = 400000},
.vco = { .min = 1700000, .max = 3500000 },
/* Pineview's Ncounter is a ring counter */
.n = { .min = 3, .max = 6 },
.m = { .min = 2, .max = 256 },
/* Pineview only has one combined m divider, which we treat as m2. */
.m1 = { .min = 0, .max = 0 },
.m2 = { .min = 0, .max = 254 },
.p = { .min = 5, .max = 80 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_pineview_lvds = {
.dot = { .min = 20000, .max = 400000 },
.vco = { .min = 1700000, .max = 3500000 },
.n = { .min = 3, .max = 6 },
.m = { .min = 2, .max = 256 },
.m1 = { .min = 0, .max = 0 },
.m2 = { .min = 0, .max = 254 },
.p = { .min = 7, .max = 112 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_find_best_PLL,
};
/* Ironlake / Sandybridge
*
* We calculate clock using (register_value + 2) for N/M1/M2, so here
* the range value for them is (actual_value - 2).
*/
static const intel_limit_t intel_limits_ironlake_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 5 },
.m = { .min = 79, .max = 127 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 5, .max = 80 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 10, .p2_fast = 5 },
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_single_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
.m = { .min = 79, .max = 118 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 28, .max = 112 },
.p1 = { .min = 2, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_dual_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
.m = { .min = 79, .max = 127 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 14, .max = 56 },
.p1 = { .min = 2, .max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 7, .p2_fast = 7 },
.find_pll = intel_g4x_find_best_PLL,
};
/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 2 },
.m = { .min = 79, .max = 126 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 28, .max = 112 },
.p1 = { .min = 2,.max = 8 },
.p2 = { .dot_limit = 225000,
.p2_slow = 14, .p2_fast = 14 },
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000 },
.n = { .min = 1, .max = 3 },
.m = { .min = 79, .max = 126 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 14, .max = 42 },
.p1 = { .min = 2,.max = 6 },
.p2 = { .dot_limit = 225000,
.p2_slow = 7, .p2_fast = 7 },
.find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_display_port = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 1760000, .max = 3510000},
.n = { .min = 1, .max = 2 },
.m = { .min = 81, .max = 90 },
.m1 = { .min = 12, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.p = { .min = 10, .max = 20 },
.p1 = { .min = 1, .max = 2},
.p2 = { .dot_limit = 0,
.p2_slow = 10, .p2_fast = 10 },
.find_pll = intel_find_pll_ironlake_dp,
};
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP) {
/* LVDS dual channel */
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
else
limit = &intel_limits_ironlake_dual_lvds;
} else {
if (refclk == 100000)
limit = &intel_limits_ironlake_single_lvds_100m;
else
limit = &intel_limits_ironlake_single_lvds;
}
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
HAS_eDP)
limit = &intel_limits_ironlake_display_port;
else
limit = &intel_limits_ironlake_dac;
return limit;
}
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
/* LVDS with dual channel */
limit = &intel_limits_g4x_dual_channel_lvds;
else
/* LVDS with single channel */
limit = &intel_limits_g4x_single_channel_lvds;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits_g4x_hdmi;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits_g4x_sdvo;
} else if (intel_pipe_has_type (crtc, INTEL_OUTPUT_DISPLAYPORT)) {
limit = &intel_limits_g4x_display_port;
} else /* The option is for other outputs */
limit = &intel_limits_i9xx_sdvo;
return limit;
}
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
if (HAS_PCH_SPLIT(dev))
limit = intel_ironlake_limit(crtc, refclk);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
} else if (IS_PINEVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
} else if (!IS_GEN2(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
else
limit = &intel_limits_i9xx_sdvo;
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
else
limit = &intel_limits_i8xx_dvo;
}
return limit;
}
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
clock->vco = refclk * clock->m / clock->n;
clock->dot = clock->vco / clock->p;
}
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
if (IS_PINEVIEW(dev)) {
pineview_clock(refclk, clock);
return;
}
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
clock->p = clock->p1 * clock->p2;
clock->vco = refclk * clock->m / (clock->n + 2);
clock->dot = clock->vco / clock->p;
}
/**
* Returns whether any output on the specified pipe is of the specified type
*/
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->base.crtc == crtc && encoder->type == type)
return true;
return false;
}
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
*/
static bool intel_PLL_is_valid(struct drm_device *dev,
const intel_limit_t *limit,
const intel_clock_t *clock)
{
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
INTELPllInvalid ("p1 out of range\n");
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid ("p out of range\n");
if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
INTELPllInvalid ("m2 out of range\n");
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid ("m1 out of range\n");
if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
INTELPllInvalid ("m1 <= m2\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
INTELPllInvalid ("m out of range\n");
if (clock->n < limit->n.min || limit->n.max < clock->n)
INTELPllInvalid ("n out of range\n");
if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
INTELPllInvalid ("vco out of range\n");
/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
* connector, etc., rather than just a single range.
*/
if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
INTELPllInvalid ("dot out of range\n");
return true;
}
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int err = target;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
(I915_READ(LVDS)) != 0) {
/*
* For LVDS, if the panel is on, just rely on its current
* settings for dual-channel. We haven't figured out how to
* reliably set up different single/dual channel state, if we
* even can.
*/
if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
clock.p2 = limit->p2.p2_slow;
else
clock.p2 = limit->p2.p2_fast;
}
memset (best_clock, 0, sizeof (*best_clock));
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
clock.m1++) {
for (clock.m2 = limit->m2.min;
clock.m2 <= limit->m2.max; clock.m2++) {
/* m1 is always 0 in Pineview */
if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
break;
for (clock.n = limit->n.min;
clock.n <= limit->n.max; clock.n++) {
for (clock.p1 = limit->p1.min;
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
intel_clock(dev, refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
this_err = abs(clock.dot - target);
if (this_err < err) {
*best_clock = clock;
err = this_err;
}
}
}
}
}
return (err != target);
}
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int max_n;
bool found;
/* approximately equals target * 0.00586 ((1/256 + 1/512) = 3/512) */
int err_most = (target >> 8) + (target >> 9);
found = false;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
int lvds_reg;
if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
clock.p2 = limit->p2.p2_slow;
else
clock.p2 = limit->p2.p2_fast;
}
memset(best_clock, 0, sizeof(*best_clock));
max_n = limit->n.max;
/* based on hardware requirement, prefer smaller n for precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
/* based on hardware requirement, prefer larger m1,m2 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
for (clock.m2 = limit->m2.max;
clock.m2 >= limit->m2.min; clock.m2--) {
for (clock.p1 = limit->p1.max;
clock.p1 >= limit->p1.min; clock.p1--) {
int this_err;
intel_clock(dev, refclk, &clock);
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
this_err = abs(clock.dot - target);
if (this_err < err_most) {
*best_clock = clock;
err_most = this_err;
max_n = clock.n;
found = true;
}
}
}
}
}
return found;
}
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
if (target < 200000) {
clock.n = 1;
clock.p1 = 2;
clock.p2 = 10;
clock.m1 = 12;
clock.m2 = 9;
} else {
clock.n = 2;
clock.p1 = 1;
clock.p2 = 10;
clock.m1 = 14;
clock.m2 = 8;
}
intel_clock(dev, refclk, &clock);
memcpy(best_clock, &clock, sizeof(intel_clock_t));
return true;
}
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
{
intel_clock_t clock;
if (target < 200000) {
clock.p1 = 2;
clock.p2 = 10;
clock.n = 2;
clock.m1 = 23;
clock.m2 = 8;
} else {
clock.p1 = 1;
clock.p2 = 10;
clock.n = 1;
clock.m1 = 14;
clock.m2 = 2;
}
clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
clock.p = (clock.p1 * clock.p2);
clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
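/*
 * Sanity check of the fixed dividers above (dot clocks in kHz):
 *   low:  m = 5 * (23 + 2) + (8 + 2) = 135, p = 2 * 10 = 20, n = 2
 *         dot = 96000 * 135 / (2 + 2) / 20 = 162000  (162 MHz)
 *   high: m = 5 * (14 + 2) + (2 + 2) = 84,  p = 1 * 10 = 10, n = 1
 *         dot = 96000 * 84 / (1 + 2) / 10 = 268800  (~270 MHz)
 */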
clock.vco = 0;
memcpy(best_clock, &clock, sizeof(intel_clock_t));
return true;
}
/**
* intel_wait_for_vblank - wait for vblank on a given pipe
* @dev: drm device
* @pipe: pipe to wait for
*
* Wait for vblank to occur on a given pipe. Needed for various bits of
* mode setting code.
*/
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipestat_reg = PIPESTAT(pipe);
/* Clear existing vblank status. Note this will clear any other
* sticky status fields as well.
*
* This races with i915_driver_irq_handler() with the result
* that either function could miss a vblank event. Here it is not
* fatal, as we will either wait upon the next vblank interrupt or
* timeout. Generally speaking intel_wait_for_vblank() is only
* called during modeset at which time the GPU should be idle and
* should *not* be performing page flips and thus not waiting on
* vblanks...
* Currently, the result of us stealing a vblank from the irq
* handler is that a single frame will be skipped during swapbuffers.
*/
I915_WRITE(pipestat_reg,
I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
/* Wait for vblank interrupt bit to set */
if (wait_for(I915_READ(pipestat_reg) &
PIPE_VBLANK_INTERRUPT_STATUS,
50))
DRM_DEBUG_KMS("vblank wait timed out\n");
}
/**
* intel_wait_for_pipe_off - wait for pipe to turn off
* @dev: drm device
* @pipe: pipe to wait for
*
* After disabling a pipe, we can't wait for vblank in the usual way,
* spinning on the vblank interrupt status bit, since we won't actually
* see an interrupt when the pipe is disabled.
*
* On Gen4 and above:
* wait for the pipe register state bit to turn off
*
* Otherwise:
* wait for the display line value to settle (it usually
* ends up stopping at the start of the next frame).
*
*/
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (INTEL_INFO(dev)->gen >= 4) {
int reg = PIPECONF(pipe);
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
100))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
} else {
u32 last_line;
int reg = PIPEDSL(pipe);
unsigned long timeout = jiffies + msecs_to_jiffies(100);
/* Wait for the display line to settle */
do {
last_line = I915_READ(reg) & DSL_LINEMASK;
mdelay(5);
} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
}
}
static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
}
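/*
 * The assert_* helpers below all follow the same pattern: read the
 * relevant control register, derive the current on/off state from its
 * enable bit, and WARN (without otherwise changing behaviour) when the
 * hardware disagrees with the state the caller expects.
 */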
/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
u32 val;
bool cur_state;
reg = DPLL(pipe);
val = I915_READ(reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
WARN(cur_state != state,
"PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
u32 val;
bool cur_state;
reg = PCH_DPLL(pipe);
val = I915_READ(reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
WARN(cur_state != state,
"PCH PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
u32 val;
bool cur_state;
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
cur_state = !!(val & FDI_TX_ENABLE);
WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
u32 val;
bool cur_state;
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
cur_state = !!(val & FDI_RX_ENABLE);
WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
/* ILK FDI PLL is always enabled */
if (dev_priv->info->gen == 5)
return;
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int pp_reg, lvds_reg;
u32 val;
enum pipe panel_pipe = PIPE_A;
bool locked = true;
if (HAS_PCH_SPLIT(dev_priv->dev)) {
pp_reg = PCH_PP_CONTROL;
lvds_reg = PCH_LVDS;
} else {
pp_reg = PP_CONTROL;
lvds_reg = LVDS;
}
val = I915_READ(pp_reg);
if (!(val & PANEL_POWER_ON) ||
((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
locked = false;
if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
panel_pipe = PIPE_B;
WARN(panel_pipe == pipe && locked,
"panel assertion failure, pipe %c regs locked\n",
pipe_name(pipe));
}
static void assert_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
{
int reg;
u32 val;
bool cur_state;
reg = PIPECONF(pipe);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
WARN(cur_state != state,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
static void assert_plane_enabled(struct drm_i915_private *dev_priv,
enum plane plane)
{
int reg;
u32 val;
reg = DSPCNTR(plane);
val = I915_READ(reg);
WARN(!(val & DISPLAY_PLANE_ENABLE),
"plane %c assertion failure, should be active but is disabled\n",
plane_name(plane));
}
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg, i;
u32 val;
int cur_pipe;
/* Planes are fixed to pipes on ILK+ */
if (HAS_PCH_SPLIT(dev_priv->dev))
return;
/* Need to check both planes against the pipe */
for (i = 0; i < 2; i++) {
reg = DSPCNTR(i);
val = I915_READ(reg);
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
DISPPLANE_SEL_PIPE_SHIFT;
WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
"plane %c assertion failure, should be off on pipe %c but is still active\n",
plane_name(i), pipe_name(pipe));
}
}
static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
u32 val;
bool enabled;
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}
static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
bool enabled;
reg = TRANSCONF(pipe);
val = I915_READ(reg);
enabled = !!(val & TRANS_ENABLE);
WARN(enabled,
"transcoder assertion failed, should be off on pipe %c but is still active\n",
pipe_name(pipe));
}
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
WARN(DP_PIPE_ENABLED(val, pipe),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
}
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
WARN(HDMI_PIPE_ENABLED(val, pipe),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
}
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B);
assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C);
assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D);
reg = PCH_ADPA;
val = I915_READ(reg);
WARN(ADPA_PIPE_ENABLED(val, pipe),
"PCH VGA enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
reg = PCH_LVDS;
val = I915_READ(reg);
WARN(LVDS_PIPE_ENABLED(val, pipe),
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
}
/**
* intel_enable_pll - enable a PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
* Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
* make sure the PLL reg is writable first though, since the panel write
* protect mechanism may be enabled.
*
* Note! This is for pre-ILK only.
*/
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
int reg;
u32 val;
/* No really, not for ILK+ */
BUG_ON(dev_priv->info->gen >= 5);
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
assert_panel_unlocked(dev_priv, pipe);
reg = DPLL(pipe);
val = I915_READ(reg);
val |= DPLL_VCO_ENABLE;
/* We do this three times for luck */
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
}
/**
* intel_disable_pll - disable a PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to disable
*
* Disable the PLL for @pipe, making sure the pipe is off first.
*
* Note! This is for pre-ILK only.
*/
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
int reg;
u32 val;
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
reg = DPLL(pipe);
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
}
/**
* intel_enable_pch_pll - enable PCH PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
reg = PCH_DPLL(pipe);
val = I915_READ(reg);
val |= DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
}
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
/* Make sure transcoder isn't still depending on us */
assert_transcoder_disabled(dev_priv, pipe);
reg = PCH_DPLL(pipe);
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
}
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
/* Make sure PCH DPLL is enabled */
assert_pch_pll_enabled(dev_priv, pipe);
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
reg = TRANSCONF(pipe);
val = I915_READ(reg);
/*
 * Make the BPC in the transcoder consistent with
 * that in the pipeconf register.
 */
val &= ~PIPE_BPC_MASK;
val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
I915_WRITE(reg, val | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
assert_fdi_rx_disabled(dev_priv, pipe);
/* Ports must be off as well */
assert_pch_ports_disabled(dev_priv, pipe);
reg = TRANSCONF(pipe);
val = I915_READ(reg);
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("failed to disable transcoder\n");
}
/**
* intel_enable_pipe - enable a pipe, asserting requirements
* @dev_priv: i915 private structure
* @pipe: pipe to enable
* @pch_port: on ILK+, is this pipe driving a PCH port or not
*
* Enable @pipe, making sure that various hardware specific requirements
* are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
*
* @pipe should be %PIPE_A or %PIPE_B.
*
* Will wait until the pipe is actually running (i.e. first vblank) before
* returning.
*/
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool pch_port)
{
int reg;
u32 val;
/*
* A pipe without a PLL won't actually be able to drive bits from
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
* need the check.
*/
if (!HAS_PCH_SPLIT(dev_priv->dev))
assert_pll_enabled(dev_priv, pipe);
else {
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pipe);
assert_fdi_tx_pll_enabled(dev_priv, pipe);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
reg = PIPECONF(pipe);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE)
return;
I915_WRITE(reg, val | PIPECONF_ENABLE);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
/**
* intel_disable_pipe - disable a pipe, asserting requirements
* @dev_priv: i915 private structure
* @pipe: pipe to disable
*
* Disable @pipe, making sure that various hardware specific requirements
* are met, if applicable, e.g. plane disabled, panel fitter off, etc.
*
* @pipe should be %PIPE_A or %PIPE_B.
*
* Will wait until the pipe has shut down before returning.
*/
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
/*
* Make sure planes won't keep trying to pump pixels to us,
* or we might hang the display.
*/
assert_planes_disabled(dev_priv, pipe);
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
reg = PIPECONF(pipe);
val = I915_READ(reg);
if ((val & PIPECONF_ENABLE) == 0)
return;
I915_WRITE(reg, val & ~PIPECONF_ENABLE);
intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
/**
* intel_enable_plane - enable a display plane on a given pipe
* @dev_priv: i915 private structure
* @plane: plane to enable
* @pipe: pipe being fed
*
* Enable @plane on @pipe, making sure that @pipe is running first.
*/
static void intel_enable_plane(struct drm_i915_private *dev_priv,
enum plane plane, enum pipe pipe)
{
int reg;
u32 val;
/* If the pipe isn't enabled, we can't pump pixels and may hang */
assert_pipe_enabled(dev_priv, pipe);
reg = DSPCNTR(plane);
val = I915_READ(reg);
if (val & DISPLAY_PLANE_ENABLE)
return;
I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
/*
* Plane regs are double buffered, going from enabled->disabled needs a
* trigger in order to latch. The display address reg provides this.
*/
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
u32 reg = DSPADDR(plane);
I915_WRITE(reg, I915_READ(reg));
}
/**
* intel_disable_plane - disable a display plane
* @dev_priv: i915 private structure
* @plane: plane to disable
* @pipe: pipe consuming the data
*
* Disable @plane; should be an independent operation.
*/
static void intel_disable_plane(struct drm_i915_private *dev_priv,
enum plane plane, enum pipe pipe)
{
int reg;
u32 val;
reg = DSPCNTR(plane);
val = I915_READ(reg);
if ((val & DISPLAY_PLANE_ENABLE) == 0)
return;
I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
intel_flush_display_plane(dev_priv, plane);
intel_wait_for_vblank(dev_priv->dev, pipe);
}
static void disable_pch_dp(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
if (DP_PIPE_ENABLED(val, pipe))
I915_WRITE(reg, val & ~DP_PORT_EN);
}
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
if (HDMI_PIPE_ENABLED(val, pipe))
I915_WRITE(reg, val & ~PORT_ENABLE);
}
/* Disable any ports connected to this transcoder */
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
u32 reg, val;
val = I915_READ(PCH_PP_CONTROL);
I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
disable_pch_dp(dev_priv, pipe, PCH_DP_B);
disable_pch_dp(dev_priv, pipe, PCH_DP_C);
disable_pch_dp(dev_priv, pipe, PCH_DP_D);
reg = PCH_ADPA;
val = I915_READ(reg);
if (ADPA_PIPE_ENABLED(val, pipe))
I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
reg = PCH_LVDS;
val = I915_READ(reg);
if (LVDS_PIPE_ENABLED(val, pipe)) {
I915_WRITE(reg, val & ~LVDS_PORT_EN);
POSTING_READ(reg);
udelay(100);
}
disable_pch_hdmi(dev_priv, pipe, HDMIB);
disable_pch_hdmi(dev_priv, pipe, HDMIC);
disable_pch_hdmi(dev_priv, pipe, HDMID);
}
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane, i;
u32 fbc_ctl, fbc_ctl2;
if (fb->pitch == dev_priv->cfb_pitch &&
obj->fence_reg == dev_priv->cfb_fence &&
intel_crtc->plane == dev_priv->cfb_plane &&
I915_READ(FBC_CONTROL) & FBC_CTL_EN)
return;
i8xx_disable_fbc(dev);
dev_priv->cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
if (fb->pitch < dev_priv->cfb_pitch)
dev_priv->cfb_pitch = fb->pitch;
/* FBC_CTL wants 64B units */
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
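/* e.g. a 2048-byte pitch becomes 2048 / 64 - 1 = 31 in the register */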
dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl2 |= FBC_CTL_CPU_FENCE;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
/* enable it... */
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl |= dev_priv->cfb_fence;
I915_WRITE(FBC_CONTROL, fbc_ctl);
DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
}
void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
return;
fbc_ctl &= ~FBC_CTL_EN;
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
DRM_DEBUG_KMS("disabled FBC\n");
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane &&
dev_priv->cfb_y == crtc->y)
return;
I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
dev_priv->cfb_y = crtc->y;
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
} else {
I915_WRITE(DPFC_CHICKEN, ~DPFC_HT_MODIFY);
}
I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
/* enable it... */
I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
void g4x_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
DRM_DEBUG_KMS("disabled FBC\n");
}
}
static bool g4x_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
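/*
 * GEN6_BLITTER_ECOSKPD appears to guard its low bits with per-bit lock
 * bits in the upper half (GEN6_BLITTER_LOCK_SHIFT): the sequence below
 * first writes the lock-half bit to make FBC_NOTIFY writable, then sets
 * the bit itself, then clears the lock-half bit to re-lock it.
 */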
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blt_ecoskpd;
/* Make sure blitter notifies FBC of writes */
gen6_gt_force_wake_get(dev_priv);
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
gen6_gt_force_wake_put(dev_priv);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane &&
dev_priv->cfb_offset == obj->gtt_offset &&
dev_priv->cfb_y == crtc->y)
return;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
dev_priv->cfb_offset = obj->gtt_offset;
dev_priv->cfb_y = crtc->y;
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
} else {
I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
}
I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
if (IS_GEN6(dev)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
sandybridge_blit_fbc_update(dev);
}
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
void ironlake_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
DRM_DEBUG_KMS("disabled FBC\n");
}
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
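/*
 * The intel_*_fbc() wrappers below dispatch through the per-platform
 * function pointers in dev_priv->display, which point at the i8xx_, g4x_
 * or ironlake_ implementations above depending on hardware generation.
 */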
bool intel_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev_priv->display.fbc_enabled)
return false;
return dev_priv->display.fbc_enabled(dev);
}
void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
if (!dev_priv->display.enable_fbc)
return;
dev_priv->display.enable_fbc(crtc, interval);
}
void intel_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!dev_priv->display.disable_fbc)
return;
dev_priv->display.disable_fbc(dev);
}
/**
* intel_update_fbc - enable/disable FBC as needed
* @dev: the drm_device
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
* - plane A only (on pre-965)
* - no pixel mulitply/line duplication
* - no alpha buffer discard
* - no dual wide
* - framebuffer <= 2048 in width, 1536 in height
*
* We can't assume that any compression will take place (worst case),
* so the compressed buffer has to be the same size as the uncompressed
* one. It also must reside (along with the line length buffer) in
* stolen memory.
*
* We need to enable/disable FBC on a global basis.
*/
static void intel_update_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL, *tmp_crtc;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
DRM_DEBUG_KMS("\n");
if (!i915_powersave)
return;
if (!I915_HAS_FBC(dev))
return;
/*
* If FBC is already on, we just have to verify that we can
* keep it that way...
* Need to disable if:
* - more than one pipe is active
* - changing FBC params (stride, fence, mode)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
if (tmp_crtc->enabled && tmp_crtc->fb) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
goto out_disable;
}
crtc = tmp_crtc;
}
}
if (!crtc || crtc->fb == NULL) {
DRM_DEBUG_KMS("no output, disabling\n");
dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
goto out_disable;
}
intel_crtc = to_intel_crtc(crtc);
fb = crtc->fb;
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
if (!i915_enable_fbc) {
DRM_DEBUG_KMS("fbc disabled per module param (default off)\n");
dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
if (intel_fb->obj->base.size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
goto out_disable;
}
if ((crtc->mode.hdisplay > 2048) ||
(crtc->mode.vdisplay > 1536)) {
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
if (obj->tiling_mode != I915_TILING_X) {
DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
}
/* If the kernel debugger is active, always disable compression */
if (in_dbg_master())
goto out_disable;
intel_enable_fbc(crtc, 500);
return;
out_disable:
/* Multiple disables should be harmless */
if (intel_fbc_enabled(dev)) {
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
intel_disable_fbc(dev);
}
}
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 alignment;
int ret;
switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
else if (INTEL_INFO(dev)->gen >= 4)
alignment = 4 * 1024;
else
alignment = 64 * 1024;
break;
case I915_TILING_X:
/* pin() will align the object as required by fence */
alignment = 0;
break;
case I915_TILING_Y:
/* FIXME: Is this true? */
DRM_ERROR("Y tiled not allowed for scan out buffers\n");
return -EINVAL;
default:
BUG();
}
dev_priv->mm.interruptible = false;
ret = i915_gem_object_pin(obj, alignment, true);
if (ret)
goto err_interruptible;
ret = i915_gem_object_set_to_display_plane(obj, pipelined);
if (ret)
goto err_unpin;
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
if (obj->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence(obj, pipelined);
if (ret)
goto err_unpin;
}
dev_priv->mm.interruptible = true;
return 0;
err_unpin:
i915_gem_object_unpin(obj);
err_interruptible:
dev_priv->mm.interruptible = true;
return ret;
}
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long Start, Offset;
u32 dspcntr;
u32 reg;
switch (plane) {
case 0:
case 1:
break;
default:
DRM_ERROR("Can't update plane %d in SAREA\n", plane);
return -EINVAL;
}
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (fb->bits_per_pixel) {
case 8:
dspcntr |= DISPPLANE_8BPP;
break;
case 16:
if (fb->depth == 15)
dspcntr |= DISPPLANE_15_16BPP;
else
dspcntr |= DISPPLANE_16BPP;
break;
case 24:
case 32:
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
default:
DRM_ERROR("Unknown color depth\n");
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
}
if (HAS_PCH_SPLIT(dev))
/* must disable */
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
I915_WRITE(reg, dspcntr);
Start = obj->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
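/* Linear byte offset of (x, y) within the fb: one full pitch per line
 * plus the pixel's byte offset within the line.
 */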
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
Start, Offset, x, y, fb->pitch);
I915_WRITE(DSPSTRIDE(plane), fb->pitch);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
} else
I915_WRITE(DSPADDR(plane), Start + Offset);
POSTING_READ(reg);
intel_update_fbc(dev);
intel_increase_pllclock(crtc);
return 0;
}
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int ret;
/* no fb bound */
if (!crtc->fb) {
DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
switch (intel_crtc->plane) {
case 0:
case 1:
break;
default:
return -EINVAL;
}
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
to_intel_framebuffer(crtc->fb)->obj,
NULL);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
if (old_fb) {
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
wait_event(dev_priv->pending_flip_queue,
atomic_read(&dev_priv->mm.wedged) ||
atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
*
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
ret = i915_gem_object_flush_gpu(obj);
(void) ret;
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
LEAVE_ATOMIC_MODE_SET);
if (ret) {
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
if (old_fb) {
intel_wait_for_vblank(dev, intel_crtc->pipe);
i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
}
mutex_unlock(&dev->struct_mutex);
if (!dev->primary->master)
return 0;
master_priv = dev->primary->master->driver_priv;
if (!master_priv->sarea_priv)
return 0;
if (intel_crtc->pipe) {
master_priv->sarea_priv->pipeB_x = x;
master_priv->sarea_priv->pipeB_y = y;
} else {
master_priv->sarea_priv->pipeA_x = x;
master_priv->sarea_priv->pipeA_y = y;
}
return 0;
}
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
if (clock < 200000) {
u32 temp;
dpa_ctl |= DP_PLL_FREQ_160MHZ;
/* workaround for 160MHz:
1) program 0x4600c bits 15:0 = 0x8124
2) program 0x46010 bit 0 = 1
3) program 0x46034 bit 24 = 1
4) program 0x64000 bit 14 = 1
*/
temp = I915_READ(0x4600c);
temp &= 0xffff0000;
I915_WRITE(0x4600c, temp | 0x8124);
temp = I915_READ(0x46010);
I915_WRITE(0x46010, temp | 1);
temp = I915_READ(0x46034);
I915_WRITE(0x46034, temp | (1 << 24));
} else {
dpa_ctl |= DP_PLL_FREQ_270MHZ;
}
I915_WRITE(DP_A, dpa_ctl);
POSTING_READ(DP_A);
udelay(500);
}
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
/* enable normal train */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
if (IS_IVYBRIDGE(dev)) {
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
} else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
}
I915_WRITE(reg, temp);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
} else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE;
}
I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
/* wait one idle pattern time */
POSTING_READ(reg);
udelay(1000);
/* IVB wants error correction enabled */
if (IS_IVYBRIDGE(dev))
I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
FDI_FE_ERRC_ENABLE);
}
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp, tries;
/* FDI needs bits from pipe & plane first */
assert_pipe_enabled(dev_priv, pipe);
assert_plane_enabled(dev_priv, plane);
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
temp = I915_READ(reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
I915_WRITE(reg, temp);
I915_READ(reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(reg, temp | FDI_RX_ENABLE);
POSTING_READ(reg);
udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable */
if (HAS_PCH_IBX(dev)) {
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
FDI_RX_PHASE_SYNC_POINTER_EN);
}
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_BIT_LOCK)) {
DRM_DEBUG_KMS("FDI train 1 done.\n");
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
break;
}
}
if (tries == 5)
DRM_ERROR("FDI train 1 fail!\n");
/* Train 2 */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
I915_WRITE(reg, temp);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
I915_WRITE(reg, temp);
POSTING_READ(reg);
udelay(150);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
}
if (tries == 5)
DRM_ERROR("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train done\n");
}
static const int snb_b_fdi_train_param[] = {
FDI_LINK_TRAIN_400MV_0DB_SNB_B,
FDI_LINK_TRAIN_400MV_6DB_SNB_B,
FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
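/*
 * The training loops below step through the four voltage-swing/emphasis
 * entries above, reprogramming FDI_TX_CTL and polling FDI_RX_IIR for the
 * bit-lock (pattern 1) or symbol-lock (pattern 2) bit after each attempt.
 */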
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, i;
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
temp = I915_READ(reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
I915_WRITE(reg, temp);
POSTING_READ(reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
I915_WRITE(reg, temp | FDI_RX_ENABLE);
POSTING_READ(reg);
udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
I915_WRITE(reg, temp);
POSTING_READ(reg);
udelay(500);
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK) {
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
DRM_DEBUG_KMS("FDI train 1 done.\n");
break;
}
}
if (i == 4)
DRM_ERROR("FDI train 1 fail!\n");
/* Train 2 */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
if (IS_GEN6(dev)) {
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
}
I915_WRITE(reg, temp);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
} else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
}
I915_WRITE(reg, temp);
POSTING_READ(reg);
udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
I915_WRITE(reg, temp);
POSTING_READ(reg);
udelay(500);
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
}
if (i == 4)
DRM_ERROR("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train done.\n");
}
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, i;
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
temp = I915_READ(reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
I915_WRITE(reg, temp);
POSTING_READ(reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(7 << 19);
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_RX_ENABLE);
POSTING_READ(reg);
udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
I915_WRITE(reg, temp);
POSTING_READ(reg);
udelay(500);
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK ||
(I915_READ(reg) & FDI_RX_BIT_LOCK)) {
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
DRM_DEBUG_KMS("FDI train 1 done.\n");
break;
}
}
if (i == 4)
DRM_ERROR("FDI train 1 fail!\n");
/* Train 2 */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
I915_WRITE(reg, temp);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
I915_WRITE(reg, temp);
POSTING_READ(reg);
udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
I915_WRITE(reg, temp);
POSTING_READ(reg);
udelay(500);
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
}
if (i == 4)
DRM_ERROR("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train done.\n");
}
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
/* Write the TU size bits so error detection works */
I915_WRITE(FDI_RX_TUSIZE1(pipe),
I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~((0x7 << 19) | (0x7 << 16));
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
POSTING_READ(reg);
udelay(200);
/* Switch from Rawclk to PCDclk */
temp = I915_READ(reg);
I915_WRITE(reg, temp | FDI_PCDCLK);
POSTING_READ(reg);
udelay(200);
/* Enable CPU FDI TX PLL, always on for Ironlake */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
if ((temp & FDI_TX_PLL_ENABLE) == 0) {
I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
POSTING_READ(reg);
udelay(100);
}
}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
/* disable CPU FDI tx and PCH FDI rx */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
POSTING_READ(reg);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(0x7 << 16);
temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
POSTING_READ(reg);
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev)) {
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
I915_WRITE(FDI_RX_CHICKEN(pipe),
I915_READ(FDI_RX_CHICKEN(pipe)) &
~FDI_RX_PHASE_SYNC_POINTER_EN);
}
/* still set train pattern 1 */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(reg, temp);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
/* BPC in FDI rx is consistent with that in PIPECONF */
temp &= ~(0x07 << 16);
temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
I915_WRITE(reg, temp);
POSTING_READ(reg);
udelay(100);
}
/*
* When we disable a pipe, we need to clear any pending scanline wait events
* to avoid hanging the ring, which we assume we are waiting on.
*/
static void intel_clear_scanline_wait(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
u32 tmp;
if (IS_GEN2(dev))
/* Can't break the hang on i8xx */
return;
ring = LP_RING(dev_priv);
tmp = I915_READ_CTL(ring);
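/* RING_WAIT is presumably write-to-clear: writing back the value we
 * just read kicks the ring out of its MI_WAIT_FOR_EVENT.
 */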
if (tmp & RING_WAIT)
I915_WRITE_CTL(ring, tmp);
}
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_i915_gem_object *obj;
struct drm_i915_private *dev_priv;
if (crtc->fb == NULL)
return;
obj = to_intel_framebuffer(crtc->fb)->obj;
dev_priv = crtc->dev->dev_private;
wait_event(dev_priv->pending_flip_queue,
atomic_read(&obj->pending_flip) == 0);
}
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
/*
* If there's a non-PCH eDP on this crtc, it must be DP_A, and that
* must be driven by its own crtc; no sharing is possible.
*/
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
continue;
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&encoder->base))
return false;
continue;
}
}
return true;
}
/*
* Enable PCH resources required for PCH ports:
* - PCH PLLs
* - FDI training & RX/TX
* - update transcoder timings
* - DP transcoding bits
* - transcoder
*/
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
intel_enable_pch_pll(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
/* Be sure PCH DPLL SEL is set */
temp = I915_READ(PCH_DPLL_SEL);
if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0)
temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0)
temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
I915_WRITE(PCH_DPLL_SEL, temp);
}
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
TRANS_DP_BPC_MASK);
temp |= (TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_ENH_FRAMING);
temp |= TRANS_DP_8BPC;
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
switch (intel_trans_dp_port_sel(crtc)) {
case PCH_DP_B:
temp |= TRANS_DP_PORT_SEL_B;
break;
case PCH_DP_C:
temp |= TRANS_DP_PORT_SEL_C;
break;
case PCH_DP_D:
temp |= TRANS_DP_PORT_SEL_D;
break;
default:
DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
temp |= TRANS_DP_PORT_SEL_B;
break;
}
I915_WRITE(reg, temp);
}
intel_enable_transcoder(dev_priv, pipe);
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 temp;
bool is_pch_port;
if (intel_crtc->active)
return;
intel_crtc->active = true;
intel_update_watermarks(dev);
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
temp = I915_READ(PCH_LVDS);
if ((temp & LVDS_PORT_EN) == 0)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
is_pch_port = intel_crtc_driving_pch(crtc);
if (is_pch_port)
ironlake_fdi_pll_enable(crtc);
else
ironlake_fdi_disable(crtc);
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
/*
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
intel_crtc_load_lut(crtc);
intel_enable_pipe(dev_priv, pipe, is_pch_port);
intel_enable_plane(dev_priv, plane, pipe);
if (is_pch_port)
ironlake_pch_enable(crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
intel_crtc_update_cursor(crtc, true);
}
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
if (!intel_crtc->active)
return;
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
intel_crtc_update_cursor(crtc, false);
intel_disable_plane(dev_priv, plane, pipe);
if (dev_priv->cfb_plane == plane &&
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
intel_disable_pipe(dev_priv, pipe);
/* Disable PF */
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
ironlake_fdi_disable(crtc);
/* This is a horrible layering violation; we should be doing this in
* the connector/encoder ->prepare instead, but we don't always have
* enough information there about the config to know whether it will
* actually be necessary or just cause undesired flicker.
*/
intel_disable_pch_ports(dev_priv, pipe);
intel_disable_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
temp |= TRANS_DP_PORT_SEL_NONE;
I915_WRITE(reg, temp);
/* disable DPLL_SEL */
temp = I915_READ(PCH_DPLL_SEL);
switch (pipe) {
case 0:
temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
break;
case 1:
temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
break;
case 2:
/* FIXME: manage transcoder PLLs? */
temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
break;
default:
BUG(); /* wtf */
}
I915_WRITE(PCH_DPLL_SEL, temp);
}
/* disable PCH DPLL */
intel_disable_pch_pll(dev_priv, pipe);
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp & ~FDI_PCDCLK);
/* Disable CPU FDI TX PLL */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
POSTING_READ(reg);
udelay(100);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
/* Wait for the clocks to turn off. */
POSTING_READ(reg);
udelay(100);
intel_crtc->active = false;
intel_update_watermarks(dev);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
intel_clear_scanline_wait(dev);
mutex_unlock(&dev->struct_mutex);
}
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
*/
switch (mode) {
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
ironlake_crtc_enable(crtc);
break;
case DRM_MODE_DPMS_OFF:
DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
ironlake_crtc_disable(crtc);
break;
}
}
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
dev_priv->mm.interruptible = false;
(void) intel_overlay_switch_off(intel_crtc->overlay);
dev_priv->mm.interruptible = true;
mutex_unlock(&dev->struct_mutex);
}
/* Let userspace switch the overlay on again. In most cases userspace
* has to recompute where to put it anyway.
*/
}
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
if (intel_crtc->active)
return;
intel_crtc->active = true;
intel_update_watermarks(dev);
intel_enable_pll(dev_priv, pipe);
intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
/* Give the overlay scaler a chance to enable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, true);
intel_crtc_update_cursor(crtc, true);
}
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
if (!intel_crtc->active)
return;
/* Give the overlay scaler a chance to disable if it's on this pipe */
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
if (dev_priv->cfb_plane == plane &&
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
intel_disable_pll(dev_priv, pipe);
intel_crtc->active = false;
intel_update_fbc(dev);
intel_update_watermarks(dev);
intel_clear_scanline_wait(dev);
}
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
{
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
*/
switch (mode) {
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
i9xx_crtc_enable(crtc);
break;
case DRM_MODE_DPMS_OFF:
i9xx_crtc_disable(crtc);
break;
}
}
/**
 * intel_crtc_dpms - set the power management mode of the pipe and plane
 * @crtc: CRTC to update
 * @mode: DPMS mode to apply
 */
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
bool enabled;
if (intel_crtc->dpms_mode == mode)
return;
intel_crtc->dpms_mode = mode;
dev_priv->display.dpms(crtc, mode);
if (!dev->primary->master)
return;
master_priv = dev->primary->master->driver_priv;
if (!master_priv->sarea_priv)
return;
enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
switch (pipe) {
case 0:
master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
break;
case 1:
master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
break;
default:
DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
break;
}
}
static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_device *dev = crtc->dev;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->fb) {
mutex_lock(&dev->struct_mutex);
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
}
}
/* Prepare for a mode set.
*
* Note we could be a lot smarter here. We need to figure out which outputs
* will be enabled, which disabled (in short, how the config will change)
* and perform the minimum necessary steps to accomplish that, e.g. updating
* watermarks, FBC configuration, making sure PLLs are programmed correctly,
* panel fitting is in the proper state, etc.
*/
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
i9xx_crtc_disable(crtc);
}
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
i9xx_crtc_enable(crtc);
}
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
ironlake_crtc_disable(crtc);
}
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
ironlake_crtc_enable(crtc);
}
void intel_encoder_prepare(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
/* lvds has its own version of prepare; see intel_lvds_prepare */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
void intel_encoder_commit(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
/* lvds has its own version of commit; see intel_lvds_commit */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
if (HAS_PCH_SPLIT(dev)) {
/* FDI link clock is fixed at 2.7GHz */
if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
return false;
}
/* XXX some encoders set the crtcinfo, others don't.
* Obviously we need some form of conflict resolution here...
*/
if (adjusted_mode->crtc_htotal == 0)
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
static int i945_get_display_clock_speed(struct drm_device *dev)
{
return 400000;
}
static int i915_get_display_clock_speed(struct drm_device *dev)
{
return 333000;
}
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
return 200000;
}
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
u16 gcfgc = 0;
pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
return 133000;
else {
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_333_MHZ:
return 333000;
default:
case GC_DISPLAY_CLOCK_190_200_MHZ:
return 190000;
}
}
}
static int i865_get_display_clock_speed(struct drm_device *dev)
{
return 266000;
}
static int i855_get_display_clock_speed(struct drm_device *dev)
{
u16 hpllcc = 0;
/* Assume that the hardware is in the high speed state. This
* should be the default.
*/
switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
case GC_CLOCK_133_200:
case GC_CLOCK_100_200:
return 200000;
case GC_CLOCK_166_250:
return 250000;
case GC_CLOCK_100_133:
return 133000;
}
/* Shouldn't happen */
return 0;
}
static int i830_get_display_clock_speed(struct drm_device *dev)
{
return 133000;
}
struct fdi_m_n {
u32 tu;
u32 gmch_m;
u32 gmch_n;
u32 link_m;
u32 link_n;
};
static void
fdi_reduce_ratio(u32 *num, u32 *den)
{
while (*num > 0xffffff || *den > 0xffffff) {
*num >>= 1;
*den >>= 1;
}
}
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
int link_clock, struct fdi_m_n *m_n)
{
m_n->tu = 64; /* default size */
/* BUG_ON(pixel_clock > INT_MAX / 36); */
m_n->gmch_m = bits_per_pixel * pixel_clock;
m_n->gmch_n = link_clock * nlanes * 8;
fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
m_n->link_m = pixel_clock;
m_n->link_n = link_clock;
fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
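/*
 * Worked example of the M/N computation above (illustrative numbers,
 * not from hardware docs): for a 24bpp mode with a 148500 kHz pixel
 * clock driven over 4 FDI lanes at a 270000 kHz link clock,
 *
 *   gmch_m = 24 * 148500    = 3564000
 *   gmch_n = 270000 * 4 * 8 = 8640000
 *
 * Both values already fit in 24 bits (<= 0xffffff), so
 * fdi_reduce_ratio() leaves them unchanged, and the data M/N pair
 * programs a bandwidth ratio of 3564000/8640000 (= 0.4125).
 */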
struct intel_watermark_params {
unsigned long fifo_size;
unsigned long max_wm;
unsigned long default_wm;
unsigned long guard_size;
unsigned long cacheline_size;
};
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_HPLLOFF_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
G4X_FIFO_SIZE,
G4X_MAX_WM,
G4X_MAX_WM,
2,
G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
I945_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
I915_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
I855GM_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
I830_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_wm_info = {
ILK_DISPLAY_FIFO,
ILK_DISPLAY_MAXWM,
ILK_DISPLAY_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
ILK_CURSOR_FIFO,
ILK_CURSOR_MAXWM,
ILK_CURSOR_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
ILK_DISPLAY_SR_FIFO,
ILK_DISPLAY_MAX_SRWM,
ILK_DISPLAY_DFT_SRWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_CURSOR_SR_FIFO,
ILK_CURSOR_MAX_SRWM,
ILK_CURSOR_DFT_SRWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_wm_info = {
SNB_DISPLAY_FIFO,
SNB_DISPLAY_MAXWM,
SNB_DISPLAY_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
SNB_CURSOR_FIFO,
SNB_CURSOR_MAXWM,
SNB_CURSOR_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
SNB_DISPLAY_SR_FIFO,
SNB_DISPLAY_MAX_SRWM,
SNB_DISPLAY_DFT_SRWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
SNB_CURSOR_SR_FIFO,
SNB_CURSOR_MAX_SRWM,
SNB_CURSOR_DFT_SRWM,
2,
SNB_FIFO_LINE_SIZE
};
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
* @wm: chip FIFO params
* @fifo_size: size of the FIFO buffer (in cachelines)
* @pixel_size: display pixel size
* @latency_ns: memory latency for the platform
*
* Calculate the watermark level (the level at which the display plane will
* start fetching from memory again). Each chip has a different display
* FIFO size and allocation, so the caller needs to figure that out and pass
* in the correct intel_watermark_params structure.
*
* As the pixel clock runs, the FIFO will be drained at a rate that depends
* on the pixel size. When it reaches the watermark level, it'll start
* fetching FIFO-line-sized chunks from memory until the FIFO fills
* past the watermark point. If the FIFO drains completely, a FIFO underrun
* will occur, and a display engine hang could result.
*/
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
const struct intel_watermark_params *wm,
int fifo_size,
int pixel_size,
unsigned long latency_ns)
{
long entries_required, wm_size;
/*
* Note: we need to make sure we don't overflow for various clock &
* latency values.
* clocks go from a few thousand to several hundred thousand.
* latency is usually a few thousand
*/
entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
1000;
entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
wm_size = fifo_size - (entries_required + wm->guard_size);
DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
/* Don't promote wm_size to unsigned... */
if (wm_size > (long)wm->max_wm)
wm_size = wm->max_wm;
if (wm_size <= 0)
wm_size = wm->default_wm;
return wm_size;
}
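/*
 * A worked example of the calculation above, with assumed values: a
 * 100000 kHz mode at 4 bytes per pixel and latency_ns = 5000, on a part
 * with a 64-byte cacheline, a 96-entry FIFO and a guard size of 2:
 *
 *   entries_required = ((100000 / 1000) * 4 * 5000) / 1000 = 2000 bytes
 *                    = DIV_ROUND_UP(2000, 64)              = 32 cachelines
 *   wm_size          = 96 - (32 + 2)                       = 62
 *
 * so the plane starts refilling once the FIFO drains to 62 entries.
 */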
struct cxsr_latency {
int is_desktop;
int is_ddr3;
unsigned long fsb_freq;
unsigned long mem_freq;
unsigned long display_sr;
unsigned long display_hpll_disable;
unsigned long cursor_sr;
unsigned long cursor_hpll_disable;
};
static const struct cxsr_latency cxsr_latency_table[] = {
{1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
{1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
{1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
{1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
{1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
{1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
{1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
{1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
{1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
{1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
{1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
{1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
{1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
{1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
{1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
{0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
{0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
{0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
{0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
{0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
{0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
{0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
{0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
{0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
{0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
{0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
{0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
{0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
{0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
int is_ddr3,
int fsb,
int mem)
{
const struct cxsr_latency *latency;
int i;
if (fsb == 0 || mem == 0)
return NULL;
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
latency = &cxsr_latency_table[i];
if (is_desktop == latency->is_desktop &&
is_ddr3 == latency->is_ddr3 &&
fsb == latency->fsb_freq && mem == latency->mem_freq)
return latency;
}
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
return NULL;
}
static void pineview_disable_cxsr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* deactivate cxsr */
I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
* - chipset
* - current MCH state
* It can be fairly high in some situations, so here we assume a fairly
* pessimal value. It's a tradeoff between extra memory fetches (if we
* set this value too high, the FIFO will fetch frequently to stay full)
* and power consumption (set it too low to save power and we might see
* FIFO underruns and display "flicker").
*
* A value of 5us seems to be a good balance; safe for very low end
* platforms but not overly aggressive on lower latency configs.
*/
static const int latency_ns = 5000;
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x7f;
if (plane)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A", size);
return size;
}
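/*
 * DSPARB packing example (field positions per the masks/shifts above):
 * if the low split field reads 64 and the C-start field reads 96, plane
 * A owns FIFO entries 0-63 (size 64) and plane B owns entries 64-95
 * (size 96 - 64 = 32).
 */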
static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x1ff;
if (plane)
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A", size);
return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A",
size);
return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x7f;
size >>= 1; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A", size);
return size;
}
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
struct drm_crtc *crtc, *enabled = NULL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->enabled && crtc->fb) {
if (enabled)
return NULL;
enabled = crtc;
}
}
return enabled;
}
static void pineview_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
pineview_disable_cxsr(dev);
return;
}
crtc = single_enabled_crtc(dev);
if (crtc) {
int clock = crtc->mode.clock;
int pixel_size = crtc->fb->bits_per_pixel / 8;
/* Display SR */
wm = intel_calculate_wm(clock, &pineview_display_wm,
pineview_display_wm.fifo_size,
pixel_size, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= wm << DSPFW_SR_SHIFT;
I915_WRITE(DSPFW1, reg);
DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
/* cursor SR */
wm = intel_calculate_wm(clock, &pineview_cursor_wm,
pineview_display_wm.fifo_size,
pixel_size, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
reg |= wm & DSPFW_HPLL_SR_MASK;
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
/* activate cxsr */
I915_WRITE(DSPFW3,
I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
DRM_DEBUG_KMS("Self-refresh is enabled\n");
} else {
pineview_disable_cxsr(dev);
DRM_DEBUG_KMS("Self-refresh is disabled\n");
}
}
static bool g4x_compute_wm0(struct drm_device *dev,
int plane,
const struct intel_watermark_params *display,
int display_latency_ns,
const struct intel_watermark_params *cursor,
int cursor_latency_ns,
int *plane_wm,
int *cursor_wm)
{
struct drm_crtc *crtc;
int htotal, hdisplay, clock, pixel_size;
int line_time_us, line_count;
int entries, tlb_miss;
crtc = intel_get_crtc_for_plane(dev, plane);
if (crtc->fb == NULL || !crtc->enabled) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
}
htotal = crtc->mode.htotal;
hdisplay = crtc->mode.hdisplay;
clock = crtc->mode.clock;
pixel_size = crtc->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
entries = DIV_ROUND_UP(entries, display->cacheline_size);
*plane_wm = entries + display->guard_size;
if (*plane_wm > (int)display->max_wm)
*plane_wm = display->max_wm;
/* Use the large buffer method to calculate cursor watermark */
line_time_us = ((htotal * 1000) / clock);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
entries = line_count * 64 * pixel_size;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
if (*cursor_wm > (int)cursor->max_wm)
*cursor_wm = (int)cursor->max_wm;
return true;
}
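/*
 * Illustrative numbers for the two methods above (assumed mode, not
 * measured): at clock = 148500 kHz, pixel_size = 4 and a 5000 ns
 * latency, the small buffer method needs
 *   ((148500 * 4 / 1000) * 5000) / 1000 = 2970 bytes
 * of FIFO. For the cursor, with htotal = 2200 the line time is
 * 2200 * 1000 / 148500 = 14 us, so
 *   line_count = (5000 / 14 + 1000) / 1000 = 1 line,
 * i.e. 1 * 64 * 4 = 256 bytes before rounding up to cachelines and
 * adding the guard size.
 */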
/*
* Check the wm result.
*
* If any calculated watermark value is larger than the maximum value that
* can be programmed into the associated watermark register, that watermark
* must be disabled.
*/
static bool g4x_check_srwm(struct drm_device *dev,
int display_wm, int cursor_wm,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor)
{
DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
display_wm, cursor_wm);
if (display_wm > display->max_wm) {
DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
display_wm, display->max_wm);
return false;
}
if (cursor_wm > cursor->max_wm) {
DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
cursor_wm, cursor->max_wm);
return false;
}
if (!(display_wm || cursor_wm)) {
DRM_DEBUG_KMS("SR latency is 0, disabling\n");
return false;
}
return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
int plane,
int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *display_wm, int *cursor_wm)
{
struct drm_crtc *crtc;
int hdisplay, htotal, pixel_size, clock;
unsigned long line_time_us;
int line_count, line_size;
int small, large;
int entries;
if (!latency_ns) {
*display_wm = *cursor_wm = 0;
return false;
}
crtc = intel_get_crtc_for_plane(dev, plane);
hdisplay = crtc->mode.hdisplay;
htotal = crtc->mode.htotal;
clock = crtc->mode.clock;
pixel_size = crtc->fb->bits_per_pixel / 8;
line_time_us = (htotal * 1000) / clock;
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
/* Use the minimum of the small and large buffer method for primary */
small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
large = line_count * line_size;
entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
*display_wm = entries + display->guard_size;
/* calculate the self-refresh watermark for display cursor */
entries = line_count * pixel_size * 64;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
return g4x_check_srwm(dev,
*display_wm, *cursor_wm,
display, cursor);
}
#define single_plane_enabled(mask) is_power_of_2(mask)
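/*
 * single_plane_enabled() is true only for one-hot masks: enabled == 1
 * (pipe A only) or enabled == 2 (pipe B only) qualify, while 0 (none)
 * and 3 (both) do not, since is_power_of_2() rejects them.
 */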
static void g4x_update_wm(struct drm_device *dev)
{
static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
if (g4x_compute_wm0(dev, 0,
&g4x_wm_info, latency_ns,
&g4x_cursor_wm_info, latency_ns,
&planea_wm, &cursora_wm))
enabled |= 1;
if (g4x_compute_wm0(dev, 1,
&g4x_wm_info, latency_ns,
&g4x_cursor_wm_info, latency_ns,
&planeb_wm, &cursorb_wm))
enabled |= 2;
plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&g4x_wm_info,
&g4x_cursor_wm_info,
&plane_sr, &cursor_sr))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
else
I915_WRITE(FW_BLC_SELF,
I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
planeb_wm, cursorb_wm,
plane_sr, cursor_sr);
I915_WRITE(DSPFW1,
(plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i965_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
/* Calc sr entries for one plane configs */
crtc = single_enabled_crtc(dev);
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
int clock = crtc->mode.clock;
int htotal = crtc->mode.htotal;
int hdisplay = crtc->mode.hdisplay;
int pixel_size = crtc->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
line_time_us = ((htotal * 1000) / clock);
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * hdisplay;
entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
srwm = I965_FIFO_SIZE - entries;
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
entries, srwm);
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * 64;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
(entries + i965_cursor_wm_info.guard_size);
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
/* Turn off self refresh if both pipes are enabled */
if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
& ~FW_BLC_SELF_EN);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
srwm);
/* 965 has limitations... */
I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
(8 << 16) | (8 << 8) | (8 << 0));
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i9xx_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
uint32_t fwater_hi;
int cwm, srwm = 1;
int fifo_size;
int planea_wm, planeb_wm;
struct drm_crtc *crtc, *enabled = NULL;
if (IS_I945GM(dev))
wm_info = &i945_wm_info;
else if (!IS_GEN2(dev))
wm_info = &i915_wm_info;
else
wm_info = &i855_wm_info;
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
if (crtc->enabled && crtc->fb) {
planea_wm = intel_calculate_wm(crtc->mode.clock,
wm_info, fifo_size,
crtc->fb->bits_per_pixel / 8,
latency_ns);
enabled = crtc;
} else
planea_wm = fifo_size - wm_info->guard_size;
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
if (crtc->enabled && crtc->fb) {
planeb_wm = intel_calculate_wm(crtc->mode.clock,
wm_info, fifo_size,
crtc->fb->bits_per_pixel / 8,
latency_ns);
if (enabled == NULL)
enabled = crtc;
else
enabled = NULL;
} else
planeb_wm = fifo_size - wm_info->guard_size;
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
/*
* Overlay gets an aggressive default since video jitter is bad.
*/
cwm = 2;
/* Play safe and disable self-refresh before adjusting watermarks. */
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
else if (IS_I915GM(dev))
I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
/* Calc sr entries for one plane configs */
if (HAS_FW_BLC(dev) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
int clock = enabled->mode.clock;
int htotal = enabled->mode.htotal;
int hdisplay = enabled->mode.hdisplay;
int pixel_size = enabled->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
line_time_us = (htotal * 1000) / clock;
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * hdisplay;
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else if (IS_I915GM(dev))
I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
planea_wm, planeb_wm, cwm, srwm);
fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
fwater_hi = (cwm & 0x1f);
/* Set request length to 8 cachelines per fetch */
fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
fwater_hi = fwater_hi | (1 << 8);
I915_WRITE(FW_BLC, fwater_lo);
I915_WRITE(FW_BLC2, fwater_hi);
if (HAS_FW_BLC(dev)) {
if (enabled) {
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
else if (IS_I915GM(dev))
I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
DRM_DEBUG_KMS("memory self refresh enabled\n");
} else
DRM_DEBUG_KMS("memory self refresh disabled\n");
}
}
static void i830_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
uint32_t fwater_lo;
int planea_wm;
crtc = single_enabled_crtc(dev);
if (crtc == NULL)
return;
planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
crtc->fb->bits_per_pixel / 8,
latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
I915_WRITE(FW_BLC, fwater_lo);
}
#define ILK_LP0_PLANE_LATENCY 700
#define ILK_LP0_CURSOR_LATENCY 1300
/*
* Check the wm result.
*
* If any calculated watermark value is larger than the maximum value that
* can be programmed into the associated watermark register, that watermark
* must be disabled.
*/
static bool ironlake_check_srwm(struct drm_device *dev, int level,
int fbc_wm, int display_wm, int cursor_wm,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor)
{
struct drm_i915_private *dev_priv = dev->dev_private;
DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
" cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
if (fbc_wm > SNB_FBC_MAX_SRWM) {
DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
fbc_wm, SNB_FBC_MAX_SRWM, level);
/* FBC has its own way to disable the FBC WM */
I915_WRITE(DISP_ARB_CTL,
I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
return false;
}
if (display_wm > display->max_wm) {
DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
display_wm, SNB_DISPLAY_MAX_SRWM, level);
return false;
}
if (cursor_wm > cursor->max_wm) {
DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
cursor_wm, SNB_CURSOR_MAX_SRWM, level);
return false;
}
if (!(fbc_wm || display_wm || cursor_wm)) {
DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
return false;
}
return true;
}
/*
* Compute watermark values of WM[1-3],
*/
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *fbc_wm, int *display_wm, int *cursor_wm)
{
struct drm_crtc *crtc;
unsigned long line_time_us;
int hdisplay, htotal, pixel_size, clock;
int line_count, line_size;
int small, large;
int entries;
if (!latency_ns) {
*fbc_wm = *display_wm = *cursor_wm = 0;
return false;
}
crtc = intel_get_crtc_for_plane(dev, plane);
hdisplay = crtc->mode.hdisplay;
htotal = crtc->mode.htotal;
clock = crtc->mode.clock;
pixel_size = crtc->fb->bits_per_pixel / 8;
line_time_us = (htotal * 1000) / clock;
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
/* Use the minimum of the small and large buffer method for primary */
small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
large = line_count * line_size;
entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
*display_wm = entries + display->guard_size;
/*
* Spec says:
* FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
*/
*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
/* calculate the self-refresh watermark for display cursor */
entries = line_count * pixel_size * 64;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
return ironlake_check_srwm(dev, level,
*fbc_wm, *display_wm, *cursor_wm,
display, cursor);
}
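/*
 * Example of the FBC WM rule quoted above, with assumed values: for a
 * 1920-wide mode at 4 bytes per pixel (line_size = 7680) and a primary
 * watermark of 100 cachelines,
 *
 *   fbc_wm = DIV_ROUND_UP(100 * 64, 7680) + 2 = 1 + 2 = 3 lines
 *
 * which comfortably fits the register field, so the FBC watermark
 * stays enabled.
 */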
static void ironlake_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
enabled = 0;
if (g4x_compute_wm0(dev, 0,
&ironlake_display_wm_info,
ILK_LP0_PLANE_LATENCY,
&ironlake_cursor_wm_info,
ILK_LP0_CURSOR_LATENCY,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1;
}
if (g4x_compute_wm0(dev, 1,
&ironlake_display_wm_info,
ILK_LP0_PLANE_LATENCY,
&ironlake_cursor_wm_info,
ILK_LP0_CURSOR_LATENCY,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 2;
}
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*/
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
if (!single_plane_enabled(enabled))
return;
enabled = ffs(enabled) - 1;
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
ILK_READ_WM1_LATENCY() * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
(ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
ILK_READ_WM2_LATENCY() * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
(ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/*
* WM3 is unsupported on ILK, probably because we don't have latency
* data for that power state
*/
}
static void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* WM0 latency is in 0.1us units; convert to ns */
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
enabled = 0;
if (g4x_compute_wm0(dev, 0,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1;
}
if (g4x_compute_wm0(dev, 1,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 2;
}
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*
* SNB supports 3 levels of watermarks.
*
* WM1/WM2/WM3 watermarks have to be enabled in ascending order
* and disabled in descending order.
*
*/
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
if (!single_plane_enabled(enabled))
return;
enabled = ffs(enabled) - 1;
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
SNB_READ_WM1_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
(SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
SNB_READ_WM2_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
(SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/* WM3 */
if (!ironlake_compute_srwm(dev, 3, enabled,
SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
I915_WRITE(WM3_LP_ILK,
WM3_LP_EN |
(SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
}
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
* Calculate watermark values for the various WM regs based on current mode
* and plane configuration.
*
* There are several cases to deal with here:
* - normal (i.e. non-self-refresh)
* - self-refresh (SR) mode
* - lines are large relative to FIFO size (buffer can hold up to 2)
* - lines are small relative to FIFO size (buffer can hold more than 2
* lines), so need to account for TLB latency
*
* The normal calculation is:
* watermark = dotclock * bytes per pixel * latency
* where latency is platform & configuration dependent (we assume pessimal
* values here).
*
* The SR calculation is:
* watermark = (trunc(latency/line time)+1) * surface width *
* bytes per pixel
* where
* line time = htotal / dotclock
* surface width = hdisplay for normal plane and 64 for cursor
* and latency is assumed to be high, as above.
*
* The final value programmed to the register should always be rounded up,
* and include an extra 2 entries to account for clock crossings.
*
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->display.update_wm)
dev_priv->display.update_wm(dev);
}
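/*
 * Plugging assumed numbers into the formulas documented above: a
 * 148500 kHz dotclock at 4 bytes per pixel with 5 us latency gives a
 * normal watermark of roughly 148.5 MHz * 4 B * 5 us = 2970 bytes
 * (~47 64-byte cachelines). For SR with htotal = 2200 and
 * hdisplay = 1920, line time = 2200 / 148500 kHz = ~14.8 us, so a
 * 12 us latency needs (trunc(12 / 14.8) + 1) * 1920 * 4 = 7680 bytes
 * (~120 cachelines) before rounding and the extra guard entries.
 */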
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
return dev_priv->lvds_use_ssc && i915_panel_use_ssc
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
u32 temp;
u32 lvds_sync = 0;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
continue;
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_DVO:
is_dvo = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
case INTEL_OUTPUT_ANALOG:
is_crt = true;
break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
}
num_connectors++;
}
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
refclk / 1000);
} else if (!IS_GEN2(dev)) {
refclk = 96000;
} else {
refclk = 48000;
}
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
/* Ensure that the cursor is valid for the new mode before changing... */
intel_crtc_update_cursor(crtc, true);
if (is_lvds && dev_priv->lvds_downclock_avail) {
has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
&reduced_clock);
if (has_reduced_clock && (clock.p != reduced_clock.p)) {
/*
* If a different P is found, we can't switch the
* display clock by using FP0/FP1 alone. In that
* case we disable the LVDS downclock feature.
*/
DRM_DEBUG_KMS("Different P is found for "
"LVDS clock/downclock\n");
has_reduced_clock = 0;
}
}
/* SDVO TV has fixed PLL values that depend on its clock range;
this mirrors the VBIOS setting. */
if (is_sdvo && is_tv) {
if (adjusted_mode->clock >= 100000
&& adjusted_mode->clock < 140500) {
clock.p1 = 2;
clock.p2 = 10;
clock.n = 3;
clock.m1 = 16;
clock.m2 = 8;
} else if (adjusted_mode->clock >= 140500
&& adjusted_mode->clock <= 200000) {
clock.p1 = 1;
clock.p2 = 10;
clock.n = 6;
clock.m1 = 12;
clock.m2 = 8;
}
}
if (IS_PINEVIEW(dev)) {
fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = (1 << reduced_clock.n) << 16 |
reduced_clock.m1 << 8 | reduced_clock.m2;
} else {
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
reduced_clock.m2;
}
dpll = DPLL_VGA_MODE_DIS;
if (!IS_GEN2(dev)) {
if (is_lvds)
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
if (pixel_multiplier > 1) {
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
if (is_dp)
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
if (IS_PINEVIEW(dev))
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
else {
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
if (IS_G4X(dev) && has_reduced_clock)
dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
}
switch (clock.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
case 7:
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
break;
case 10:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
break;
case 14:
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
} else {
if (is_lvds) {
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock.p1 == 2)
dpll |= PLL_P1_DIVIDE_BY_TWO;
else
dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
if (clock.p2 == 4)
dpll |= PLL_P2_DIVIDE_BY_4;
}
}
if (is_sdvo && is_tv)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (is_tv)
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
/* setup pipeconf */
pipeconf = I915_READ(PIPECONF(pipe));
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
/* Ironlake's plane is forced to pipe, bit 24 is to
enable color space conversion */
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
dspcntr |= DISPPLANE_SEL_PIPE_B;
if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
/* Enable pixel doubling when the dot clock is > 90% of the (display)
* core speed.
*
* XXX: No double-wide on 915GM pipe B. Is that the only reason for the
* pipe == 0 check?
*/
if (mode->clock >
dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
pipeconf |= PIPECONF_DOUBLE_WIDE;
else
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
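/* e.g. with a 200000 kHz core display clock, any mode above
 * 180000 kHz (90%) ends up double-wide. */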
dpll |= DPLL_VCO_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
I915_WRITE(FP0(pipe), fp);
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(DPLL(pipe));
udelay(150);
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
if (is_lvds) {
temp = I915_READ(LVDS);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (pipe == 1) {
temp |= LVDS_PIPEB_SELECT;
} else {
temp &= ~LVDS_PIPEB_SELECT;
}
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
if (clock.p2 == 7)
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
/* set the dithering flag on LVDS as needed */
if (INTEL_INFO(dev)->gen >= 4) {
if (dev_priv->lvds_dither)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
lvds_sync |= LVDS_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
lvds_sync |= LVDS_VSYNC_POLARITY;
if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
!= lvds_sync) {
char flags[2] = "-+";
DRM_INFO("Changing LVDS panel from "
"(%chsync, %cvsync) to (%chsync, %cvsync)\n",
flags[!(temp & LVDS_HSYNC_POLARITY)],
flags[!(temp & LVDS_VSYNC_POLARITY)],
flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
temp |= lvds_sync;
}
I915_WRITE(LVDS, temp);
}
if (is_dp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
}
I915_WRITE(DPLL(pipe), dpll);
/* Wait for the clocks to stabilize. */
POSTING_READ(DPLL(pipe));
udelay(150);
if (INTEL_INFO(dev)->gen >= 4) {
temp = 0;
if (is_sdvo) {
temp = intel_mode_get_pixel_multiplier(adjusted_mode);
if (temp > 1)
temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
else
temp = 0;
}
I915_WRITE(DPLL_MD(pipe), temp);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(DPLL(pipe), dpll);
}
intel_crtc->lowfreq_avail = false;
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(FP1(pipe), fp2);
intel_crtc->lowfreq_avail = true;
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
}
} else {
I915_WRITE(FP1(pipe), fp);
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
}
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vdisplay -= 1;
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_start -= 1;
adjusted_mode->crtc_vblank_end -= 1;
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
I915_WRITE(HBLANK(pipe),
(adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
I915_WRITE(HSYNC(pipe),
(adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
I915_WRITE(VTOTAL(pipe),
(adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
I915_WRITE(VBLANK(pipe),
(adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
I915_WRITE(VSYNC(pipe),
(adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
I915_WRITE(DSPSIZE(plane),
((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
I915_WRITE(DSPPOS(plane), 0);
I915_WRITE(PIPESRC(pipe),
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
intel_enable_pipe(dev_priv, pipe, false);
intel_wait_for_vblank(dev, pipe);
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
intel_update_watermarks(dev);
return ret;
}
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *has_edp_encoder = NULL;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
struct fdi_m_n m_n = {0};
u32 temp;
u32 lvds_sync = 0;
int target_clock, pixel_multiplier, lane, link_bw, bpp, factor;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
continue;
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
case INTEL_OUTPUT_ANALOG:
is_crt = true;
break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
has_edp_encoder = encoder;
break;
}
num_connectors++;
}
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
refclk / 1000);
} else {
refclk = 96000;
if (!has_edp_encoder ||
intel_encoder_is_pch_edp(&has_edp_encoder->base))
refclk = 120000; /* 120MHz refclk */
}
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
/* Ensure that the cursor is valid for the new mode before changing... */
intel_crtc_update_cursor(crtc, true);
if (is_lvds && dev_priv->lvds_downclock_avail) {
has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
&reduced_clock);
if (has_reduced_clock && (clock.p != reduced_clock.p)) {
/*
* If a different P is found, we can't switch the
* display clock by using FP0/FP1 alone. In that
* case we disable the LVDS downclock feature.
*/
DRM_DEBUG_KMS("Different P is found for "
"LVDS clock/downclock\n");
has_reduced_clock = 0;
}
}
/* SDVO TV has fixed PLL values that depend on its clock range;
this mirrors the VBIOS setting. */
if (is_sdvo && is_tv) {
if (adjusted_mode->clock >= 100000
&& adjusted_mode->clock < 140500) {
clock.p1 = 2;
clock.p2 = 10;
clock.n = 3;
clock.m1 = 16;
clock.m2 = 8;
} else if (adjusted_mode->clock >= 140500
&& adjusted_mode->clock <= 200000) {
clock.p1 = 1;
clock.p2 = 10;
clock.n = 6;
clock.m1 = 12;
clock.m2 = 8;
}
}
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
lane = 0;
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
if (has_edp_encoder &&
!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
target_clock = mode->clock;
intel_edp_link_config(has_edp_encoder,
&lane, &link_bw);
} else {
/* [e]DP over FDI requires target mode clock
instead of link clock */
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
/* FDI is a binary signal running at ~2.7GHz, encoding
* each output octet as 10 bits. The actual frequency
* is stored as a divider into a 100MHz clock, and the
* mode pixel clock is stored in units of 1KHz.
* Hence the bw of each lane in terms of the mode signal
* is:
*/
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
}
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
if (is_lvds) {
/* the BPC will be 6 if it is an 18-bit LVDS panel */
if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
temp |= PIPE_8BPC;
else
temp |= PIPE_6BPC;
} else if (has_edp_encoder) {
switch (dev_priv->edp.bpp/3) {
case 8:
temp |= PIPE_8BPC;
break;
case 10:
temp |= PIPE_10BPC;
break;
case 6:
temp |= PIPE_6BPC;
break;
case 12:
temp |= PIPE_12BPC;
break;
}
} else
temp |= PIPE_8BPC;
I915_WRITE(PIPECONF(pipe), temp);
switch (temp & PIPE_BPC_MASK) {
case PIPE_8BPC:
bpp = 24;
break;
case PIPE_10BPC:
bpp = 30;
break;
case PIPE_6BPC:
bpp = 18;
break;
case PIPE_12BPC:
bpp = 36;
break;
default:
DRM_ERROR("unknown pipe bpc value\n");
bpp = 24;
}
if (!lane) {
/*
* Account for spread spectrum to avoid
* oversubscribing the link. Max center spread
* is 2.5%; use 5% for safety's sake.
*/
u32 bps = target_clock * bpp * 21 / 20;
lane = bps / (link_bw * 8) + 1;
}
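/* Sanity check with assumed numbers: target_clock = 148500 kHz at
 * bpp = 24 gives bps = 148500 * 24 * 21 / 20 = 3742200, and with
 * link_bw = 270000 that is 3742200 / (270000 * 8) + 1 = 2 FDI lanes. */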
intel_crtc->fdi_lanes = lane;
if (pixel_multiplier > 1)
link_bw *= pixel_multiplier;
ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
/* Ironlake: try to set up the display reference clock before
* enabling the DPLL. This is only under the driver's control
* after PCH B stepping; earlier chipset steppings should
* ignore this setting.
*/
temp = I915_READ(PCH_DREF_CONTROL);
/* Always enable nonspread source */
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
temp |= DREF_NONSPREAD_SOURCE_ENABLE;
temp &= ~DREF_SSC_SOURCE_MASK;
temp |= DREF_SSC_SOURCE_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
if (has_edp_encoder) {
if (intel_panel_use_ssc(dev_priv)) {
temp |= DREF_SSC1_ENABLE;
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
}
temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
/* Enable CPU source on CPU attached eDP */
if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
if (intel_panel_use_ssc(dev_priv))
temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
else
temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else {
/* Enable SSC on PCH eDP if needed */
if (intel_panel_use_ssc(dev_priv)) {
DRM_ERROR("enabling SSC on PCH\n");
temp |= DREF_SUPERSPREAD_SOURCE_ENABLE;
}
}
I915_WRITE(PCH_DREF_CONTROL, temp);
POSTING_READ(PCH_DREF_CONTROL);
udelay(200);
}
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
reduced_clock.m2;
/* Enable autotuning of the PLL clock (if permissible) */
factor = 21;
if (is_lvds) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->lvds_ssc_freq == 100) ||
(I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
factor = 25;
} else if (is_sdvo && is_tv)
factor = 20;
if (clock.m < factor * clock.n)
fp |= FP_CB_TUNE;
dpll = 0;
if (is_lvds)
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
if (pixel_multiplier > 1) {
dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
dpll |= DPLL_DVO_HIGH_SPEED;
/* compute bitmask from p1 value */
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
switch (clock.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
case 7:
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
break;
case 10:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
break;
case 14:
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
if (is_sdvo && is_tv)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (is_tv)
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
/* setup pipeconf */
pipeconf = I915_READ(PIPECONF(pipe));
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
/* PCH eDP needs FDI, but CPU eDP does not */
if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
I915_WRITE(PCH_FP0(pipe), fp);
I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(PCH_DPLL(pipe));
udelay(150);
}
/* enable transcoder DPLL */
if (HAS_PCH_CPT(dev)) {
temp = I915_READ(PCH_DPLL_SEL);
switch (pipe) {
case 0:
temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
break;
case 1:
temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
break;
case 2:
/* FIXME: manage transcoder PLLs? */
temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
break;
default:
BUG();
}
I915_WRITE(PCH_DPLL_SEL, temp);
POSTING_READ(PCH_DPLL_SEL);
udelay(150);
}
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
if (is_lvds) {
temp = I915_READ(PCH_LVDS);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (pipe == 1) {
if (HAS_PCH_CPT(dev))
temp |= PORT_TRANS_B_SEL_CPT;
else
temp |= LVDS_PIPEB_SELECT;
} else {
if (HAS_PCH_CPT(dev))
temp &= ~PORT_TRANS_SEL_MASK;
else
temp &= ~LVDS_PIPEB_SELECT;
}
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
if (clock.p2 == 7)
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
lvds_sync |= LVDS_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
lvds_sync |= LVDS_VSYNC_POLARITY;
if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
!= lvds_sync) {
char flags[2] = "-+";
DRM_INFO("Changing LVDS panel from "
"(%chsync, %cvsync) to (%chsync, %cvsync)\n",
flags[!(temp & LVDS_HSYNC_POLARITY)],
flags[!(temp & LVDS_VSYNC_POLARITY)],
flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
temp |= lvds_sync;
}
I915_WRITE(PCH_LVDS, temp);
}
/* Set the dithering flag for panels and clear it for anything else. */
pipeconf &= ~PIPECONF_DITHER_EN;
pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
pipeconf |= PIPECONF_DITHER_EN;
pipeconf |= PIPECONF_DITHER_TYPE_ST1;
}
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
/* For non-DP output, clear any trans DP clock recovery setting.*/
I915_WRITE(TRANSDATA_M1(pipe), 0);
I915_WRITE(TRANSDATA_N1(pipe), 0);
I915_WRITE(TRANSDPLINK_M1(pipe), 0);
I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
if (!has_edp_encoder ||
intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
I915_WRITE(PCH_DPLL(pipe), dpll);
/* Wait for the clocks to stabilize. */
POSTING_READ(PCH_DPLL(pipe));
udelay(150);
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(PCH_DPLL(pipe), dpll);
}
intel_crtc->lowfreq_avail = false;
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(PCH_FP1(pipe), fp2);
intel_crtc->lowfreq_avail = true;
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
}
} else {
I915_WRITE(PCH_FP1(pipe), fp);
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
}
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vdisplay -= 1;
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_start -= 1;
adjusted_mode->crtc_vblank_end -= 1;
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
I915_WRITE(HBLANK(pipe),
(adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
I915_WRITE(HSYNC(pipe),
(adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
I915_WRITE(VTOTAL(pipe),
(adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
I915_WRITE(VBLANK(pipe),
(adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
I915_WRITE(VSYNC(pipe),
(adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
I915_WRITE(PIPESRC(pipe),
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
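/* Illustrative example: a requested 1024x768 mode writes
* ((1024 - 1) << 16) | (768 - 1) = 0x03ff02ff, i.e. width-1 in the
* high half-word and height-1 in the low half-word.
*/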
I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
if (has_edp_encoder &&
!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
intel_wait_for_vblank(dev, pipe);
if (IS_GEN5(dev)) {
/* enable address swizzle for tiling buffer */
temp = I915_READ(DISP_ARB_CTL);
I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
}
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
ret = intel_pipe_set_base(crtc, x, y, old_fb);
intel_update_watermarks(dev);
return ret;
}
static int intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int ret;
drm_vblank_pre_modeset(dev, pipe);
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
x, y, old_fb);
drm_vblank_post_modeset(dev, pipe);
return ret;
}
/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int palreg = PALETTE(intel_crtc->pipe);
int i;
/* The clocks have to be on to load the palette. */
if (!crtc->enabled)
return;
/* use legacy palette for Ironlake */
if (HAS_PCH_SPLIT(dev))
palreg = LGC_PALETTE(intel_crtc->pipe);
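/* Each palette entry packs the 8-bit channels as 0x00RRGGBB; e.g.
* lut_r = 0x12, lut_g = 0x34, lut_b = 0x56 is written as 0x00123456.
*/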
for (i = 0; i < 256; i++) {
I915_WRITE(palreg + 4 * i,
(intel_crtc->lut_r[i] << 16) |
(intel_crtc->lut_g[i] << 8) |
intel_crtc->lut_b[i]);
}
}
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool visible = base != 0;
u32 cntl;
if (intel_crtc->cursor_visible == visible)
return;
cntl = I915_READ(_CURACNTR);
if (visible) {
/* On these chipsets we can only modify the base whilst
* the cursor is disabled.
*/
I915_WRITE(_CURABASE, base);
cntl &= ~(CURSOR_FORMAT_MASK);
/* XXX width must be 64, stride 256 => 0x00 << 28 */
cntl |= CURSOR_ENABLE |
CURSOR_GAMMA_ENABLE |
CURSOR_FORMAT_ARGB;
} else
cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
I915_WRITE(_CURACNTR, cntl);
intel_crtc->cursor_visible = visible;
}
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
bool visible = base != 0;
if (intel_crtc->cursor_visible != visible) {
uint32_t cntl = I915_READ(CURCNTR(pipe));
if (base) {
cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
cntl |= pipe << 28; /* Connect to correct pipe */
} else {
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
I915_WRITE(CURCNTR(pipe), cntl);
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
I915_WRITE(CURBASE(pipe), base);
}
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
bool visible = base != 0;
if (intel_crtc->cursor_visible != visible) {
uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
if (base) {
cntl &= ~CURSOR_MODE;
cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
} else {
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
I915_WRITE(CURCNTR_IVB(pipe), cntl);
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
I915_WRITE(CURBASE_IVB(pipe), base);
}
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
bool on)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int x = intel_crtc->cursor_x;
int y = intel_crtc->cursor_y;
u32 base, pos;
bool visible;
pos = 0;
if (on && crtc->enabled && crtc->fb) {
base = intel_crtc->cursor_addr;
if (x > (int) crtc->fb->width)
base = 0;
if (y > (int) crtc->fb->height)
base = 0;
} else
base = 0;
if (x < 0) {
if (x + intel_crtc->cursor_width < 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
x = -x;
}
pos |= x << CURSOR_X_SHIFT;
if (y < 0) {
if (y + intel_crtc->cursor_height < 0)
base = 0;
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
y = -y;
}
pos |= y << CURSOR_Y_SHIFT;
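/* The position register uses sign-magnitude encoding: a negative
* coordinate sets CURSOR_POS_SIGN and stores the absolute value, so
* e.g. x = -5 is emitted as (CURSOR_POS_SIGN | 5) << CURSOR_X_SHIFT.
*/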
visible = base != 0;
if (!visible && !intel_crtc->cursor_visible)
return;
if (IS_IVYBRIDGE(dev)) {
I915_WRITE(CURPOS_IVB(pipe), pos);
ivb_update_cursor(crtc, base);
} else {
I915_WRITE(CURPOS(pipe), pos);
if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
else
i9xx_update_cursor(crtc, base);
}
if (visible)
intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
}
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file,
uint32_t handle,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj;
uint32_t addr;
int ret;
DRM_DEBUG_KMS("\n");
/* if we want to turn off the cursor, ignore width and height */
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
obj = NULL;
mutex_lock(&dev->struct_mutex);
goto finish;
}
/* Currently we only support 64x64 cursors */
if (width != 64 || height != 64) {
DRM_ERROR("we currently only support 64x64 cursors\n");
return -EINVAL;
}
obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
if (&obj->base == NULL)
return -ENOENT;
if (obj->base.size < width * height * 4) {
DRM_ERROR("buffer is to small\n");
ret = -ENOMEM;
goto fail;
}
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!dev_priv->info->cursor_needs_physical) {
if (obj->tiling_mode) {
DRM_ERROR("cursor cannot be tiled\n");
ret = -EINVAL;
goto fail_locked;
}
ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret) {
DRM_ERROR("failed to pin cursor bo\n");
goto fail_locked;
}
ret = i915_gem_object_set_to_gtt_domain(obj, 0);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
goto fail_unpin;
}
ret = i915_gem_object_put_fence(obj);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
goto fail_unpin;
}
addr = obj->gtt_offset;
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
goto fail_locked;
}
addr = obj->phys_obj->handle->busaddr;
}
if (IS_GEN2(dev))
I915_WRITE(CURSIZE, (height << 12) | width);
finish:
if (intel_crtc->cursor_bo) {
if (dev_priv->info->cursor_needs_physical) {
if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
i915_gem_object_unpin(intel_crtc->cursor_bo);
drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
mutex_unlock(&dev->struct_mutex);
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = obj;
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
intel_crtc_update_cursor(crtc, true);
return 0;
fail_unpin:
i915_gem_object_unpin(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:
drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_crtc->cursor_x = x;
intel_crtc->cursor_y = y;
intel_crtc_update_cursor(crtc, true);
return 0;
}
/** Sets the color ramps on behalf of RandR */
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
intel_crtc->lut_r[regno] = red >> 8;
intel_crtc->lut_g[regno] = green >> 8;
intel_crtc->lut_b[regno] = blue >> 8;
}
void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
*red = intel_crtc->lut_r[regno] << 8;
*green = intel_crtc->lut_g[regno] << 8;
*blue = intel_crtc->lut_b[regno] << 8;
}
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t start, uint32_t size)
{
int end = (start + size > 256) ? 256 : start + size, i;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
for (i = start; i < end; i++) {
intel_crtc->lut_r[i] = red[i] >> 8;
intel_crtc->lut_g[i] = green[i] >> 8;
intel_crtc->lut_b[i] = blue[i] >> 8;
}
intel_crtc_load_lut(crtc);
}
/**
* Get a pipe with a simple mode set on it for doing load-based monitor
* detection.
*
* It will be up to the load-detect code to adjust the pipe as appropriate for
* its requirements. The pipe will be connected to no other encoders.
*
* Currently this code will only succeed if there is a pipe with no encoders
* configured for it. In the future, it could choose to temporarily disable
* some outputs to free up a pipe for its use.
*
* \return crtc, or NULL if no pipes are available.
*/
/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
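/* With these timings the refresh rate works out to
* 31,500,000 / (832 * 520) ~= 72.8Hz, matching the VESA 72Hz label
* above.
*/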
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_i915_gem_object *obj)
{
struct intel_framebuffer *intel_fb;
int ret;
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
drm_gem_object_unreference_unlocked(&obj->base);
return ERR_PTR(-ENOMEM);
}
ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
if (ret) {
drm_gem_object_unreference_unlocked(&obj->base);
kfree(intel_fb);
return ERR_PTR(ret);
}
return &intel_fb->base;
}
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
u32 pitch = DIV_ROUND_UP(width * bpp, 8);
return ALIGN(pitch, 64);
}
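/* Illustrative example: width = 1024 at 32bpp gives
* DIV_ROUND_UP(1024 * 32, 8) = 4096, which is already 64-byte aligned,
* so the returned pitch is 4096 bytes.
*/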
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
}
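/* Illustrative example: a 1024x768 mode at 32bpp has a 4096-byte
* pitch, so the allocation is ALIGN(4096 * 768, PAGE_SIZE) = 3145728
* bytes, i.e. exactly 3MiB assuming 4KiB pages.
*/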
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
struct drm_display_mode *mode,
int depth, int bpp)
{
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd mode_cmd;
obj = i915_gem_alloc_object(dev,
intel_framebuffer_size_for_mode(mode, bpp));
if (obj == NULL)
return ERR_PTR(-ENOMEM);
mode_cmd.width = mode->hdisplay;
mode_cmd.height = mode->vdisplay;
mode_cmd.depth = depth;
mode_cmd.bpp = bpp;
mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
return intel_framebuffer_create(dev, &mode_cmd, obj);
}
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_framebuffer *fb;
if (dev_priv->fbdev == NULL)
return NULL;
obj = dev_priv->fbdev->ifb.obj;
if (obj == NULL)
return NULL;
fb = &dev_priv->fbdev->ifb.base;
if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
fb->bits_per_pixel))
return NULL;
if (obj->base.size < mode->vdisplay * fb->pitch)
return NULL;
return fb;
}
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old)
{
struct intel_crtc *intel_crtc;
struct drm_crtc *possible_crtc;
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *old_fb;
int i = -1;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
encoder->base.id, drm_get_encoder_name(encoder));
/*
* Algorithm gets a little messy:
*
* - if the connector already has an assigned crtc, use it (but make
* sure it's on first)
*
* - try to find the first unused crtc that can drive this connector,
* and use that if we find one
*/
/* See if we already have a CRTC for this connector */
if (encoder->crtc) {
crtc = encoder->crtc;
intel_crtc = to_intel_crtc(crtc);
old->dpms_mode = intel_crtc->dpms_mode;
old->load_detect_temp = false;
/* Make sure the crtc and connector are running */
if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc_helper_funcs *crtc_funcs;
crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
encoder_funcs = encoder->helper_private;
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
return true;
}
/* Find an unused one (if possible) */
list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
if (!possible_crtc->enabled) {
crtc = possible_crtc;
break;
}
}
/*
* If we didn't find an unused CRTC, don't use any.
*/
if (!crtc) {
DRM_DEBUG_KMS("no pipe available for load-detect\n");
return false;
}
encoder->crtc = crtc;
connector->encoder = encoder;
intel_crtc = to_intel_crtc(crtc);
old->dpms_mode = intel_crtc->dpms_mode;
old->load_detect_temp = true;
old->release_fb = NULL;
if (!mode)
mode = &load_detect_mode;
old_fb = crtc->fb;
/* We need a framebuffer large enough to accommodate all accesses
* that the plane may generate whilst we perform load detection.
* We cannot rely on the fbcon being present (we may be called
* during its initialisation to detect all boot displays, or it may
* not even exist), nor on it being large enough to satisfy the
* requested mode.
*/
crtc->fb = mode_fits_in_fbdev(dev, mode);
if (crtc->fb == NULL) {
DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
old->release_fb = crtc->fb;
} else
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(crtc->fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
crtc->fb = old_fb;
return false;
}
if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
crtc->fb = old_fb;
return false;
}
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
return true;
}
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
struct intel_load_detect_pipe *old)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
encoder->base.id, drm_get_encoder_name(encoder));
if (old->load_detect_temp) {
connector->encoder = NULL;
drm_helper_disable_unused_functions(dev);
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
return;
}
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON) {
encoder_funcs->dpms(encoder, old->dpms_mode);
crtc_funcs->dpms(crtc, old->dpms_mode);
}
}
/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 dpll = I915_READ(DPLL(pipe));
u32 fp;
intel_clock_t clock;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
fp = I915_READ(FP0(pipe));
else
fp = I915_READ(FP1(pipe));
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
if (IS_PINEVIEW(dev)) {
clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
if (!IS_GEN2(dev)) {
if (IS_PINEVIEW(dev))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
else
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
switch (dpll & DPLL_MODE_MASK) {
case DPLLB_MODE_DAC_SERIAL:
clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
5 : 10;
break;
case DPLLB_MODE_LVDS:
clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
7 : 14;
break;
default:
DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
return 0;
}
/* XXX: Handle the 100MHz refclk */
intel_clock(dev, 96000, &clock);
} else {
bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
if (is_lvds) {
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
clock.p2 = 14;
if ((dpll & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
/* XXX: might not be 66MHz */
intel_clock(dev, 66000, &clock);
} else
intel_clock(dev, 48000, &clock);
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
else {
clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
}
if (dpll & PLL_P2_DIVIDE_BY_4)
clock.p2 = 4;
else
clock.p2 = 2;
intel_clock(dev, 48000, &clock);
}
}
/* XXX: It would be nice to validate the clocks, but we can't reuse
* i830PllIsValid() because it relies on the xf86_config connector
* configuration being accurate, which it isn't necessarily.
*/
return clock.dot;
}
/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
struct drm_display_mode *mode;
int htot = I915_READ(HTOTAL(pipe));
int hsync = I915_READ(HSYNC(pipe));
int vtot = I915_READ(VTOTAL(pipe));
int vsync = I915_READ(VSYNC(pipe));
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
return NULL;
mode->clock = intel_crtc_clock_get(dev, crtc);
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
mode->vdisplay = (vtot & 0xffff) + 1;
mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
mode->vsync_start = (vsync & 0xffff) + 1;
mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
drm_mode_set_name(mode);
drm_mode_set_crtcinfo(mode, 0);
return mode;
}
#define GPU_IDLE_TIMEOUT 500 /* ms */
/* When this timer fires, we've been idle for a while */
static void intel_gpu_idle_timer(unsigned long arg)
{
struct drm_device *dev = (struct drm_device *)arg;
drm_i915_private_t *dev_priv = dev->dev_private;
if (!list_empty(&dev_priv->mm.active_list)) {
/* Still processing requests, so just re-arm the timer. */
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
return;
}
dev_priv->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
#define CRTC_IDLE_TIMEOUT 1000 /* ms */
static void intel_crtc_idle_timer(unsigned long arg)
{
struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
struct drm_crtc *crtc = &intel_crtc->base;
drm_i915_private_t *dev_priv = crtc->dev->dev_private;
struct intel_framebuffer *intel_fb;
intel_fb = to_intel_framebuffer(crtc->fb);
if (intel_fb && intel_fb->obj->active) {
/* The framebuffer is still being accessed by the GPU. */
mod_timer(&intel_crtc->idle_timer, jiffies +
msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
return;
}
intel_crtc->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
int dpll;
if (HAS_PCH_SPLIT(dev))
return;
if (!dev_priv->lvds_downclock_avail)
return;
dpll = I915_READ(dpll_reg);
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
DRM_DEBUG_DRIVER("upclocking LVDS\n");
/* Unlock panel regs */
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
/* ...and lock them again */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
}
/* Schedule downclock */
mod_timer(&intel_crtc->idle_timer, jiffies +
msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
int dpll = I915_READ(dpll_reg);
if (HAS_PCH_SPLIT(dev))
return;
if (!dev_priv->lvds_downclock_avail)
return;
/*
* Since this is called by a timer, we should never get here in
* the manual case.
*/
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
DRM_DEBUG_DRIVER("downclocking LVDS\n");
/* Unlock panel regs */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
PANEL_UNLOCK_REGS);
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
/* ...and lock them again */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
}
}
/**
* intel_idle_update - adjust clocks for idleness
* @work: work struct
*
* Either the GPU or display (or both) went idle. Check the busy status
* here and adjust the CRTC and GPU clocks as necessary.
*/
static void intel_idle_update(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
idle_work);
struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
if (!i915_powersave)
return;
mutex_lock(&dev->struct_mutex);
i915_update_gfx_val(dev_priv);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
continue;
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->busy)
intel_decrease_pllclock(crtc);
}
mutex_unlock(&dev->struct_mutex);
}
/**
* intel_mark_busy - mark the GPU and possibly the display busy
* @dev: drm device
* @obj: object we're operating on
*
* Callers can use this function to indicate that the GPU is busy processing
* commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
* buffer), we'll also mark the display as busy, so we know to increase its
* clock frequency.
*/
void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
struct intel_framebuffer *intel_fb;
struct intel_crtc *intel_crtc;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
if (!dev_priv->busy)
dev_priv->busy = true;
else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
continue;
intel_crtc = to_intel_crtc(crtc);
intel_fb = to_intel_framebuffer(crtc->fb);
if (intel_fb->obj == obj) {
if (!intel_crtc->busy) {
/* Non-busy -> busy, upclock */
intel_increase_pllclock(crtc);
intel_crtc->busy = true;
} else {
/* Busy -> busy, put off timer */
mod_timer(&intel_crtc->idle_timer, jiffies +
msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
}
}
}
}
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct intel_unpin_work *work;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
if (work) {
cancel_work_sync(&work->work);
kfree(work);
}
drm_crtc_cleanup(crtc);
kfree(intel_crtc);
}
static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
mutex_lock(&work->dev->struct_mutex);
i915_gem_object_unpin(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
static void do_intel_finish_page_flip(struct drm_device *dev,
struct drm_crtc *crtc)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj;
struct drm_pending_vblank_event *e;
struct timeval tnow, tvbl;
unsigned long flags;
/* Ignore early vblank irqs */
if (intel_crtc == NULL)
return;
do_gettimeofday(&tnow);
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
intel_crtc->unpin_work = NULL;
if (work->event) {
e = work->event;
e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
/* Called before vblank count and timestamps have
* been updated for the vblank interval of flip
* completion? Need to increment vblank count and
* add one videorefresh duration to returned timestamp
* to account for this. We assume this happened if we
* get called over 0.9 frame durations after the last
* timestamped vblank.
*
* This calculation cannot be used with vrefresh rates
* below 5Hz (10Hz to be on the safe side) without
* promoting to 64-bit integers.
*/
if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
9 * crtc->framedur_ns) {
e->event.sequence++;
tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
crtc->framedur_ns);
}
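/* The comparison above is (tnow - tvbl) > 0.9 * framedur_ns kept in
* integer math; e.g. at 60Hz framedur_ns is ~16.7ms, so the fixup
* triggers when completion is observed more than ~15ms after the last
* timestamped vblank.
*/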
e->event.tv_sec = tvbl.tv_sec;
e->event.tv_usec = tvbl.tv_usec;
list_add_tail(&e->base.link,
&e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
}
drm_vblank_put(dev, intel_crtc->pipe);
spin_unlock_irqrestore(&dev->event_lock, flags);
obj = work->old_fb_obj;
atomic_clear_mask(1 << intel_crtc->plane,
&obj->pending_flip.counter);
if (atomic_read(&obj->pending_flip) == 0)
wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
do_intel_finish_page_flip(dev, crtc);
}
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
do_intel_finish_page_flip(dev, crtc);
}
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
if ((++intel_crtc->unpin_work->pending) > 1)
DRM_ERROR("Prepared flip multiple times\n");
} else {
DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long offset;
u32 flip_mask;
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
if (ret)
goto out;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
ret = BEGIN_LP_RING(6);
if (ret)
goto out;
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
*/
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
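/* The six dwords emitted above are: the wait-for-flip event, a NOOP,
* the MI_DISPLAY_FLIP header, the fb pitch, the GTT address of the new
* scanout (including the x/y offset computed earlier) and a trailing
* NOOP.
*/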
out:
return ret;
}
static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long offset;
u32 flip_mask;
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
if (ret)
goto out;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
ret = BEGIN_LP_RING(6);
if (ret)
goto out;
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
out:
return ret;
}
static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
if (ret)
goto out;
ret = BEGIN_LP_RING(4);
if (ret)
goto out;
/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
OUT_RING(obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
* pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
OUT_RING(pf | pipesrc);
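/* The 0x0fff0fff mask applied above keeps only the 12-bit width-1 and
* height-1 fields of PIPESRC, dropping the reserved bits in between.
*/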
ADVANCE_LP_RING();
out:
return ret;
}
static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
if (ret)
goto out;
ret = BEGIN_LP_RING(4);
if (ret)
goto out;
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch | obj->tiling_mode);
OUT_RING(obj->gtt_offset);
pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
OUT_RING(pf | pipesrc);
ADVANCE_LP_RING();
out:
return ret;
}
/*
* On gen7 we currently use the blit ring because (in early silicon at least)
* the render ring doesn't give us interrupts for page flip completion, which
* means clients will hang after the first flip is queued. Fortunately the
* blit ring generates interrupts properly, so use it instead.
*/
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto out;
ret = intel_ring_begin(ring, 4);
if (ret)
goto out;
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
intel_ring_emit(ring, (obj->gtt_offset));
intel_ring_emit(ring, (MI_NOOP));
intel_ring_advance(ring);
out:
return ret;
}
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
{
return -ENODEV;
}
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
int ret;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
work->event = event;
work->dev = crtc->dev;
intel_fb = to_intel_framebuffer(crtc->fb);
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
}
intel_crtc->unpin_work = work;
spin_unlock_irqrestore(&dev->event_lock, flags);
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
/* Reference the objects for the scheduled work. */
drm_gem_object_reference(&work->old_fb_obj->base);
drm_gem_object_reference(&obj->base);
crtc->fb = fb;
ret = drm_vblank_get(dev, intel_crtc->pipe);
if (ret)
goto cleanup_objs;
work->pending_flip_obj = obj;
work->enable_stall_check = true;
/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
goto cleanup_pending;
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
return 0;
cleanup_pending:
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
spin_lock_irqsave(&dev->event_lock, flags);
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
return ret;
}
static void intel_sanitize_modesetting(struct drm_device *dev,
int pipe, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg, val;
if (HAS_PCH_SPLIT(dev))
return;
/* Who knows what state these registers were left in by the BIOS or
* grub?
*
* If we leave the registers in a conflicting state (e.g. with the
* display plane reading from the other pipe than the one we intend
* to use) then when we attempt to teardown the active mode, we will
* not disable the pipes and planes in the correct order -- leaving
* a plane reading from a disabled pipe and possibly leading to
* undefined behaviour.
*/
reg = DSPCNTR(plane);
val = I915_READ(reg);
if ((val & DISPLAY_PLANE_ENABLE) == 0)
return;
if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
return;
/* This display plane is active and attached to the other CPU pipe. */
pipe = !pipe;
/* Disable the plane and wait for it to stop reading from the pipe. */
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
}
static void intel_crtc_reset(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/* Reset flags back to the 'unknown' status so that they
* will be correctly set on the initial modeset.
*/
intel_crtc->dpms_mode = -1;
/* We need to fix up any BIOS configuration that conflicts with
* our expectations.
*/
intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
static struct drm_crtc_helper_funcs intel_helper_funcs = {
.dpms = intel_crtc_dpms,
.mode_fixup = intel_crtc_mode_fixup,
.mode_set = intel_crtc_mode_set,
.mode_set_base = intel_pipe_set_base,
.mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
.disable = intel_crtc_disable,
};
static const struct drm_crtc_funcs intel_crtc_funcs = {
.reset = intel_crtc_reset,
.cursor_set = intel_crtc_cursor_set,
.cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
};
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
int i;
intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
if (intel_crtc == NULL)
return;
drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
for (i = 0; i < 256; i++) {
intel_crtc->lut_r[i] = i;
intel_crtc->lut_g[i] = i;
intel_crtc->lut_b[i] = i;
}
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
}
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
intel_crtc_reset(&intel_crtc->base);
intel_crtc->active = true; /* force the pipe off on setup_init_config */
if (HAS_PCH_SPLIT(dev)) {
intel_helper_funcs.prepare = ironlake_crtc_prepare;
intel_helper_funcs.commit = ironlake_crtc_commit;
} else {
intel_helper_funcs.prepare = i9xx_crtc_prepare;
intel_helper_funcs.commit = i9xx_crtc_commit;
}
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
intel_crtc->busy = false;
setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
(unsigned long)intel_crtc);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!drmmode_obj) {
DRM_ERROR("no such CRTC id\n");
return -EINVAL;
}
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
pipe_from_crtc_id->pipe = crtc->pipe;
return 0;
}
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
{
struct intel_encoder *encoder;
int index_mask = 0;
int entry = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
if (type_mask & encoder->clone_mask)
index_mask |= (1 << entry);
entry++;
}
return index_mask;
}
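/* Illustrative example: if the encoder list holds three encoders and
* type_mask intersects only the clone masks of the second and third,
* the returned index_mask is 0x6 (bits 1 and 2 set).
*/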
static bool has_edp_a(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!IS_MOBILE(dev))
return false;
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
return false;
if (IS_GEN5(dev) &&
(I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
return false;
return true;
}
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
bool has_lvds = false;
if (IS_MOBILE(dev) && !IS_I830(dev))
has_lvds = intel_lvds_init(dev);
if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
/* disable the panel fitter on everything but LVDS */
I915_WRITE(PFIT_CONTROL, 0);
}
if (HAS_PCH_SPLIT(dev)) {
dpd_is_edp = intel_dpd_is_edp(dev);
if (has_edp_a(dev))
intel_dp_init(dev, DP_A);
if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_D);
}
intel_crt_init(dev);
if (HAS_PCH_SPLIT(dev)) {
int found;
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, PCH_SDVOB);
if (!found)
intel_hdmi_init(dev, HDMIB);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_B);
}
if (I915_READ(HDMIC) & PORT_DETECTED)
intel_hdmi_init(dev, HDMIC);
if (I915_READ(HDMID) & PORT_DETECTED)
intel_hdmi_init(dev, HDMID);
if (I915_READ(PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_C);
if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_D);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, SDVOB);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, SDVOB);
}
if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_B\n");
intel_dp_init(dev, DP_B);
}
}
/* Before G4X, SDVOC doesn't have its own detect register */
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOC\n");
found = intel_sdvo_init(dev, SDVOC);
}
if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
if (SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, SDVOC);
}
if (SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_C\n");
intel_dp_init(dev, DP_C);
}
}
if (SUPPORTS_INTEGRATED_DP(dev) &&
(I915_READ(DP_D) & DP_DETECTED)) {
DRM_DEBUG_KMS("probing DP_D\n");
intel_dp_init(dev, DP_D);
}
} else if (IS_GEN2(dev))
intel_dvo_init(dev);
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
intel_encoder_clones(dev, encoder->clone_mask);
}
intel_panel_setup_backlight(dev);
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
kfree(intel_fb);
}
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file,
unsigned int *handle)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
return drm_gem_handle_create(file, &obj->base, handle);
}
static const struct drm_framebuffer_funcs intel_fb_funcs = {
.destroy = intel_user_framebuffer_destroy,
.create_handle = intel_user_framebuffer_create_handle,
};
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_i915_gem_object *obj)
{
int ret;
if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;
if (mode_cmd->pitch & 63)
return -EINVAL;
switch (mode_cmd->bpp) {
case 8:
case 16:
case 24:
case 32:
break;
default:
return -EINVAL;
}
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
return ret;
}
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
return 0;
}
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd *mode_cmd)
{
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
return intel_framebuffer_create(dev, mode_cmd, obj);
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.output_poll_changed = intel_fb_output_poll_changed,
};
static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
struct drm_i915_gem_object *ctx;
int ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
ctx = i915_gem_alloc_object(dev, 4096);
if (!ctx) {
DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
return NULL;
}
ret = i915_gem_object_pin(ctx, 4096, true);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
}
ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
if (ret) {
DRM_ERROR("failed to set-domain on power context: %d\n", ret);
goto err_unpin;
}
return ctx;
err_unpin:
i915_gem_object_unpin(ctx);
err_unref:
drm_gem_object_unreference(&ctx->base);
mutex_unlock(&dev->struct_mutex);
return NULL;
}
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
rgvswctl = I915_READ16(MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
return false; /* still busy with another command */
}
rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
I915_WRITE16(MEMSWCTL, rgvswctl);
POSTING_READ16(MEMSWCTL);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE16(MEMSWCTL, rgvswctl);
return true;
}
void ironlake_enable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
/* Enable temp reporting */
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
/* 100ms RC evaluation intervals */
I915_WRITE(RCUPEI, 100000);
I915_WRITE(RCDNEI, 100000);
/* Set max/min thresholds to 90ms and 80ms respectively */
I915_WRITE(RCBMAXAVG, 90000);
I915_WRITE(RCBMINAVG, 80000);
I915_WRITE(MEMIHYST, 1);
/* Set up min, max, and cur for interrupt handling */
fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
dev_priv->fmax = fmax; /* IPS callback will increase this */
dev_priv->fstart = fstart;
dev_priv->max_delay = fstart;
dev_priv->min_delay = fmin;
dev_priv->cur_delay = fstart;
DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
fmax, fmin, fstart);
I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
/*
* Interrupts will be enabled in ironlake_irq_postinstall
*/
I915_WRITE(VIDSTART, vstart);
POSTING_READ(VIDSTART);
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
DRM_ERROR("stuck trying to change perf mode\n");
msleep(1);
ironlake_set_drps(dev, fstart);
dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
I915_READ(0x112e0);
dev_priv->last_time1 = jiffies_to_msecs(jiffies);
dev_priv->last_count2 = I915_READ(0x112f4);
getrawmonotonic(&dev_priv->last_time2);
}
void ironlake_disable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl = I915_READ16(MEMSWCTL);
/* Ack interrupts, disable EFC interrupt */
I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
I915_WRITE(DEIIR, DE_PCU_EVENT);
I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
/* Go back to the starting frequency */
ironlake_set_drps(dev, dev_priv->fstart);
msleep(1);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE(MEMSWCTL, rgvswctl);
msleep(1);
}
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 swreq;
swreq = (val & 0x3ff) << 25;
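/* This appears to use the same (val << 25) field encoding as the
* GEN6_FREQUENCY() writes further down in this file, placing the
* requested ratio in the normal-frequency field of GEN6_RPNSWREQ.
*/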
I915_WRITE(GEN6_RPNSWREQ, swreq);
}
void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, 0);
spin_lock_irq(&dev_priv->rps_lock);
dev_priv->pm_iir = 0;
spin_unlock_irq(&dev_priv->rps_lock);
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
unsigned long freq;
int div = (vidfreq & 0x3f0000) >> 16;
int post = (vidfreq & 0x3000) >> 12;
int pre = (vidfreq & 0x7);
if (!pre)
return 0;
freq = ((div * 133333) / ((1<<post) * pre));
return freq;
}
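/* Worked example: vidfreq = 0x00110001 gives div = 0x11 = 17,
* post = 0, pre = 1, so freq = (17 * 133333) / ((1 << 0) * 1) =
* 2266661, i.e. the 133333 base constant scaled by div and divided by
* the post/pre fields.
*/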
void intel_init_emon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lcfuse;
u8 pxw[16];
int i;
/* Disable to program */
I915_WRITE(ECR, 0);
POSTING_READ(ECR);
/* Program energy weights for various events */
I915_WRITE(SDEW, 0x15040d00);
I915_WRITE(CSIEW0, 0x007f0000);
I915_WRITE(CSIEW1, 0x1e220004);
I915_WRITE(CSIEW2, 0x04000004);
for (i = 0; i < 5; i++)
I915_WRITE(PEW + (i * 4), 0);
for (i = 0; i < 3; i++)
I915_WRITE(DEW + (i * 4), 0);
/* Program P-state weights to account for frequency power adjustment */
for (i = 0; i < 16; i++) {
u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
unsigned long freq = intel_pxfreq(pxvidfreq);
unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
unsigned long val;
val = vid * vid;
val *= (freq / 1000);
val *= 255;
val /= (127*127*900);
if (val > 0xff)
DRM_ERROR("bad pxval: %ld\n", val);
pxw[i] = val;
}
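/* The weight above computes vid^2 * (freq / 1000) * 255 /
* (127 * 127 * 900), which appears to normalise each P-state's
* voltage/frequency product to an 8-bit value; out-of-range results
* are flagged as a bad pxval.
*/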
/* Render standby states get 0 weight */
pxw[14] = 0;
pxw[15] = 0;
for (i = 0; i < 4; i++) {
u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
I915_WRITE(PXW + (i * 4), val);
}
/* Adjust magic regs to magic values (more experimental results) */
I915_WRITE(OGW0, 0);
I915_WRITE(OGW1, 0);
I915_WRITE(EG0, 0x00007f00);
I915_WRITE(EG1, 0x0000000e);
I915_WRITE(EG2, 0x000e0000);
I915_WRITE(EG3, 0x68000300);
I915_WRITE(EG4, 0x42000000);
I915_WRITE(EG5, 0x00140031);
I915_WRITE(EG6, 0);
I915_WRITE(EG7, 0);
for (i = 0; i < 8; i++)
I915_WRITE(PXWL + (i * 4), 0);
/* Enable PMON + select events */
I915_WRITE(ECR, 0x80000019);
lcfuse = I915_READ(LCFUSE02);
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 pcu_mbox, rc6_mask = 0;
int cur_freq, min_freq, max_freq;
int i;
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
*
* Perhaps there might be some value in exposing these to
* userspace...
*/
I915_WRITE(GEN6_RC_STATE, 0);
mutex_lock(&dev_priv->dev->struct_mutex);
gen6_gt_force_wake_get(dev_priv);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
for (i = 0; i < I915_NUM_RINGS; i++)
I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
if (i915_enable_rc6)
rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
GEN6_RC_CTL_RC6_ENABLE;
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
GEN6_RC_CTL_EI_MODE(1) |
GEN6_RC_CTL_HW_ENABLE);
I915_WRITE(GEN6_RPNSWREQ,
GEN6_FREQUENCY(10) |
GEN6_OFFSET(0) |
GEN6_AGGRESSIVE_TURBO);
I915_WRITE(GEN6_RC_VIDEO_FREQ,
GEN6_FREQUENCY(12));
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
18 << 24 |
6 << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
I915_WRITE(GEN6_RP_UP_EI, 100000);
I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_USE_NORMAL_FREQ |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_CONT);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
I915_WRITE(GEN6_PCODE_DATA, 0);
I915_WRITE(GEN6_PCODE_MAILBOX,
GEN6_PCODE_READY |
GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
min_freq = (rp_state_cap & 0xff0000) >> 16;
max_freq = rp_state_cap & 0xff;
cur_freq = (gt_perf_status & 0xff00) >> 8;
/* Check for overclock support */
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
pcu_mbox = I915_READ(GEN6_PCODE_DATA);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
max_freq = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
/* In units of 100MHz */
dev_priv->max_delay = max_freq;
dev_priv->min_delay = min_freq;
dev_priv->cur_delay = cur_freq;
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER,
GEN6_PM_MBOX_EVENT |
GEN6_PM_THERMAL_EVENT |
GEN6_PM_RP_DOWN_TIMEOUT |
GEN6_PM_RP_UP_THRESHOLD |
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_UP_EI_EXPIRED |
GEN6_PM_RP_DOWN_EI_EXPIRED);
spin_lock_irq(&dev_priv->rps_lock);
WARN_ON(dev_priv->pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps_lock);
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
gen6_gt_force_wake_put(dev_priv);
mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
/* Required for FBC */
dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
DPFCRUNIT_CLOCK_GATE_DISABLE |
DPFDUNIT_CLOCK_GATE_DISABLE;
/* Required for CxSR */
dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
SVSMUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
/*
* According to the spec the following bits should be set in
* order to enable memory self-refresh
* The bit 22/21 of 0x42004
* The bit 5 of 0x42020
* The bit 15 of 0x45000
*/
I915_WRITE(ILK_DISPLAY_CHICKEN2,
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
I915_WRITE(ILK_DSPCLK_GATE,
(I915_READ(ILK_DSPCLK_GATE) |
ILK_DPARB_CLK_GATE));
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
/*
* Based on the document from hardware guys the following bits
* should be set unconditionally in order to enable FBC.
* The bit 22 of 0x42000
* The bit 22 of 0x42004
* The bit 7,8,9 of 0x42020.
*/
if (IS_IRONLAKE_M(dev)) {
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
I915_WRITE(ILK_DSPCLK_GATE,
I915_READ(ILK_DSPCLK_GATE) |
ILK_DPFC_DIS1 |
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
}
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
I915_WRITE(_3D_CHICKEN2,
_3D_CHICKEN2_WM_READ_PIPELINED << 16 |
_3D_CHICKEN2_WM_READ_PIPELINED);
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
* some amount of runtime in the Mesa "fire" demo, and Unigine
* Sanctuary and Tropics, and apparently anything else with
* alpha test or pixel discard.
*
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
/*
* According to the spec the following bits should be
* set in order to enable memory self-refresh and fbc:
* The bit21 and bit22 of 0x42000
* The bit21 and bit22 of 0x42004
* The bit5 and bit7 of 0x42020
* The bit14 of 0x70180
* The bit14 of 0x71180
*/
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
I915_WRITE(ILK_DSPCLK_GATE,
I915_READ(ILK_DSPCLK_GATE) |
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
for_each_pipe(pipe)
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
for_each_pipe(pipe)
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
}
static void g4x_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
GS_UNIT_CLOCK_GATE_DISABLE |
CL_UNIT_CLOCK_GATE_DISABLE);
I915_WRITE(RAMCLK_GATE_D, 0);
dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
OVRUNIT_CLOCK_GATE_DISABLE |
OVCUNIT_CLOCK_GATE_DISABLE;
if (IS_GM45(dev))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}
static void crestline_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(DSPCLK_GATE_D, 0);
I915_WRITE(RAMCLK_GATE_D, 0);
I915_WRITE16(DEUC, 0);
}
static void broadwater_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
I965_ISC_CLOCK_GATE_DISABLE |
I965_FBC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
}
static void gen3_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dstate = I915_READ(D_STATE);
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
I915_WRITE(D_STATE, dstate);
}
static void i85x_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
static void i830_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
static void ibx_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
}
static void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->renderctx) {
i915_gem_object_unpin(dev_priv->renderctx);
drm_gem_object_unreference(&dev_priv->renderctx->base);
dev_priv->renderctx = NULL;
}
if (dev_priv->pwrctx) {
i915_gem_object_unpin(dev_priv->pwrctx);
drm_gem_object_unreference(&dev_priv->pwrctx->base);
dev_priv->pwrctx = NULL;
}
}
static void ironlake_disable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (I915_READ(PWRCTXA)) {
/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
50);
I915_WRITE(PWRCTXA, 0);
POSTING_READ(PWRCTXA);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
POSTING_READ(RSTDBYCTL);
}
ironlake_teardown_rc6(dev);
}
static int ironlake_setup_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->renderctx == NULL)
dev_priv->renderctx = intel_alloc_context_page(dev);
if (!dev_priv->renderctx)
return -ENOMEM;
if (dev_priv->pwrctx == NULL)
dev_priv->pwrctx = intel_alloc_context_page(dev);
if (!dev_priv->pwrctx) {
ironlake_teardown_rc6(dev);
return -ENOMEM;
}
return 0;
}
void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
if (!i915_enable_rc6)
return;
mutex_lock(&dev->struct_mutex);
ret = ironlake_setup_rc6(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return;
}
/*
* GPU can automatically power down the render unit if given a page
* to save state.
*/
ret = BEGIN_LP_RING(6);
if (ret) {
ironlake_teardown_rc6(dev);
mutex_unlock(&dev->struct_mutex);
return;
}
OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
OUT_RING(MI_SET_CONTEXT);
OUT_RING(dev_priv->renderctx->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
MI_RESTORE_INHIBIT);
OUT_RING(MI_SUSPEND_FLUSH);
OUT_RING(MI_NOOP);
OUT_RING(MI_FLUSH);
ADVANCE_LP_RING();
/*
* Wait for the command parser to advance past MI_SET_CONTEXT. The HW
* does an implicit flush, combined with MI_FLUSH above, it should be
* safe to assume that renderctx is valid
*/
ret = intel_wait_ring_idle(LP_RING(dev_priv));
if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n");
ironlake_teardown_rc6(dev);
mutex_unlock(&dev->struct_mutex);
return;
}
I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
mutex_unlock(&dev->struct_mutex);
}
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->display.init_clock_gating(dev);
if (dev_priv->display.init_pch_clock_gating)
dev_priv->display.init_pch_clock_gating(dev);
}
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.dpms = ironlake_crtc_dpms;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
} else {
dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
}
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
} else if (IS_CRESTLINE(dev)) {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_enable_fbc;
dev_priv->display.disable_fbc = i8xx_disable_fbc;
}
/* 855GM needs testing */
}
/* Returns the core display clock speed */
if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
else if (IS_I915GM(dev))
dev_priv->display.get_display_clock_speed =
i915gm_get_display_clock_speed;
else if (IS_I865G(dev))
dev_priv->display.get_display_clock_speed =
i865_get_display_clock_speed;
else if (IS_I85X(dev))
dev_priv->display.get_display_clock_speed =
i855_get_display_clock_speed;
else /* 852, 830 */
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
dev_priv->display.update_wm = ironlake_update_wm;
else {
DRM_DEBUG_KMS("Failed to get proper latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
} else if (IS_GEN6(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
} else
dev_priv->display.update_wm = NULL;
} else if (IS_PINEVIEW(dev)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
DRM_INFO("failed to find known CxSR latency "
"(found ddr%s fsb freq %d, mem freq %d), "
"disabling CxSR\n",
(dev_priv->is_ddr3 == 1) ? "3" : "2",
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
pineview_disable_cxsr(dev);
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_G4X(dev)) {
dev_priv->display.update_wm = g4x_update_wm;
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
} else if (IS_GEN4(dev)) {
dev_priv->display.update_wm = i965_update_wm;
if (IS_CRESTLINE(dev))
dev_priv->display.init_clock_gating = crestline_init_clock_gating;
else if (IS_BROADWATER(dev))
dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
} else if (IS_GEN3(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_I865G(dev)) {
dev_priv->display.update_wm = i830_update_wm;
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
dev_priv->display.get_fifo_size = i830_get_fifo_size;
} else if (IS_I85X(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i85x_get_fifo_size;
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
} else {
dev_priv->display.update_wm = i830_update_wm;
dev_priv->display.init_clock_gating = i830_init_clock_gating;
if (IS_845G(dev))
dev_priv->display.get_fifo_size = i845_get_fifo_size;
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
/* Default just returns -ENODEV to indicate unsupported */
dev_priv->display.queue_flip = intel_default_queue_flip;
switch (INTEL_INFO(dev)->gen) {
case 2:
dev_priv->display.queue_flip = intel_gen2_queue_flip;
break;
case 3:
dev_priv->display.queue_flip = intel_gen3_queue_flip;
break;
case 4:
case 5:
dev_priv->display.queue_flip = intel_gen4_queue_flip;
break;
case 6:
dev_priv->display.queue_flip = intel_gen6_queue_flip;
break;
case 7:
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
}
}
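/*
 * Illustrative note: intel_init_display() only fills in the per-chip
 * function pointer table; callers later go through dev_priv->display
 * without any further generation checks, roughly like:
 *
 *	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
 *
 * (a hypothetical call site, shown for clarity only).
 */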
/*
* Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
* resume, or other times. This quirk makes sure that's the case for
* affected systems.
*/
static void quirk_pipea_force(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_PIPEA_FORCE;
DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
}
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
*/
static void quirk_ssc_force_disable(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
}
struct intel_quirk {
int device;
int subsystem_vendor;
int subsystem_device;
void (*hook)(struct drm_device *dev);
};
struct intel_quirk intel_quirks[] = {
/* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
{ 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
/* Thinkpad R31 needs pipe A force quirk */
{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
{ 0x3577, 0x1014, 0x0513, quirk_pipea_force },
/* ThinkPad X40 needs pipe A force quirk */
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
/* 855 & before need to leave pipe A & dpll A up */
{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
/* Lenovo U160 cannot use SSC on LVDS */
{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
};
static void intel_init_quirks(struct drm_device *dev)
{
struct pci_dev *d = dev->pdev;
int i;
for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
struct intel_quirk *q = &intel_quirks[i];
if (d->device == q->device &&
(d->subsystem_vendor == q->subsystem_vendor ||
q->subsystem_vendor == PCI_ANY_ID) &&
(d->subsystem_device == q->subsystem_device ||
q->subsystem_device == PCI_ANY_ID))
q->hook(dev);
}
}
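/*
 * Matching example (illustrative): the table entry
 *	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }
 * applies to every 855-class device, because PCI_ANY_ID acts as a
 * wildcard for the subsystem vendor and subsystem device fields in the
 * comparison above.
 */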
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u8 sr1;
u32 vga_reg;
if (HAS_PCH_SPLIT(dev))
vga_reg = CPU_VGACNTRL;
else
vga_reg = VGACNTRL;
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
outb(1, VGA_SR_INDEX);
sr1 = inb(VGA_SR_DATA);
outb(sr1 | 1<<5, VGA_SR_DATA);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
I915_WRITE(vga_reg, VGA_DISP_DISABLE);
POSTING_READ(vga_reg);
}
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.funcs = (void *)&intel_mode_funcs;
intel_init_quirks(dev);
intel_init_display(dev);
if (IS_GEN2(dev)) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
} else if (IS_GEN3(dev)) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
}
dev->mode_config.fb_base = dev->agp->base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
for (i = 0; i < dev_priv->num_pipe; i++) {
intel_crtc_init(dev, i);
}
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
intel_init_clock_gating(dev);
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
}
if (IS_GEN6(dev) || IS_GEN7(dev))
gen6_enable_rps(dev_priv);
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
}
void intel_modeset_gem_init(struct drm_device *dev)
{
if (IS_IRONLAKE_M(dev))
ironlake_enable_rc6(dev);
intel_setup_overlay(dev);
}
void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
drm_kms_helper_poll_fini(dev);
mutex_lock(&dev->struct_mutex);
intel_unregister_dsm_handler();
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
continue;
intel_crtc = to_intel_crtc(crtc);
intel_increase_pllclock(crtc);
}
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
if (IS_GEN6(dev) || IS_GEN7(dev))
gen6_disable_rps(dev);
if (IS_IRONLAKE_M(dev))
ironlake_disable_rc6(dev);
mutex_unlock(&dev->struct_mutex);
/* Disable the irq before mode object teardown, for the irq might
* enqueue unpin/hotplug work. */
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
/* Shut off idle work before the crtcs get freed. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
intel_crtc = to_intel_crtc(crtc);
del_timer_sync(&intel_crtc->idle_timer);
}
del_timer_sync(&dev_priv->idle_timer);
cancel_work_sync(&dev_priv->idle_work);
drm_mode_config_cleanup(dev);
}
/*
* Return which encoder is currently attached for connector.
*/
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
return &intel_attached_encoder(connector)->base;
}
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder)
{
connector->encoder = encoder;
drm_mode_connector_attach_encoder(&connector->base,
&encoder->base);
}
/*
* set vga decode state - true == enable VGA decode
*/
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 gmch_ctrl;
pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
if (state)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
return 0;
}
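/*
 * Usage sketch (hypothetical caller, for illustration only): the VGA
 * arbitration layer can use this hook to route legacy VGA decode away
 * from the integrated GPU:
 *
 *	intel_modeset_vga_set_state(dev, false);	decode disabled
 *	intel_modeset_vga_set_state(dev, true);		decode re-enabled
 */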
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
struct intel_display_error_state {
struct intel_cursor_error_state {
u32 control;
u32 position;
u32 base;
u32 size;
} cursor[2];
struct intel_pipe_error_state {
u32 conf;
u32 source;
u32 htotal;
u32 hblank;
u32 hsync;
u32 vtotal;
u32 vblank;
u32 vsync;
} pipe[2];
struct intel_plane_error_state {
u32 control;
u32 stride;
u32 size;
u32 pos;
u32 addr;
u32 surface;
u32 tile_offset;
} plane[2];
};
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
int i;
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
for (i = 0; i < 2; i++) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->cursor[i].base = I915_READ(CURBASE(i));
error->plane[i].control = I915_READ(DSPCNTR(i));
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
error->plane[i].size = I915_READ(DSPSIZE(i));
error->plane[i].pos = I915_READ(DSPPOS(i));
error->plane[i].addr = I915_READ(DSPADDR(i));
if (INTEL_INFO(dev)->gen >= 4) {
error->plane[i].surface = I915_READ(DSPSURF(i));
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
error->pipe[i].conf = I915_READ(PIPECONF(i));
error->pipe[i].source = I915_READ(PIPESRC(i));
error->pipe[i].htotal = I915_READ(HTOTAL(i));
error->pipe[i].hblank = I915_READ(HBLANK(i));
error->pipe[i].hsync = I915_READ(HSYNC(i));
error->pipe[i].vtotal = I915_READ(VTOTAL(i));
error->pipe[i].vblank = I915_READ(VBLANK(i));
error->pipe[i].vsync = I915_READ(VSYNC(i));
}
return error;
}
void
intel_display_print_error_state(struct seq_file *m,
struct drm_device *dev,
struct intel_display_error_state *error)
{
int i;
for (i = 0; i < 2; i++) {
seq_printf(m, "Pipe [%d]:\n", i);
seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
seq_printf(m, "Plane [%d]:\n", i);
seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
seq_printf(m, " POS: %08x\n", error->plane[i].pos);
seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
if (INTEL_INFO(dev)->gen >= 4) {
seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
}
seq_printf(m, "Cursor [%d]:\n", i);
seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
seq_printf(m, " POS: %08x\n", error->cursor[i].position);
seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
}
#endif
| gpl-2.0 |
sandeshghimire/xlnx-3.17 | drivers/rtc/rtc-s3c.c | 198 | 16936 | /* drivers/rtc/rtc-s3c.c
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Copyright (c) 2004,2006 Simtec Electronics
* Ben Dooks, <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* S3C2410/S3C2440/S3C24XX Internal RTC Driver
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/clk.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/irq.h>
#include "rtc-s3c.h"
enum s3c_cpu_type {
TYPE_S3C2410,
TYPE_S3C2416,
TYPE_S3C2443,
TYPE_S3C64XX,
};
struct s3c_rtc_drv_data {
int cpu_type;
};
/* I have yet to find an S3C implementation with more than one
* of these rtc blocks in */
static struct clk *rtc_clk;
static void __iomem *s3c_rtc_base;
static int s3c_rtc_alarmno;
static int s3c_rtc_tickno;
static enum s3c_cpu_type s3c_rtc_cpu_type;
static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
static void s3c_rtc_alarm_clk_enable(bool enable)
{
static DEFINE_SPINLOCK(s3c_rtc_alarm_clk_lock);
static bool alarm_clk_enabled;
unsigned long irq_flags;
spin_lock_irqsave(&s3c_rtc_alarm_clk_lock, irq_flags);
if (enable) {
if (!alarm_clk_enabled) {
clk_enable(rtc_clk);
alarm_clk_enabled = true;
}
} else {
if (alarm_clk_enabled) {
clk_disable(rtc_clk);
alarm_clk_enabled = false;
}
}
spin_unlock_irqrestore(&s3c_rtc_alarm_clk_lock, irq_flags);
}
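/*
 * Note (descriptive, no functional change): the alarm_clk_enabled flag
 * makes this helper idempotent, so the clk_enable()/clk_disable() pair
 * on rtc_clk stays balanced even when s3c_rtc_setaie() and the alarm
 * IRQ handler both request the same state.
 */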
/* IRQ Handlers */
static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
{
struct rtc_device *rdev = id;
clk_enable(rtc_clk);
rtc_update_irq(rdev, 1, RTC_AF | RTC_IRQF);
if (s3c_rtc_cpu_type == TYPE_S3C64XX)
writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP);
clk_disable(rtc_clk);
s3c_rtc_alarm_clk_enable(false);
return IRQ_HANDLED;
}
static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
{
struct rtc_device *rdev = id;
clk_enable(rtc_clk);
rtc_update_irq(rdev, 1, RTC_PF | RTC_IRQF);
if (s3c_rtc_cpu_type == TYPE_S3C64XX)
writeb(S3C2410_INTP_TIC, s3c_rtc_base + S3C2410_INTP);
clk_disable(rtc_clk);
return IRQ_HANDLED;
}
/* Update control registers */
static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
{
unsigned int tmp;
dev_dbg(dev, "%s: aie=%d\n", __func__, enabled);
clk_enable(rtc_clk);
tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
if (enabled)
tmp |= S3C2410_RTCALM_ALMEN;
writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
clk_disable(rtc_clk);
s3c_rtc_alarm_clk_enable(enabled);
return 0;
}
static int s3c_rtc_setfreq(struct device *dev, int freq)
{
struct platform_device *pdev = to_platform_device(dev);
struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
unsigned int tmp = 0;
int val;
if (!is_power_of_2(freq))
return -EINVAL;
clk_enable(rtc_clk);
spin_lock_irq(&s3c_rtc_pie_lock);
if (s3c_rtc_cpu_type != TYPE_S3C64XX) {
tmp = readb(s3c_rtc_base + S3C2410_TICNT);
tmp &= S3C2410_TICNT_ENABLE;
}
val = (rtc_dev->max_user_freq / freq) - 1;
if (s3c_rtc_cpu_type == TYPE_S3C2416 || s3c_rtc_cpu_type == TYPE_S3C2443) {
tmp |= S3C2443_TICNT_PART(val);
writel(S3C2443_TICNT1_PART(val), s3c_rtc_base + S3C2443_TICNT1);
if (s3c_rtc_cpu_type == TYPE_S3C2416)
writel(S3C2416_TICNT2_PART(val), s3c_rtc_base + S3C2416_TICNT2);
} else {
tmp |= val;
}
writel(tmp, s3c_rtc_base + S3C2410_TICNT);
spin_unlock_irq(&s3c_rtc_pie_lock);
clk_disable(rtc_clk);
return 0;
}
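/*
 * Worked example for the divider maths above (illustrative): with
 * max_user_freq = 32768 (as set for non-S3C2410 parts in probe) and a
 * requested freq of 64 Hz, val = (32768 / 64) - 1 = 511, so the tick
 * counter reloads every 512 source clocks. Non-power-of-2 frequencies
 * are rejected by the is_power_of_2() check before any register is
 * touched.
 */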
/* Time read/write */
static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
unsigned int have_retried = 0;
void __iomem *base = s3c_rtc_base;
clk_enable(rtc_clk);
retry_get_time:
rtc_tm->tm_min = readb(base + S3C2410_RTCMIN);
rtc_tm->tm_hour = readb(base + S3C2410_RTCHOUR);
rtc_tm->tm_mday = readb(base + S3C2410_RTCDATE);
rtc_tm->tm_mon = readb(base + S3C2410_RTCMON);
rtc_tm->tm_year = readb(base + S3C2410_RTCYEAR);
rtc_tm->tm_sec = readb(base + S3C2410_RTCSEC);
/* the only way to work out whether the system was mid-update
* when we read it is to check the second counter, and if it
* is zero, then we re-try the entire read
*/
if (rtc_tm->tm_sec == 0 && !have_retried) {
have_retried = 1;
goto retry_get_time;
}
rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
rtc_tm->tm_year += 100;
dev_dbg(dev, "read time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
rtc_tm->tm_mon -= 1;
clk_disable(rtc_clk);
return rtc_valid_tm(rtc_tm);
}
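/*
 * Illustrative example of the BCD conversion above: a raw RTCSEC value
 * of 0x59 decodes via bcd2bin() to 59 seconds. The single retry when
 * tm_sec reads as zero guards against the counters rolling over between
 * the individual register reads.
 */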
static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
{
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
dev_dbg(dev, "set time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
/* we get around y2k by simply not supporting it */
if (year < 0 || year >= 100) {
dev_err(dev, "rtc only supports 100 years\n");
return -EINVAL;
}
clk_enable(rtc_clk);
writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC);
writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN);
writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
writeb(bin2bcd(tm->tm_mday), base + S3C2410_RTCDATE);
writeb(bin2bcd(tm->tm_mon + 1), base + S3C2410_RTCMON);
writeb(bin2bcd(year), base + S3C2410_RTCYEAR);
clk_disable(rtc_clk);
return 0;
}
static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_time *alm_tm = &alrm->time;
void __iomem *base = s3c_rtc_base;
unsigned int alm_en;
clk_enable(rtc_clk);
alm_tm->tm_sec = readb(base + S3C2410_ALMSEC);
alm_tm->tm_min = readb(base + S3C2410_ALMMIN);
alm_tm->tm_hour = readb(base + S3C2410_ALMHOUR);
alm_tm->tm_mon = readb(base + S3C2410_ALMMON);
alm_tm->tm_mday = readb(base + S3C2410_ALMDATE);
alm_tm->tm_year = readb(base + S3C2410_ALMYEAR);
alm_en = readb(base + S3C2410_RTCALM);
alrm->enabled = (alm_en & S3C2410_RTCALM_ALMEN) ? 1 : 0;
dev_dbg(dev, "read alarm %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alm_en,
1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
/* decode the alarm enable field */
if (alm_en & S3C2410_RTCALM_SECEN)
alm_tm->tm_sec = bcd2bin(alm_tm->tm_sec);
else
alm_tm->tm_sec = -1;
if (alm_en & S3C2410_RTCALM_MINEN)
alm_tm->tm_min = bcd2bin(alm_tm->tm_min);
else
alm_tm->tm_min = -1;
if (alm_en & S3C2410_RTCALM_HOUREN)
alm_tm->tm_hour = bcd2bin(alm_tm->tm_hour);
else
alm_tm->tm_hour = -1;
if (alm_en & S3C2410_RTCALM_DAYEN)
alm_tm->tm_mday = bcd2bin(alm_tm->tm_mday);
else
alm_tm->tm_mday = -1;
if (alm_en & S3C2410_RTCALM_MONEN) {
alm_tm->tm_mon = bcd2bin(alm_tm->tm_mon);
alm_tm->tm_mon -= 1;
} else {
alm_tm->tm_mon = -1;
}
if (alm_en & S3C2410_RTCALM_YEAREN)
alm_tm->tm_year = bcd2bin(alm_tm->tm_year);
else
alm_tm->tm_year = -1;
clk_disable(rtc_clk);
return 0;
}
static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct rtc_time *tm = &alrm->time;
void __iomem *base = s3c_rtc_base;
unsigned int alrm_en;
clk_enable(rtc_clk);
dev_dbg(dev, "s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n",
alrm->enabled,
1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
alrm_en = readb(base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN;
writeb(0x00, base + S3C2410_RTCALM);
if (tm->tm_sec < 60 && tm->tm_sec >= 0) {
alrm_en |= S3C2410_RTCALM_SECEN;
writeb(bin2bcd(tm->tm_sec), base + S3C2410_ALMSEC);
}
if (tm->tm_min < 60 && tm->tm_min >= 0) {
alrm_en |= S3C2410_RTCALM_MINEN;
writeb(bin2bcd(tm->tm_min), base + S3C2410_ALMMIN);
}
if (tm->tm_hour < 24 && tm->tm_hour >= 0) {
alrm_en |= S3C2410_RTCALM_HOUREN;
writeb(bin2bcd(tm->tm_hour), base + S3C2410_ALMHOUR);
}
dev_dbg(dev, "setting S3C2410_RTCALM to %08x\n", alrm_en);
writeb(alrm_en, base + S3C2410_RTCALM);
s3c_rtc_setaie(dev, alrm->enabled);
clk_disable(rtc_clk);
return 0;
}
static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
{
unsigned int ticnt;
clk_enable(rtc_clk);
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
ticnt = readw(s3c_rtc_base + S3C2410_RTCCON);
ticnt &= S3C64XX_RTCCON_TICEN;
} else {
ticnt = readb(s3c_rtc_base + S3C2410_TICNT);
ticnt &= S3C2410_TICNT_ENABLE;
}
seq_printf(seq, "periodic_IRQ\t: %s\n", ticnt ? "yes" : "no");
clk_disable(rtc_clk);
return 0;
}
static const struct rtc_class_ops s3c_rtcops = {
.read_time = s3c_rtc_gettime,
.set_time = s3c_rtc_settime,
.read_alarm = s3c_rtc_getalarm,
.set_alarm = s3c_rtc_setalarm,
.proc = s3c_rtc_proc,
.alarm_irq_enable = s3c_rtc_setaie,
};
static void s3c_rtc_enable(struct platform_device *pdev, int en)
{
void __iomem *base = s3c_rtc_base;
unsigned int tmp;
if (s3c_rtc_base == NULL)
return;
clk_enable(rtc_clk);
if (!en) {
tmp = readw(base + S3C2410_RTCCON);
if (s3c_rtc_cpu_type == TYPE_S3C64XX)
tmp &= ~S3C64XX_RTCCON_TICEN;
tmp &= ~S3C2410_RTCCON_RTCEN;
writew(tmp, base + S3C2410_RTCCON);
if (s3c_rtc_cpu_type != TYPE_S3C64XX) {
tmp = readb(base + S3C2410_TICNT);
tmp &= ~S3C2410_TICNT_ENABLE;
writeb(tmp, base + S3C2410_TICNT);
}
} else {
/* re-enable the device, and check it is ok */
if ((readw(base + S3C2410_RTCCON) & S3C2410_RTCCON_RTCEN) == 0) {
dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
tmp = readw(base + S3C2410_RTCCON);
writew(tmp | S3C2410_RTCCON_RTCEN,
base + S3C2410_RTCCON);
}
if ((readw(base + S3C2410_RTCCON) & S3C2410_RTCCON_CNTSEL)) {
dev_info(&pdev->dev, "removing RTCCON_CNTSEL\n");
tmp = readw(base + S3C2410_RTCCON);
writew(tmp & ~S3C2410_RTCCON_CNTSEL,
base + S3C2410_RTCCON);
}
if ((readw(base + S3C2410_RTCCON) & S3C2410_RTCCON_CLKRST)) {
dev_info(&pdev->dev, "removing RTCCON_CLKRST\n");
tmp = readw(base + S3C2410_RTCCON);
writew(tmp & ~S3C2410_RTCCON_CLKRST,
base + S3C2410_RTCCON);
}
}
clk_disable(rtc_clk);
}
static int s3c_rtc_remove(struct platform_device *dev)
{
s3c_rtc_setaie(&dev->dev, 0);
clk_unprepare(rtc_clk);
rtc_clk = NULL;
return 0;
}
static const struct of_device_id s3c_rtc_dt_match[];
static inline int s3c_rtc_get_driver_data(struct platform_device *pdev)
{
#ifdef CONFIG_OF
struct s3c_rtc_drv_data *data;
if (pdev->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(s3c_rtc_dt_match, pdev->dev.of_node);
data = (struct s3c_rtc_drv_data *) match->data;
return data->cpu_type;
}
#endif
return platform_get_device_id(pdev)->driver_data;
}
static int s3c_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
struct rtc_time rtc_tm;
struct resource *res;
int ret;
int tmp;
dev_dbg(&pdev->dev, "%s: probe=%p\n", __func__, pdev);
/* find the IRQs */
s3c_rtc_tickno = platform_get_irq(pdev, 1);
if (s3c_rtc_tickno < 0) {
dev_err(&pdev->dev, "no irq for rtc tick\n");
return s3c_rtc_tickno;
}
s3c_rtc_alarmno = platform_get_irq(pdev, 0);
if (s3c_rtc_alarmno < 0) {
dev_err(&pdev->dev, "no irq for alarm\n");
return s3c_rtc_alarmno;
}
dev_dbg(&pdev->dev, "s3c2410_rtc: tick irq %d, alarm irq %d\n",
s3c_rtc_tickno, s3c_rtc_alarmno);
/* get the memory region */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
s3c_rtc_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(s3c_rtc_base))
return PTR_ERR(s3c_rtc_base);
rtc_clk = devm_clk_get(&pdev->dev, "rtc");
if (IS_ERR(rtc_clk)) {
dev_err(&pdev->dev, "failed to find rtc clock source\n");
ret = PTR_ERR(rtc_clk);
rtc_clk = NULL;
return ret;
}
clk_prepare_enable(rtc_clk);
/* check to see if everything is setup correctly */
s3c_rtc_enable(pdev, 1);
dev_dbg(&pdev->dev, "s3c2410_rtc: RTCCON=%02x\n",
readw(s3c_rtc_base + S3C2410_RTCCON));
device_init_wakeup(&pdev->dev, 1);
/* register RTC and exit */
rtc = devm_rtc_device_register(&pdev->dev, "s3c", &s3c_rtcops,
THIS_MODULE);
if (IS_ERR(rtc)) {
dev_err(&pdev->dev, "cannot attach rtc\n");
ret = PTR_ERR(rtc);
goto err_nortc;
}
s3c_rtc_cpu_type = s3c_rtc_get_driver_data(pdev);
/* Check RTC Time */
s3c_rtc_gettime(NULL, &rtc_tm);
if (rtc_valid_tm(&rtc_tm)) {
rtc_tm.tm_year = 100;
rtc_tm.tm_mon = 0;
rtc_tm.tm_mday = 1;
rtc_tm.tm_hour = 0;
rtc_tm.tm_min = 0;
rtc_tm.tm_sec = 0;
s3c_rtc_settime(NULL, &rtc_tm);
dev_warn(&pdev->dev, "warning: invalid RTC value so initializing it\n");
}
if (s3c_rtc_cpu_type != TYPE_S3C2410)
rtc->max_user_freq = 32768;
else
rtc->max_user_freq = 128;
if (s3c_rtc_cpu_type == TYPE_S3C2416 || s3c_rtc_cpu_type == TYPE_S3C2443) {
tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
tmp |= S3C2443_RTCCON_TICSEL;
writew(tmp, s3c_rtc_base + S3C2410_RTCCON);
}
platform_set_drvdata(pdev, rtc);
s3c_rtc_setfreq(&pdev->dev, 1);
ret = devm_request_irq(&pdev->dev, s3c_rtc_alarmno, s3c_rtc_alarmirq,
0, "s3c2410-rtc alarm", rtc);
if (ret) {
dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_alarmno, ret);
goto err_nortc;
}
ret = devm_request_irq(&pdev->dev, s3c_rtc_tickno, s3c_rtc_tickirq,
0, "s3c2410-rtc tick", rtc);
if (ret) {
dev_err(&pdev->dev, "IRQ%d error %d\n", s3c_rtc_tickno, ret);
goto err_nortc;
}
clk_disable(rtc_clk);
return 0;
err_nortc:
s3c_rtc_enable(pdev, 0);
clk_disable_unprepare(rtc_clk);
return ret;
}
#ifdef CONFIG_PM_SLEEP
/* RTC Power management control */
static int ticnt_save, ticnt_en_save;
static bool wake_en;
static int s3c_rtc_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
clk_enable(rtc_clk);
/* save TICNT for anyone using periodic interrupts */
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON);
ticnt_en_save &= S3C64XX_RTCCON_TICEN;
ticnt_save = readl(s3c_rtc_base + S3C2410_TICNT);
} else {
ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
}
s3c_rtc_enable(pdev, 0);
if (device_may_wakeup(dev) && !wake_en) {
if (enable_irq_wake(s3c_rtc_alarmno) == 0)
wake_en = true;
else
dev_err(dev, "enable_irq_wake failed\n");
}
clk_disable(rtc_clk);
return 0;
}
static int s3c_rtc_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
unsigned int tmp;
clk_enable(rtc_clk);
s3c_rtc_enable(pdev, 1);
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
writel(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
if (ticnt_en_save) {
tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
writew(tmp | ticnt_en_save,
s3c_rtc_base + S3C2410_RTCCON);
}
} else {
writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
}
if (device_may_wakeup(dev) && wake_en) {
disable_irq_wake(s3c_rtc_alarmno);
wake_en = false;
}
clk_disable(rtc_clk);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(s3c_rtc_pm_ops, s3c_rtc_suspend, s3c_rtc_resume);
#ifdef CONFIG_OF
static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
[TYPE_S3C2410] = { TYPE_S3C2410 },
[TYPE_S3C2416] = { TYPE_S3C2416 },
[TYPE_S3C2443] = { TYPE_S3C2443 },
[TYPE_S3C64XX] = { TYPE_S3C64XX },
};
static const struct of_device_id s3c_rtc_dt_match[] = {
{
.compatible = "samsung,s3c2410-rtc",
.data = &s3c_rtc_drv_data_array[TYPE_S3C2410],
}, {
.compatible = "samsung,s3c2416-rtc",
.data = &s3c_rtc_drv_data_array[TYPE_S3C2416],
}, {
.compatible = "samsung,s3c2443-rtc",
.data = &s3c_rtc_drv_data_array[TYPE_S3C2443],
}, {
.compatible = "samsung,s3c6410-rtc",
.data = &s3c_rtc_drv_data_array[TYPE_S3C64XX],
},
{},
};
MODULE_DEVICE_TABLE(of, s3c_rtc_dt_match);
#endif
static struct platform_device_id s3c_rtc_driver_ids[] = {
{
.name = "s3c2410-rtc",
.driver_data = TYPE_S3C2410,
}, {
.name = "s3c2416-rtc",
.driver_data = TYPE_S3C2416,
}, {
.name = "s3c2443-rtc",
.driver_data = TYPE_S3C2443,
}, {
.name = "s3c64xx-rtc",
.driver_data = TYPE_S3C64XX,
},
{ }
};
MODULE_DEVICE_TABLE(platform, s3c_rtc_driver_ids);
static struct platform_driver s3c_rtc_driver = {
.probe = s3c_rtc_probe,
.remove = s3c_rtc_remove,
.id_table = s3c_rtc_driver_ids,
.driver = {
.name = "s3c-rtc",
.owner = THIS_MODULE,
.pm = &s3c_rtc_pm_ops,
.of_match_table = of_match_ptr(s3c_rtc_dt_match),
},
};
module_platform_driver(s3c_rtc_driver);
MODULE_DESCRIPTION("Samsung S3C RTC Driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:s3c2410-rtc");
| gpl-2.0 |
PhenomX1998/android_kernel_oneplus_msm8996 | drivers/gpu/drm/radeon/radeon_cs.c | 198 | 23823 | /*
* Copyright 2008 Jerome Glisse.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"
#define RADEON_CS_MAX_PRIORITY 32u
#define RADEON_CS_NUM_BUCKETS (RADEON_CS_MAX_PRIORITY + 1)
/* This is based on the bucket sort with O(n) time complexity.
* An item with priority "i" is added to bucket[i]. The lists are then
* concatenated in descending order.
*/
struct radeon_cs_buckets {
struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};
static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
unsigned i;
for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
INIT_LIST_HEAD(&b->bucket[i]);
}
static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
struct list_head *item, unsigned priority)
{
/* Since buffers which appear sooner in the relocation list are
* likely to be used more often than buffers which appear later
* in the list, the sort mustn't change the ordering of buffers
* with the same priority, i.e. it must be stable.
*/
list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}
static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
struct list_head *out_list)
{
unsigned i;
/* Connect the sorted buckets in the output list. */
for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
list_splice(&b->bucket[i], out_list);
}
}
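/*
 * Illustrative note on the splice order above: list_splice() inserts at
 * the head of out_list, so splicing bucket[0], then bucket[1], ... ends
 * with bucket[RADEON_CS_MAX_PRIORITY] in front, i.e. the final list is
 * in descending priority order while staying stable within each bucket.
 */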
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
struct drm_device *ddev = p->rdev->ddev;
struct radeon_cs_chunk *chunk;
struct radeon_cs_buckets buckets;
unsigned i, j;
bool duplicate, need_mmap_lock = false;
int r;
if (p->chunk_relocs_idx == -1) {
return 0;
}
chunk = &p->chunks[p->chunk_relocs_idx];
p->dma_reloc_idx = 0;
/* FIXME: we assume that each reloc uses 4 dwords */
p->nrelocs = chunk->length_dw / 4;
p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
if (p->relocs_ptr == NULL) {
return -ENOMEM;
}
p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
radeon_cs_buckets_init(&buckets);
for (i = 0; i < p->nrelocs; i++) {
struct drm_radeon_cs_reloc *r;
unsigned priority;
duplicate = false;
r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
for (j = 0; j < i; j++) {
if (r->handle == p->relocs[j].handle) {
p->relocs_ptr[i] = &p->relocs[j];
duplicate = true;
break;
}
}
if (duplicate) {
p->relocs[i].handle = 0;
continue;
}
p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
r->handle);
if (p->relocs[i].gobj == NULL) {
DRM_ERROR("gem object lookup failed 0x%x\n",
r->handle);
return -ENOENT;
}
p->relocs_ptr[i] = &p->relocs[i];
p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
/* The userspace buffer priorities are from 0 to 15. A higher
* number means the buffer is more important.
* Also, the buffers used for write have a higher priority than
* the buffers used for read only, which doubles the range
* to 0 to 31. 32 is reserved for the kernel driver.
*/
priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
+ !!r->write_domain;
/* The first reloc of an UVD job is the msg and that must be in
VRAM. Also put everything into VRAM on AGP cards and older
IGP chips to avoid image corruptions. */
if (p->ring == R600_RING_TYPE_UVD_INDEX &&
(i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
p->rdev->family == CHIP_RS780 ||
p->rdev->family == CHIP_RS880)) {
/* TODO: is this still needed for NI+ ? */
p->relocs[i].prefered_domains =
RADEON_GEM_DOMAIN_VRAM;
p->relocs[i].allowed_domains =
RADEON_GEM_DOMAIN_VRAM;
/* prioritize this over any other relocation */
priority = RADEON_CS_MAX_PRIORITY;
} else {
uint32_t domain = r->write_domain ?
r->write_domain : r->read_domains;
if (domain & RADEON_GEM_DOMAIN_CPU) {
DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
"for command submission\n");
return -EINVAL;
}
p->relocs[i].prefered_domains = domain;
if (domain == RADEON_GEM_DOMAIN_VRAM)
domain |= RADEON_GEM_DOMAIN_GTT;
p->relocs[i].allowed_domains = domain;
}
if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
uint32_t domain = p->relocs[i].prefered_domains;
if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
"allowed for userptr BOs\n");
return -EINVAL;
}
need_mmap_lock = true;
domain = RADEON_GEM_DOMAIN_GTT;
p->relocs[i].prefered_domains = domain;
p->relocs[i].allowed_domains = domain;
}
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].tv.shared = !r->write_domain;
p->relocs[i].handle = r->handle;
radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
priority);
}
radeon_cs_buckets_get_list(&buckets, &p->validated);
if (p->cs_flags & RADEON_CS_USE_VM)
p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
&p->validated);
if (need_mmap_lock)
down_read(¤t->mm->mmap_sem);
r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
if (need_mmap_lock)
up_read(¤t->mm->mmap_sem);
return r;
}
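/*
 * Worked example for the priority computation above (illustrative): a
 * userspace priority of 15 on a buffer with a write domain yields
 * 15 * 2 + 1 = 31, the highest value userspace can reach; the reserved
 * RADEON_CS_MAX_PRIORITY (32) is only assigned by the kernel itself,
 * e.g. for the UVD message buffer.
 */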
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
p->priority = priority;
switch (ring) {
default:
DRM_ERROR("unknown ring id: %d\n", ring);
return -EINVAL;
case RADEON_CS_RING_GFX:
p->ring = RADEON_RING_TYPE_GFX_INDEX;
break;
case RADEON_CS_RING_COMPUTE:
if (p->rdev->family >= CHIP_TAHITI) {
if (p->priority > 0)
p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
else
p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
} else
p->ring = RADEON_RING_TYPE_GFX_INDEX;
break;
case RADEON_CS_RING_DMA:
if (p->rdev->family >= CHIP_CAYMAN) {
if (p->priority > 0)
p->ring = R600_RING_TYPE_DMA_INDEX;
else
p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
} else if (p->rdev->family >= CHIP_RV770) {
p->ring = R600_RING_TYPE_DMA_INDEX;
} else {
return -EINVAL;
}
break;
case RADEON_CS_RING_UVD:
p->ring = R600_RING_TYPE_UVD_INDEX;
break;
case RADEON_CS_RING_VCE:
/* TODO: only use the low priority ring for now */
p->ring = TN_RING_TYPE_VCE1_INDEX;
break;
}
return 0;
}
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
struct radeon_cs_reloc *reloc;
int r;
list_for_each_entry(reloc, &p->validated, tv.head) {
struct reservation_object *resv;
resv = reloc->robj->tbo.resv;
r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
reloc->tv.shared);
if (r)
return r;
}
return 0;
}
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
unsigned size, i;
u32 ring = RADEON_CS_RING_GFX;
s32 priority = 0;
INIT_LIST_HEAD(&p->validated);
if (!cs->num_chunks) {
return 0;
}
/* get chunks */
p->idx = 0;
p->ib.sa_bo = NULL;
p->ib.semaphore = NULL;
p->const_ib.sa_bo = NULL;
p->const_ib.semaphore = NULL;
p->chunk_ib_idx = -1;
p->chunk_relocs_idx = -1;
p->chunk_flags_idx = -1;
p->chunk_const_ib_idx = -1;
p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (p->chunks_array == NULL) {
return -ENOMEM;
}
chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
if (copy_from_user(p->chunks_array, chunk_array_ptr,
sizeof(uint64_t)*cs->num_chunks)) {
return -EFAULT;
}
p->cs_flags = 0;
p->nchunks = cs->num_chunks;
p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
if (p->chunks == NULL) {
return -ENOMEM;
}
for (i = 0; i < p->nchunks; i++) {
struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
struct drm_radeon_cs_chunk user_chunk;
uint32_t __user *cdata;
chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_radeon_cs_chunk))) {
return -EFAULT;
}
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].chunk_id = user_chunk.chunk_id;
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
p->chunk_relocs_idx = i;
}
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
p->chunk_ib_idx = i;
/* zero length IB isn't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
p->chunk_const_ib_idx = i;
/* zero length CONST IB isn't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
p->chunk_flags_idx = i;
/* zero length flags aren't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
size = p->chunks[i].length_dw;
cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
p->chunks[i].user_ptr = cdata;
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
continue;
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
continue;
}
p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
size *= sizeof(uint32_t);
if (p->chunks[i].kdata == NULL) {
return -ENOMEM;
}
if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
return -EFAULT;
}
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
p->cs_flags = p->chunks[i].kdata[0];
if (p->chunks[i].length_dw > 1)
ring = p->chunks[i].kdata[1];
if (p->chunks[i].length_dw > 2)
priority = (s32)p->chunks[i].kdata[2];
}
}
/* these are KMS only */
if (p->rdev) {
if ((p->cs_flags & RADEON_CS_USE_VM) &&
!p->rdev->vm_manager.enabled) {
DRM_ERROR("VM not active on asic!\n");
return -EINVAL;
}
if (radeon_cs_get_ring(p, ring, priority))
return -EINVAL;
/* we only support VM on some SI+ rings */
if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
DRM_ERROR("Ring %d requires VM!\n", p->ring);
return -EINVAL;
}
} else {
if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
DRM_ERROR("VM not supported on ring %d!\n",
p->ring);
return -EINVAL;
}
}
}
return 0;
}
static int cmp_size_smaller_first(void *priv, struct list_head *a,
struct list_head *b)
{
struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
/* Sort A before B if A is smaller. */
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
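/*
 * Comparator semantics (illustrative): list_sort() treats a negative
 * return as "a sorts before b", so a 1-page BO compared against a
 * 4-page BO returns 1 - 4 = -3 and ends up earlier in the validated
 * list, matching the smallest-first ordering described below.
 */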
/**
* radeon_cs_parser_fini() - clean parser states
* @parser: parser structure holding parsing context.
* @error: error number
*
* If error is set, then unreserve the validated buffers; otherwise just
* free the memory used by the parsing context.
**/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
unsigned i;
if (!error) {
/* Sort the buffer list from the smallest to largest buffer,
* which affects the order of buffers in the LRU list.
* This assures that the smallest buffers are added first
* to the LRU list, so they are likely to be later evicted
* first, instead of large buffers whose eviction is more
* expensive.
*
* This slightly lowers the number of bytes moved by TTM
* per frame under memory pressure.
*/
list_sort(NULL, &parser->validated, cmp_size_smaller_first);
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
&parser->ib.fence->base);
} else if (backoff) {
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj)
drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
}
}
kfree(parser->track);
kfree(parser->relocs);
kfree(parser->relocs_ptr);
drm_free_large(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
kfree(parser->chunks_array);
radeon_ib_free(parser->rdev, &parser->ib);
radeon_ib_free(parser->rdev, &parser->const_ib);
}
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
struct radeon_cs_parser *parser)
{
int r;
if (parser->chunk_ib_idx == -1)
return 0;
if (parser->cs_flags & RADEON_CS_USE_VM)
return 0;
r = radeon_cs_parse(rdev, parser->ring, parser);
if (r || parser->parser_error) {
DRM_ERROR("Invalid command stream !\n");
return r;
}
r = radeon_cs_sync_rings(parser);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to sync rings: %i\n", r);
return r;
}
if (parser->ring == R600_RING_TYPE_UVD_INDEX)
radeon_uvd_note_usage(rdev);
else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
(parser->ring == TN_RING_TYPE_VCE2_INDEX))
radeon_vce_note_usage(rdev);
r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
if (r) {
DRM_ERROR("Failed to schedule IB !\n");
}
return r;
}
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
struct radeon_vm *vm)
{
struct radeon_device *rdev = p->rdev;
struct radeon_bo_va *bo_va;
int i, r;
r = radeon_vm_update_page_directory(rdev, vm);
if (r)
return r;
r = radeon_vm_clear_freed(rdev, vm);
if (r)
return r;
if (vm->ib_bo_va == NULL) {
DRM_ERROR("Tmp BO not in VM!\n");
return -EINVAL;
}
r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
&rdev->ring_tmp_bo.bo->tbo.mem);
if (r)
return r;
for (i = 0; i < p->nrelocs; i++) {
struct radeon_bo *bo;
/* ignore duplicates */
if (p->relocs_ptr[i] != &p->relocs[i])
continue;
bo = p->relocs[i].robj;
bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
return -EINVAL;
}
r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
if (r)
return r;
}
return radeon_vm_clear_invalids(rdev, vm);
}
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
struct radeon_cs_parser *parser)
{
struct radeon_fpriv *fpriv = parser->filp->driver_priv;
struct radeon_vm *vm = &fpriv->vm;
int r;
if (parser->chunk_ib_idx == -1)
return 0;
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
return 0;
if (parser->const_ib.length_dw) {
r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
if (r) {
return r;
}
}
r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
if (r) {
return r;
}
if (parser->ring == R600_RING_TYPE_UVD_INDEX)
radeon_uvd_note_usage(rdev);
mutex_lock(&vm->mutex);
r = radeon_bo_vm_update_pte(parser, vm);
if (r) {
goto out;
}
r = radeon_cs_sync_rings(parser);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to sync rings: %i\n", r);
goto out;
}
radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
} else {
r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
}
out:
mutex_unlock(&vm->mutex);
return r;
}
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
if (r == -EDEADLK) {
r = radeon_gpu_reset(rdev);
if (!r)
r = -EAGAIN;
}
return r;
}
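/*
 * Illustrative note: translating -EDEADLK into a GPU reset followed by
 * -EAGAIN means a lockup detected mid-submission is surfaced to
 * userspace as "retry the ioctl" rather than as a hard failure, once
 * radeon_gpu_reset() has brought the hardware back.
 */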
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
struct radeon_cs_chunk *ib_chunk;
struct radeon_vm *vm = NULL;
int r;
if (parser->chunk_ib_idx == -1)
return 0;
if (parser->cs_flags & RADEON_CS_USE_VM) {
struct radeon_fpriv *fpriv = parser->filp->driver_priv;
vm = &fpriv->vm;
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
return -EINVAL;
}
r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
vm, ib_chunk->length_dw * 4);
if (r) {
DRM_ERROR("Failed to get const ib !\n");
return r;
}
parser->const_ib.is_const_ib = true;
parser->const_ib.length_dw = ib_chunk->length_dw;
if (copy_from_user(parser->const_ib.ptr,
ib_chunk->user_ptr,
ib_chunk->length_dw * 4))
return -EFAULT;
}
ib_chunk = &parser->chunks[parser->chunk_ib_idx];
if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
return -EINVAL;
}
}
ib_chunk = &parser->chunks[parser->chunk_ib_idx];
r = radeon_ib_get(rdev, parser->ring, &parser->ib,
vm, ib_chunk->length_dw * 4);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
}
parser->ib.length_dw = ib_chunk->length_dw;
if (ib_chunk->kdata)
memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
return -EFAULT;
return 0;
}
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_cs_parser parser;
int r;
down_read(&rdev->exclusive_lock);
if (!rdev->accel_working) {
up_read(&rdev->exclusive_lock);
return -EBUSY;
}
if (rdev->in_reset) {
up_read(&rdev->exclusive_lock);
r = radeon_gpu_reset(rdev);
if (!r)
r = -EAGAIN;
return r;
}
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
parser.rdev = rdev;
parser.dev = rdev->dev;
parser.family = rdev->family;
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
radeon_cs_parser_fini(&parser, r, false);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
r = radeon_cs_ib_fill(rdev, &parser);
if (!r) {
r = radeon_cs_parser_relocs(&parser);
if (r && r != -ERESTARTSYS)
DRM_ERROR("Failed to parse relocation %d!\n", r);
}
if (r) {
radeon_cs_parser_fini(&parser, r, false);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
trace_radeon_cs(&parser);
r = radeon_cs_ib_chunk(rdev, &parser);
if (r) {
goto out;
}
r = radeon_cs_ib_vm_chunk(rdev, &parser);
if (r) {
goto out;
}
out:
radeon_cs_parser_fini(&parser, r, true);
up_read(&rdev->exclusive_lock);
r = radeon_cs_handle_lockup(rdev, r);
return r;
}
/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p: parser structure holding parsing context.
 * @pkt: where to store packet information
 * @idx: ib index to start parsing from
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
 * packet is bigger than the remaining ib size, or if the packet type is
 * unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx)
{
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
struct radeon_device *rdev = p->rdev;
uint32_t header;
if (idx >= ib_chunk->length_dw) {
DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
idx, ib_chunk->length_dw);
return -EINVAL;
}
header = radeon_get_ib_value(p, idx);
pkt->idx = idx;
pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
pkt->one_reg_wr = 0;
switch (pkt->type) {
case RADEON_PACKET_TYPE0:
if (rdev->family < CHIP_R600) {
pkt->reg = R100_CP_PACKET0_GET_REG(header);
pkt->one_reg_wr =
RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
} else
pkt->reg = R600_CP_PACKET0_GET_REG(header);
break;
case RADEON_PACKET_TYPE3:
pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
break;
case RADEON_PACKET_TYPE2:
pkt->count = -1;
break;
default:
DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
return -EINVAL;
}
if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
return -EINVAL;
}
return 0;
}
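/*
 * Editorial sketch (hedged, compiled out, not part of the driver):
 * walking an IB one packet at a time with radeon_cs_packet_parse().
 * Advancing by pkt.count + 2 dwords (header plus payload) matches the
 * stride used by radeon_cs_packet_next_reloc() below.
 */
#if 0
static int example_walk_ib_packets(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_cs_packet pkt;
	int r;

	p->idx = 0;
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		/* a real checker would validate pkt.opcode/pkt.reg here */
		p->idx += pkt.count + 2;
	} while (p->idx < ib_chunk->length_dw);
	return 0;
}
#endif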
/**
* radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
* @p: structure holding the parser context.
*
* Check if the next packet is a relocation packet3 NOP.
**/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
struct radeon_cs_packet p3reloc;
int r;
r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
if (r)
return false;
if (p3reloc.type != RADEON_PACKET_TYPE3)
return false;
if (p3reloc.opcode != RADEON_PACKET3_NOP)
return false;
return true;
}
/**
* radeon_cs_dump_packet() - dump raw packet context
* @p: structure holding the parser context.
* @pkt: structure holding the packet.
*
* Used mostly for debugging and error reporting.
**/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
volatile uint32_t *ib;
unsigned i;
unsigned idx;
ib = p->ib.ptr;
idx = pkt->idx;
for (i = 0; i <= (pkt->count + 1); i++, idx++)
DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}
/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p: parser structure holding parsing context.
 * @cs_reloc: where to store a pointer to the relocation entry
 * @nomm: non-zero for the legacy path without memory management, where
 *        the GPU offset is decoded directly from the reloc chunk
 *
 * Check that the next packet is a relocation packet3, then look up the
 * relocation entry it references and return it through @cs_reloc.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc,
int nomm)
{
struct radeon_cs_chunk *relocs_chunk;
struct radeon_cs_packet p3reloc;
unsigned idx;
int r;
if (p->chunk_relocs_idx == -1) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
*cs_reloc = NULL;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
if (r)
return r;
p->idx += p3reloc.count + 2;
if (p3reloc.type != RADEON_PACKET_TYPE3 ||
p3reloc.opcode != RADEON_PACKET3_NOP) {
DRM_ERROR("No packet3 for relocation for packet at %d.\n",
p3reloc.idx);
radeon_cs_dump_packet(p, &p3reloc);
return -EINVAL;
}
idx = radeon_get_ib_value(p, p3reloc.idx + 1);
if (idx >= relocs_chunk->length_dw) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
idx, relocs_chunk->length_dw);
radeon_cs_dump_packet(p, &p3reloc);
return -EINVAL;
}
/* FIXME: we assume reloc size is 4 dwords */
if (nomm) {
*cs_reloc = p->relocs;
(*cs_reloc)->gpu_offset =
(u64)relocs_chunk->kdata[idx + 3] << 32;
(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
} else
*cs_reloc = p->relocs_ptr[(idx / 4)];
return 0;
}
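/*
 * Editorial sketch (hedged): in the nomm branch above the reloc record
 * is assumed to be 4 dwords, with dword 0 holding the low 32 bits of the
 * GPU offset and dword 3 the high 32 bits. An equivalent decode helper:
 */
#if 0
static u64 example_reloc_gpu_offset(const uint32_t *kdata, unsigned idx)
{
	return ((u64)kdata[idx + 3] << 32) | kdata[idx + 0];
}
#endif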
| gpl-2.0 |
civato/Note8.0-StormBorn | drivers/mfd/max77686-irq.c | 454 | 9624 | /*
* max77686-irq.c - Interrupt controller support for MAX77686
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Chiwoong Byun <woong.byun@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* This driver is based on max8997-irq.c
*/
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/mfd/max77686.h>
#include <linux/mfd/max77686-private.h>
#undef MAX77686_IRQ_TEST
enum {
MAX77686_DEBUG_IRQ_INFO = 1 << 0,
MAX77686_DEBUG_IRQ_MASK = 1 << 1,
MAX77686_DEBUG_IRQ_INT = 1 << 2,
};
static int debug_mask = MAX77686_DEBUG_IRQ_INFO | MAX77686_DEBUG_IRQ_MASK |
MAX77686_DEBUG_IRQ_INT;
static const u8 max77686_mask_reg[] = {
[PMIC_INT1] = MAX77686_REG_INT1MSK,
[PMIC_INT2] = MAX77686_REG_INT2MSK,
[RTC_INT] = MAX77686_RTC_INTM,
};
static struct i2c_client *max77686_get_i2c(struct max77686_dev *max77686,
enum max77686_irq_source src)
{
switch (src) {
case PMIC_INT1 ... PMIC_INT2:
return max77686->i2c;
case RTC_INT:
return max77686->rtc;
default:
return ERR_PTR(-EINVAL);
}
}
struct max77686_irq_data {
int mask;
enum max77686_irq_source group;
};
#define DECLARE_IRQ(idx, _group, _mask) \
[(idx)] = { .group = (_group), .mask = (_mask) }
static const struct max77686_irq_data max77686_irqs[] = {
DECLARE_IRQ(MAX77686_PMICIRQ_PWRONF, PMIC_INT1, 1 << 0),
DECLARE_IRQ(MAX77686_PMICIRQ_PWRONR, PMIC_INT1, 1 << 1),
DECLARE_IRQ(MAX77686_PMICIRQ_JIGONBF, PMIC_INT1, 1 << 2),
DECLARE_IRQ(MAX77686_PMICIRQ_JIGONBR, PMIC_INT1, 1 << 3),
DECLARE_IRQ(MAX77686_PMICIRQ_ACOKBF, PMIC_INT1, 1 << 4),
DECLARE_IRQ(MAX77686_PMICIRQ_ACOKBR, PMIC_INT1, 1 << 5),
DECLARE_IRQ(MAX77686_PMICIRQ_ONKEY1S, PMIC_INT1, 1 << 6),
DECLARE_IRQ(MAX77686_PMICIRQ_MRSTB, PMIC_INT1, 1 << 7),
DECLARE_IRQ(MAX77686_PMICIRQ_140C, PMIC_INT2, 1 << 0),
DECLARE_IRQ(MAX77686_PMICIRQ_120C, PMIC_INT2, 1 << 1),
DECLARE_IRQ(MAX77686_RTCIRQ_RTC60S, RTC_INT, 1 << 0),
DECLARE_IRQ(MAX77686_RTCIRQ_RTCA1, RTC_INT, 1 << 1),
DECLARE_IRQ(MAX77686_RTCIRQ_RTCA2, RTC_INT, 1 << 2),
DECLARE_IRQ(MAX77686_RTCIRQ_SMPL, RTC_INT, 1 << 3),
DECLARE_IRQ(MAX77686_RTCIRQ_RTC1S, RTC_INT, 1 << 4),
DECLARE_IRQ(MAX77686_RTCIRQ_WTSR, RTC_INT, 1 << 5),
};
static void max77686_irq_lock(struct irq_data *data)
{
struct max77686_dev *max77686 = irq_get_chip_data(data->irq);
if (debug_mask & MAX77686_DEBUG_IRQ_MASK)
pr_debug("%s\n", __func__);
mutex_lock(&max77686->irqlock);
}
static void max77686_irq_sync_unlock(struct irq_data *data)
{
struct max77686_dev *max77686 = irq_get_chip_data(data->irq);
int i;
for (i = 0; i < MAX77686_IRQ_GROUP_NR; i++) {
u8 mask_reg = max77686_mask_reg[i];
struct i2c_client *i2c = max77686_get_i2c(max77686, i);
if (debug_mask & MAX77686_DEBUG_IRQ_MASK)
pr_debug("%s: mask_reg[%d]=0x%x, cur=0x%x\n",
__func__, i, mask_reg, max77686->irq_masks_cur[i]);
if (mask_reg == MAX77686_REG_INVALID ||
IS_ERR_OR_NULL(i2c))
continue;
max77686->irq_masks_cache[i] = max77686->irq_masks_cur[i];
max77686_write_reg(i2c, max77686_mask_reg[i],
max77686->irq_masks_cur[i]);
}
mutex_unlock(&max77686->irqlock);
}
static inline const struct max77686_irq_data *
irq_to_max77686_irq(struct max77686_dev *max77686, int irq)
{
return &max77686_irqs[irq - max77686->irq_base];
}
static void max77686_irq_mask(struct irq_data *data)
{
struct max77686_dev *max77686 = irq_get_chip_data(data->irq);
const struct max77686_irq_data *irq_data = irq_to_max77686_irq(max77686,
data->irq);
max77686->irq_masks_cur[irq_data->group] |= irq_data->mask;
if (debug_mask & MAX77686_DEBUG_IRQ_MASK)
pr_info("%s: group=%d, cur=0x%x\n",
__func__, irq_data->group,
max77686->irq_masks_cur[irq_data->group]);
}
static void max77686_irq_unmask(struct irq_data *data)
{
struct max77686_dev *max77686 = irq_get_chip_data(data->irq);
const struct max77686_irq_data *irq_data = irq_to_max77686_irq(max77686,
data->irq);
max77686->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
if (debug_mask & MAX77686_DEBUG_IRQ_MASK)
pr_info("%s: group=%d, cur=0x%x\n",
__func__, irq_data->group,
max77686->irq_masks_cur[irq_data->group]);
}
static struct irq_chip max77686_irq_chip = {
.name = "max77686",
.irq_bus_lock = max77686_irq_lock,
.irq_bus_sync_unlock = max77686_irq_sync_unlock,
.irq_mask = max77686_irq_mask,
.irq_unmask = max77686_irq_unmask,
};
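/*
 * Editorial note (hedged): for a slow-bus (I2C) chip like this one,
 * genirq brackets mask/unmask updates with the bus-lock callbacks, so
 * the register writes happen once per batch rather than from atomic
 * context:
 *
 *	irq_bus_lock()          -> mutex_lock(&max77686->irqlock)
 *	irq_mask()/irq_unmask() -> update irq_masks_cur[] only
 *	irq_bus_sync_unlock()   -> write back changed masks, mutex_unlock()
 */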
static irqreturn_t max77686_irq_thread(int irq, void *data)
{
struct max77686_dev *max77686 = data;
u8 irq_reg[MAX77686_IRQ_GROUP_NR] = {};
u8 irq_src;
int ret;
int i;
ret = max77686_read_reg(max77686->i2c, MAX77686_REG_INTSRC, &irq_src);
if (ret < 0) {
dev_err(max77686->dev, "Failed to read interrupt source: %d\n",
ret);
return IRQ_NONE;
}
if (debug_mask & MAX77686_DEBUG_IRQ_INT)
pr_info("%s: irq_src=0x%x\n", __func__, irq_src);
/* MAX77686_IRQSRC_RTC may be set even if interrupts are pending at INT1/2 */
ret = max77686_read_reg(max77686->i2c, MAX77686_REG_INT1, &irq_reg[0]);
if (!ret)
ret = max77686_read_reg(max77686->i2c, MAX77686_REG_INT2, &irq_reg[1]);
if (ret < 0) {
dev_err(max77686->dev, "Failed to read pmic interrupt: %d\n",
ret);
return IRQ_NONE;
}
if (debug_mask & MAX77686_DEBUG_IRQ_INT)
pr_info("%s: int1=0x%x, int2=0x%x\n",
__func__, irq_reg[PMIC_INT1], irq_reg[PMIC_INT2]);
if (irq_src & MAX77686_IRQSRC_RTC) {
#ifdef CONFIG_RTC_DRV_MAX77686
ret = max77686_read_reg(max77686->rtc, MAX77686_RTC_INT, &irq_reg[RTC_INT]);
#else
ret = -ENODEV;
#endif
if (ret < 0) {
dev_err(max77686->dev, "Failed to read rtc interrupt: %d\n",
ret);
return IRQ_NONE;
}
if (debug_mask & MAX77686_DEBUG_IRQ_INT)
pr_info("%s: rtc int=0x%x\n", __func__, irq_reg[RTC_INT]);
}
for (i = 0; i < MAX77686_IRQ_NR; i++) {
if (irq_reg[max77686_irqs[i].group] & max77686_irqs[i].mask)
handle_nested_irq(max77686->irq_base + i);
}
return IRQ_HANDLED;
}
int max77686_irq_resume(struct max77686_dev *max77686)
{
if (max77686->irq && max77686->irq_base)
max77686_irq_thread(max77686->irq_base, max77686);
return 0;
}
int max77686_irq_init(struct max77686_dev *max77686)
{
int i;
int cur_irq;
int ret;
int val;
#ifdef MAX77686_IRQ_TEST
u8 irq_reg[6] = { };
u8 irq_src;
#endif
if (debug_mask & MAX77686_DEBUG_IRQ_INFO)
pr_info("%s+\n", __func__);
if (!max77686->irq_gpio) {
dev_warn(max77686->dev, "No interrupt gpio specified.\n");
max77686->irq_base = 0;
return 0;
}
if (!max77686->irq_base) {
dev_err(max77686->dev, "No interrupt base specified.\n");
return 0;
}
mutex_init(&max77686->irqlock);
max77686->irq = gpio_to_irq(max77686->irq_gpio);
ret = gpio_request(max77686->irq_gpio, "pmic_irq");
if (ret < 0 && ret != -EBUSY) {
dev_err(max77686->dev,
"Failed to request gpio %d with ret: %d\n",
max77686->irq_gpio, ret);
return ret;
}
if (ret == -EBUSY)
dev_warn(max77686->dev, "gpio pmic_irq is already requested\n");
gpio_direction_input(max77686->irq_gpio);
val = gpio_get_value(max77686->irq_gpio);
gpio_free(max77686->irq_gpio);
if (debug_mask & MAX77686_DEBUG_IRQ_INT)
pr_info("%s: gpio_irq=%x\n", __func__, val);
#ifdef MAX77686_IRQ_TEST
ret = max77686_read_reg(max77686->i2c, MAX77686_REG_INTSRC, &irq_src);
if (ret < 0) {
dev_err(max77686->dev, "Failed to read interrupt source: %d\n",
ret);
return ret;
}
pr_info("%s: irq_src=0x%x\n", __func__, irq_src);
ret = max77686_bulk_read(max77686->i2c, MAX77686_REG_INT1, 6, irq_reg);
if (ret < 0) {
dev_err(max77686->dev, "Failed to read interrupt source: %d\n",
ret);
return ret;
}
for (i = 0; i < 6; i++)
pr_info("%s: i[%d]=0x%x\n", __func__, i, irq_reg[i]);
#endif /* MAX77686_IRQ_TEST */
/* Mask individual interrupt sources */
for (i = 0; i < MAX77686_IRQ_GROUP_NR; i++) {
struct i2c_client *i2c;
max77686->irq_masks_cur[i] = 0xff;
max77686->irq_masks_cache[i] = 0xff;
i2c = max77686_get_i2c(max77686, i);
if (IS_ERR_OR_NULL(i2c))
continue;
if (max77686_mask_reg[i] == MAX77686_REG_INVALID)
continue;
max77686_write_reg(i2c, max77686_mask_reg[i], 0xff);
}
/* Register with genirq */
for (i = 0; i < MAX77686_IRQ_NR; i++) {
cur_irq = i + max77686->irq_base;
irq_set_chip_data(cur_irq, max77686);
irq_set_chip_and_handler(cur_irq, &max77686_irq_chip,
handle_edge_irq);
irq_set_nested_thread(cur_irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(cur_irq, IRQF_VALID);
#else
irq_set_noprobe(cur_irq);
#endif
}
ret = request_threaded_irq(max77686->irq, NULL, max77686_irq_thread,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"max77686-irq", max77686);
if (ret) {
dev_err(max77686->dev, "Failed to request IRQ %d: %d\n",
max77686->irq, ret);
return ret;
}
if (debug_mask & MAX77686_DEBUG_IRQ_INFO)
pr_info("%s-\n", __func__);
return 0;
}
void max77686_irq_exit(struct max77686_dev *max77686)
{
if (max77686->irq)
free_irq(max77686->irq, max77686);
}
| gpl-2.0 |
Krabappel2548/u8500_kernel_sources | arch/x86/mm/gup.c | 966 | 9740 | /*
* Lockless get_user_pages_fast for x86
*
* Copyright (C) 2008 Nick Piggin
* Copyright (C) 2008 Novell Inc.
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>
static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X86_PAE
return ACCESS_ONCE(*ptep);
#else
/*
* With get_user_pages_fast, we walk down the pagetables without taking
* any locks. For this we would like to load the pointers atomically,
* but that is not possible (without expensive cmpxchg8b) on PAE. What
* we do have is the guarantee that a pte will only either go from not
* present to present, or present to not present or both -- it will not
* switch to a completely different present page without a TLB flush in
* between; something that we are blocking by holding interrupts off.
*
* Setting ptes from not present to present goes:
* ptep->pte_high = h;
* smp_wmb();
* ptep->pte_low = l;
*
* And present to not present goes:
* ptep->pte_low = 0;
* smp_wmb();
* ptep->pte_high = 0;
*
* We must ensure here that the load of pte_low sees l iff pte_high
* sees h. We load pte_high *after* loading pte_low, which ensures we
* don't see an older value of pte_high. *Then* we recheck pte_low,
* which ensures that we haven't picked up a changed pte high. We might
* have got rubbish values from pte_low and pte_high, but we are
* guaranteed that pte_low will not have the present bit set *unless*
* it is 'l'. And get_user_pages_fast only operates on present ptes, so
* we're safe.
*
* gup_get_pte should not be used or copied outside gup.c without being
* very careful -- it does not atomically load the pte or anything that
* is likely to be useful for you.
*/
pte_t pte;
retry:
pte.pte_low = ptep->pte_low;
smp_rmb();
pte.pte_high = ptep->pte_high;
smp_rmb();
if (unlikely(pte.pte_low != ptep->pte_low))
goto retry;
return pte;
#endif
}
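/*
 * Editorial sketch (hedged, compiled out, illustrative only): the
 * writer-side store order the retry loop above relies on, as described
 * in the comment. Helper names are hypothetical; the real PAE set/clear
 * paths follow this discipline.
 */
#if 0
static void example_pae_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;	/* publish the high word first */
	smp_wmb();
	ptep->pte_low = pte.pte_low;	/* the present bit lands last */
}

static void example_pae_clear_pte(pte_t *ptep)
{
	ptep->pte_low = 0;		/* clear the present bit first */
	smp_wmb();
	ptep->pte_high = 0;
}
#endif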
/*
* The performance critical leaf functions are made noinline otherwise gcc
* inlines everything into a single function which results in too much
* register pressure.
*/
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
unsigned long mask;
pte_t *ptep;
mask = _PAGE_PRESENT|_PAGE_USER;
if (write)
mask |= _PAGE_RW;
ptep = pte_offset_map(&pmd, addr);
do {
pte_t pte = gup_get_pte(ptep);
struct page *page;
if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
pte_unmap(ptep);
return 0;
}
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte);
get_page(page);
pages[*nr] = page;
(*nr)++;
} while (ptep++, addr += PAGE_SIZE, addr != end);
pte_unmap(ptep - 1);
return 1;
}
static inline void get_head_page_multiple(struct page *page, int nr)
{
VM_BUG_ON(page != compound_head(page));
VM_BUG_ON(page_count(page) == 0);
atomic_add(nr, &page->_count);
}
static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
unsigned long mask;
pte_t pte = *(pte_t *)&pmd;
struct page *head, *page;
int refs;
mask = _PAGE_PRESENT|_PAGE_USER;
if (write)
mask |= _PAGE_RW;
if ((pte_flags(pte) & mask) != mask)
return 0;
/* hugepages are never "special" */
VM_BUG_ON(pte_flags(pte) & _PAGE_SPECIAL);
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
refs = 0;
head = pte_page(pte);
page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
get_head_page_multiple(head, refs);
return 1;
}
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
pmd_t *pmdp;
pmdp = pmd_offset(&pud, addr);
do {
pmd_t pmd = *pmdp;
next = pmd_addr_end(addr, end);
if (pmd_none(pmd))
return 0;
if (unlikely(pmd_large(pmd))) {
if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
return 0;
} else {
if (!gup_pte_range(pmd, addr, next, write, pages, nr))
return 0;
}
} while (pmdp++, addr = next, addr != end);
return 1;
}
static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr)
{
unsigned long mask;
pte_t pte = *(pte_t *)&pud;
struct page *head, *page;
int refs;
mask = _PAGE_PRESENT|_PAGE_USER;
if (write)
mask |= _PAGE_RW;
if ((pte_flags(pte) & mask) != mask)
return 0;
/* hugepages are never "special" */
VM_BUG_ON(pte_flags(pte) & _PAGE_SPECIAL);
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
refs = 0;
head = pte_page(pte);
page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
(*nr)++;
page++;
refs++;
} while (addr += PAGE_SIZE, addr != end);
get_head_page_multiple(head, refs);
return 1;
}
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
int write, struct page **pages, int *nr)
{
unsigned long next;
pud_t *pudp;
pudp = pud_offset(&pgd, addr);
do {
pud_t pud = *pudp;
next = pud_addr_end(addr, end);
if (pud_none(pud))
return 0;
if (unlikely(pud_large(pud))) {
if (!gup_huge_pud(pud, addr, next, write, pages, nr))
return 0;
} else {
if (!gup_pmd_range(pud, addr, next, write, pages, nr))
return 0;
}
} while (pudp++, addr = next, addr != end);
return 1;
}
/*
* Like get_user_pages_fast() except its IRQ-safe in that it won't fall
* back to the regular GUP.
*/
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
unsigned long next;
unsigned long flags;
pgd_t *pgdp;
int nr = 0;
start &= PAGE_MASK;
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(void __user *)start, len)))
return 0;
/*
* XXX: batch / limit 'nr', to avoid large irq off latency
* needs some instrumenting to determine the common sizes used by
* important workloads (eg. DB2), and whether limiting the batch size
* will decrease performance.
*
* It seems like we're in the clear for the moment. Direct-IO is
* the main guy that batches up lots of get_user_pages, and even
* they are limited to 64-at-a-time which is not so many.
*/
/*
* This doesn't prevent pagetable teardown, but does prevent
* the pagetables and pages from being freed on x86.
*
* So long as we atomically load page table pointers versus teardown
* (which we do on x86, with the above PAE exception), we can follow the
* address down to the page and take a ref on it.
*/
local_irq_save(flags);
pgdp = pgd_offset(mm, addr);
do {
pgd_t pgd = *pgdp;
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
break;
if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
break;
} while (pgdp++, addr = next, addr != end);
local_irq_restore(flags);
return nr;
}
/**
* get_user_pages_fast() - pin user pages in memory
* @start: starting user address
* @nr_pages: number of pages from start to pin
* @write: whether pages will be written to
* @pages: array that receives pointers to the pages pinned.
* Should be at least nr_pages long.
*
* Attempt to pin user pages in memory without taking mm->mmap_sem.
* If not successful, it will fall back to taking the lock and
* calling get_user_pages().
*
* Returns number of pages pinned. This may be fewer than the number
* requested. If nr_pages is 0 or negative, returns 0. If no pages
* were pinned, returns -errno.
*/
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
unsigned long next;
pgd_t *pgdp;
int nr = 0;
start &= PAGE_MASK;
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (end < start)
goto slow_irqon;
#ifdef CONFIG_X86_64
if (end >> __VIRTUAL_MASK_SHIFT)
goto slow_irqon;
#endif
/*
* XXX: batch / limit 'nr', to avoid large irq off latency
* needs some instrumenting to determine the common sizes used by
* important workloads (eg. DB2), and whether limiting the batch size
* will decrease performance.
*
* It seems like we're in the clear for the moment. Direct-IO is
* the main guy that batches up lots of get_user_pages, and even
* they are limited to 64-at-a-time which is not so many.
*/
/*
* This doesn't prevent pagetable teardown, but does prevent
* the pagetables and pages from being freed on x86.
*
* So long as we atomically load page table pointers versus teardown
* (which we do on x86, with the above PAE exception), we can follow the
* address down to the page and take a ref on it.
*/
local_irq_disable();
pgdp = pgd_offset(mm, addr);
do {
pgd_t pgd = *pgdp;
next = pgd_addr_end(addr, end);
if (pgd_none(pgd))
goto slow;
if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
goto slow;
} while (pgdp++, addr = next, addr != end);
local_irq_enable();
VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
return nr;
{
int ret;
slow:
local_irq_enable();
slow_irqon:
/* Try to get the remaining pages with get_user_pages */
start += nr << PAGE_SHIFT;
pages += nr;
down_read(&mm->mmap_sem);
ret = get_user_pages(current, mm, start,
(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
up_read(&mm->mmap_sem);
/* Have to be a bit careful with return values */
if (nr > 0) {
if (ret < 0)
ret = nr;
else
ret += nr;
}
return ret;
}
}
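/*
 * Editorial sketch (hedged, compiled out): a typical caller pins a small
 * batch of pages, uses them, then drops the references with put_page().
 * Error handling follows the return-value rules documented above;
 * kmalloc()/kfree() would additionally need <linux/slab.h>.
 */
#if 0
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages)
{
	struct page **pages;
	int i, pinned;

	pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (pinned < 0) {		/* no pages were pinned */
		kfree(pages);
		return pinned;
	}

	/* ... read/write pages[0..pinned-1] here ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	kfree(pages);
	return pinned == nr_pages ? 0 : -EFAULT;
}
#endif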
| gpl-2.0 |
simone201/neak-gs3-jb2 | fs/xfs/linux-2.6/xfs_super.c | 1734 | 46635 | /*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_fsops.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
#include "xfs_log_priv.h"
#include "xfs_trans_priv.h"
#include "xfs_filestream.h"
#include "xfs_da_btree.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_sync.h"
#include "xfs_trace.h"
#include <linux/namei.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/parser.h>
static const struct super_operations xfs_super_operations;
static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;
#define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */
#define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */
#define MNTOPT_LOGDEV "logdev" /* log device */
#define MNTOPT_RTDEV "rtdev" /* realtime I/O device */
#define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */
#define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */
#define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */
#define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */
#define MNTOPT_SUNIT "sunit" /* data volume stripe unit */
#define MNTOPT_SWIDTH "swidth" /* data volume stripe width */
#define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */
#define MNTOPT_MTPT "mtpt" /* filesystem mount point */
#define MNTOPT_GRPID "grpid" /* group-ID from parent directory */
#define MNTOPT_NOGRPID "nogrpid" /* group-ID from current process */
#define MNTOPT_BSDGROUPS "bsdgroups" /* group-ID from parent directory */
#define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */
#define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */
#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */
#define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and
* unwritten extent conversion */
#define MNTOPT_NOBARRIER "nobarrier" /* .. disable */
#define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */
#define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */
#define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */
#define MNTOPT_LARGEIO "largeio" /* report large I/O sizes in stat() */
#define MNTOPT_NOLARGEIO "nolargeio" /* do not report large I/O sizes
* in stat(). */
#define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */
#define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */
#define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */
#define MNTOPT_QUOTA "quota" /* disk quotas (user) */
#define MNTOPT_NOQUOTA "noquota" /* no quotas */
#define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */
#define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */
#define MNTOPT_PRJQUOTA "prjquota" /* project quota enabled */
#define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */
#define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */
#define MNTOPT_PQUOTA "pquota" /* project quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */
#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
#define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */
#define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */
#define MNTOPT_DISCARD "discard" /* Discard unused blocks */
#define MNTOPT_NODISCARD "nodiscard" /* Do not discard unused blocks */
/*
* Table driven mount option parser.
*
* Currently only used for remount, but it will be used for mount
* in the future, too.
*/
enum {
Opt_barrier, Opt_nobarrier, Opt_err
};
static const match_table_t tokens = {
{Opt_barrier, "barrier"},
{Opt_nobarrier, "nobarrier"},
{Opt_err, NULL}
};
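/*
 * Editorial sketch (hedged): match_token() walks this table and returns
 * the token whose pattern matches, or Opt_err for anything unknown; see
 * xfs_fs_remount() below for the real call site:
 *
 *	substring_t args[MAX_OPT_ARGS];
 *	int token = match_token(p, tokens, args);  /\* p == "nobarrier" *\/
 *	/\* token == Opt_nobarrier *\/
 */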
STATIC unsigned long
suffix_strtoul(char *s, char **endp, unsigned int base)
{
int last, shift_left_factor = 0;
char *value = s;
last = strlen(value) - 1;
if (value[last] == 'K' || value[last] == 'k') {
shift_left_factor = 10;
value[last] = '\0';
}
if (value[last] == 'M' || value[last] == 'm') {
shift_left_factor = 20;
value[last] = '\0';
}
if (value[last] == 'G' || value[last] == 'g') {
shift_left_factor = 30;
value[last] = '\0';
}
return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
}
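/*
 * Editorial example (hedged, compiled out): suffix_strtoul() scales the
 * parsed number by a power-of-two suffix, so "32k" yields 32 << 10 =
 * 32768 and "1g" yields 1 << 30. Note it truncates the suffix in place,
 * which is why it is only handed writable option strings.
 */
#if 0
static void example_suffix_strtoul(void)
{
	char buf[] = "32k";
	char *eov;

	BUG_ON(suffix_strtoul(buf, &eov, 10) != 32768);
}
#endif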
/*
* This function fills in xfs_mount_t fields based on mount args.
* Note: the superblock has _not_ yet been read in.
*
* Note that this function leaks the various device name allocations on
* failure. The caller takes care of them.
*/
STATIC int
xfs_parseargs(
struct xfs_mount *mp,
char *options)
{
struct super_block *sb = mp->m_super;
char *this_char, *value, *eov;
int dsunit = 0;
int dswidth = 0;
int iosize = 0;
__uint8_t iosizelog = 0;
/*
* set up the mount name first so all the errors will refer to the
* correct device.
*/
mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
if (!mp->m_fsname)
return ENOMEM;
mp->m_fsname_len = strlen(mp->m_fsname) + 1;
/*
* Copy binary VFS mount flags we are interested in.
*/
if (sb->s_flags & MS_RDONLY)
mp->m_flags |= XFS_MOUNT_RDONLY;
if (sb->s_flags & MS_DIRSYNC)
mp->m_flags |= XFS_MOUNT_DIRSYNC;
if (sb->s_flags & MS_SYNCHRONOUS)
mp->m_flags |= XFS_MOUNT_WSYNC;
/*
* Set some default flags that could be cleared by the mount option
* parsing.
*/
mp->m_flags |= XFS_MOUNT_BARRIER;
mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
mp->m_flags |= XFS_MOUNT_DELAYLOG;
/*
* These can be overridden by the mount option parsing.
*/
mp->m_logbufs = -1;
mp->m_logbsize = -1;
if (!options)
goto done;
while ((this_char = strsep(&options, ",")) != NULL) {
if (!*this_char)
continue;
if ((value = strchr(this_char, '=')) != NULL)
*value++ = 0;
if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
if (!value || !*value) {
xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
mp->m_logbufs = simple_strtoul(value, &eov, 10);
} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
if (!value || !*value) {
xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
mp->m_logbsize = suffix_strtoul(value, &eov, 10);
} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
if (!value || !*value) {
xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
if (!mp->m_logname)
return ENOMEM;
} else if (!strcmp(this_char, MNTOPT_MTPT)) {
xfs_warn(mp, "%s option not allowed on this system",
this_char);
return EINVAL;
} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
if (!value || !*value) {
xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
if (!mp->m_rtname)
return ENOMEM;
} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
if (!value || !*value) {
xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
iosize = simple_strtoul(value, &eov, 10);
iosizelog = ffs(iosize) - 1;
} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
if (!value || !*value) {
xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
iosize = suffix_strtoul(value, &eov, 10);
iosizelog = ffs(iosize) - 1;
} else if (!strcmp(this_char, MNTOPT_GRPID) ||
!strcmp(this_char, MNTOPT_BSDGROUPS)) {
mp->m_flags |= XFS_MOUNT_GRPID;
} else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
!strcmp(this_char, MNTOPT_SYSVGROUPS)) {
mp->m_flags &= ~XFS_MOUNT_GRPID;
} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
mp->m_flags |= XFS_MOUNT_WSYNC;
} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
mp->m_flags |= XFS_MOUNT_NORECOVERY;
} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
mp->m_flags |= XFS_MOUNT_NOALIGN;
} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
mp->m_flags |= XFS_MOUNT_SWALLOC;
} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
if (!value || !*value) {
xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
dsunit = simple_strtoul(value, &eov, 10);
} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
if (!value || !*value) {
xfs_warn(mp, "%s option requires an argument",
this_char);
return EINVAL;
}
dswidth = simple_strtoul(value, &eov, 10);
} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
#if !XFS_BIG_INUMS
xfs_warn(mp, "%s option not allowed on this system",
this_char);
return EINVAL;
#endif
} else if (!strcmp(this_char, MNTOPT_NOUUID)) {
mp->m_flags |= XFS_MOUNT_NOUUID;
} else if (!strcmp(this_char, MNTOPT_BARRIER)) {
mp->m_flags |= XFS_MOUNT_BARRIER;
} else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
mp->m_flags &= ~XFS_MOUNT_BARRIER;
} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
mp->m_flags |= XFS_MOUNT_IKEEP;
} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
mp->m_flags &= ~XFS_MOUNT_IKEEP;
} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
} else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
} else if (!strcmp(this_char, MNTOPT_ATTR2)) {
mp->m_flags |= XFS_MOUNT_ATTR2;
} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
mp->m_flags &= ~XFS_MOUNT_ATTR2;
mp->m_flags |= XFS_MOUNT_NOATTR2;
} else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
mp->m_flags |= XFS_MOUNT_FILESTREAMS;
} else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD);
} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
!strcmp(this_char, MNTOPT_UQUOTA) ||
!strcmp(this_char, MNTOPT_USRQUOTA)) {
mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
XFS_UQUOTA_ENFD);
} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
!strcmp(this_char, MNTOPT_UQUOTANOENF)) {
mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
mp->m_qflags &= ~XFS_UQUOTA_ENFD;
} else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
!strcmp(this_char, MNTOPT_PRJQUOTA)) {
mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
XFS_OQUOTA_ENFD);
} else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
mp->m_qflags &= ~XFS_OQUOTA_ENFD;
} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
!strcmp(this_char, MNTOPT_GRPQUOTA)) {
mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
XFS_OQUOTA_ENFD);
} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
mp->m_qflags &= ~XFS_OQUOTA_ENFD;
} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
mp->m_flags |= XFS_MOUNT_DELAYLOG;
} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
} else if (!strcmp(this_char, MNTOPT_DISCARD)) {
mp->m_flags |= XFS_MOUNT_DISCARD;
} else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
mp->m_flags &= ~XFS_MOUNT_DISCARD;
} else if (!strcmp(this_char, "ihashsize")) {
xfs_warn(mp,
"ihashsize no longer used, option is deprecated.");
} else if (!strcmp(this_char, "osyncisdsync")) {
xfs_warn(mp,
"osyncisdsync has no effect, option is deprecated.");
} else if (!strcmp(this_char, "osyncisosync")) {
xfs_warn(mp,
"osyncisosync has no effect, option is deprecated.");
} else if (!strcmp(this_char, "irixsgid")) {
xfs_warn(mp,
"irixsgid is now a sysctl(2) variable, option is deprecated.");
} else {
xfs_warn(mp, "unknown mount option [%s].", this_char);
return EINVAL;
}
}
/*
* no recovery flag requires a read-only mount
*/
if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
!(mp->m_flags & XFS_MOUNT_RDONLY)) {
xfs_warn(mp, "no-recovery mounts must be read-only.");
return EINVAL;
}
if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
xfs_warn(mp,
"sunit and swidth options incompatible with the noalign option");
return EINVAL;
}
if ((mp->m_flags & XFS_MOUNT_DISCARD) &&
!(mp->m_flags & XFS_MOUNT_DELAYLOG)) {
xfs_warn(mp,
"the discard option is incompatible with the nodelaylog option");
return EINVAL;
}
#ifndef CONFIG_XFS_QUOTA
if (XFS_IS_QUOTA_RUNNING(mp)) {
xfs_warn(mp, "quota support not available in this kernel.");
return EINVAL;
}
#endif
if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
(mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
xfs_warn(mp, "cannot mount with both project and group quota");
return EINVAL;
}
if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
xfs_warn(mp, "sunit and swidth must be specified together");
return EINVAL;
}
if (dsunit && (dswidth % dsunit != 0)) {
xfs_warn(mp,
"stripe width (%d) must be a multiple of the stripe unit (%d)",
dswidth, dsunit);
return EINVAL;
}
done:
if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
/*
* At this point the superblock has not been read
* in, therefore we do not know the block size.
* Before the mount call ends we will convert
* these to FSBs.
*/
if (dsunit) {
mp->m_dalign = dsunit;
mp->m_flags |= XFS_MOUNT_RETERR;
}
if (dswidth)
mp->m_swidth = dswidth;
}
if (mp->m_logbufs != -1 &&
mp->m_logbufs != 0 &&
(mp->m_logbufs < XLOG_MIN_ICLOGS ||
mp->m_logbufs > XLOG_MAX_ICLOGS)) {
xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
return XFS_ERROR(EINVAL);
}
if (mp->m_logbsize != -1 &&
mp->m_logbsize != 0 &&
(mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
!is_power_of_2(mp->m_logbsize))) {
xfs_warn(mp,
"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
mp->m_logbsize);
return XFS_ERROR(EINVAL);
}
if (iosizelog) {
if (iosizelog > XFS_MAX_IO_LOG ||
iosizelog < XFS_MIN_IO_LOG) {
xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
iosizelog, XFS_MIN_IO_LOG,
XFS_MAX_IO_LOG);
return XFS_ERROR(EINVAL);
}
mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
mp->m_readio_log = iosizelog;
mp->m_writeio_log = iosizelog;
}
return 0;
}
struct proc_xfs_info {
int flag;
char *str;
};
STATIC int
xfs_showargs(
struct xfs_mount *mp,
struct seq_file *m)
{
static struct proc_xfs_info xfs_info_set[] = {
/* the few simple ones we can get from the mount struct */
{ XFS_MOUNT_IKEEP, "," MNTOPT_IKEEP },
{ XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC },
{ XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN },
{ XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC },
{ XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID },
{ XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY },
{ XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 },
{ XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM },
{ XFS_MOUNT_GRPID, "," MNTOPT_GRPID },
{ XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG },
{ XFS_MOUNT_DISCARD, "," MNTOPT_DISCARD },
{ 0, NULL }
};
static struct proc_xfs_info xfs_info_unset[] = {
/* the few simple ones we can get from the mount struct */
{ XFS_MOUNT_COMPAT_IOSIZE, "," MNTOPT_LARGEIO },
{ XFS_MOUNT_BARRIER, "," MNTOPT_NOBARRIER },
{ XFS_MOUNT_SMALL_INUMS, "," MNTOPT_64BITINODE },
{ 0, NULL }
};
struct proc_xfs_info *xfs_infop;
for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
if (mp->m_flags & xfs_infop->flag)
seq_puts(m, xfs_infop->str);
}
for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
if (!(mp->m_flags & xfs_infop->flag))
seq_puts(m, xfs_infop->str);
}
if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
(int)(1 << mp->m_writeio_log) >> 10);
if (mp->m_logbufs > 0)
seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
if (mp->m_logbsize > 0)
seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
if (mp->m_logname)
seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
if (mp->m_rtname)
seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
if (mp->m_dalign > 0)
seq_printf(m, "," MNTOPT_SUNIT "=%d",
(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
if (mp->m_swidth > 0)
seq_printf(m, "," MNTOPT_SWIDTH "=%d",
(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
seq_puts(m, "," MNTOPT_USRQUOTA);
else if (mp->m_qflags & XFS_UQUOTA_ACCT)
seq_puts(m, "," MNTOPT_UQUOTANOENF);
/* Either project or group quotas can be active, not both */
if (mp->m_qflags & XFS_PQUOTA_ACCT) {
if (mp->m_qflags & XFS_OQUOTA_ENFD)
seq_puts(m, "," MNTOPT_PRJQUOTA);
else
seq_puts(m, "," MNTOPT_PQUOTANOENF);
} else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
if (mp->m_qflags & XFS_OQUOTA_ENFD)
seq_puts(m, "," MNTOPT_GRPQUOTA);
else
seq_puts(m, "," MNTOPT_GQUOTANOENF);
}
if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
seq_puts(m, "," MNTOPT_NOQUOTA);
return 0;
}
__uint64_t
xfs_max_file_offset(
unsigned int blockshift)
{
unsigned int pagefactor = 1;
unsigned int bitshift = BITS_PER_LONG - 1;
/* Figure out maximum filesize, on Linux this can depend on
* the filesystem blocksize (on 32 bit platforms).
* __block_write_begin does this in an [unsigned] long...
* page->index << (PAGE_CACHE_SHIFT - bbits)
* So, for page sized blocks (4K on 32 bit platforms),
* this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
* (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
* but for smaller blocksizes it is less (bbits = log2 bsize).
* Note1: get_block_t takes a long (implicit cast from above)
* Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
* can optionally convert the [unsigned] long from above into
* an [unsigned] long long.
*/
#if BITS_PER_LONG == 32
# if defined(CONFIG_LBDAF)
ASSERT(sizeof(sector_t) == 8);
pagefactor = PAGE_CACHE_SIZE;
bitshift = BITS_PER_LONG;
# else
pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
# endif
#endif
return (((__uint64_t)pagefactor) << bitshift) - 1;
}
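/*
 * Editorial worked example (hedged): with BITS_PER_LONG == 32 and 4k
 * pages, CONFIG_LBDAF gives pagefactor = 4096 and bitshift = 32, i.e.
 * ((u64)4096 << 32) - 1 = 16TiB - 1. Without CONFIG_LBDAF and 4k blocks
 * (blockshift = 12), pagefactor = 4096 and bitshift stays 31, i.e.
 * 8TiB - 1, matching the "wraps at around 8Tb" note above; 1k blocks
 * shrink pagefactor to 1024 and the limit to 2TiB - 1. On 64-bit
 * kernels the defaults give 2^63 - 1.
 */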
STATIC int
xfs_blkdev_get(
xfs_mount_t *mp,
const char *name,
struct block_device **bdevp)
{
int error = 0;
*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
mp);
if (IS_ERR(*bdevp)) {
error = PTR_ERR(*bdevp);
xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error);
}
return -error;
}
STATIC void
xfs_blkdev_put(
struct block_device *bdev)
{
if (bdev)
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}
void
xfs_blkdev_issue_flush(
xfs_buftarg_t *buftarg)
{
blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);
}
STATIC void
xfs_close_devices(
struct xfs_mount *mp)
{
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
xfs_free_buftarg(mp, mp->m_logdev_targp);
xfs_blkdev_put(logdev);
}
if (mp->m_rtdev_targp) {
struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
xfs_free_buftarg(mp, mp->m_rtdev_targp);
xfs_blkdev_put(rtdev);
}
xfs_free_buftarg(mp, mp->m_ddev_targp);
}
/*
* The file system configurations are:
* (1) device (partition) with data and internal log
* (2) logical volume with data and log subvolumes.
* (3) logical volume with data, log, and realtime subvolumes.
*
* We only have to handle opening the log and realtime volumes here if
* they are present. The data subvolume has already been opened by
* get_sb_bdev() and is stored in sb->s_bdev.
*/
STATIC int
xfs_open_devices(
struct xfs_mount *mp)
{
struct block_device *ddev = mp->m_super->s_bdev;
struct block_device *logdev = NULL, *rtdev = NULL;
int error;
/*
* Open real time and log devices - order is important.
*/
if (mp->m_logname) {
error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
if (error)
goto out;
}
if (mp->m_rtname) {
error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
if (error)
goto out_close_logdev;
if (rtdev == ddev || rtdev == logdev) {
xfs_warn(mp,
"Cannot mount filesystem with identical rtdev and ddev/logdev.");
error = EINVAL;
goto out_close_rtdev;
}
}
/*
* Setup xfs_mount buffer target pointers
*/
error = ENOMEM;
mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname);
if (!mp->m_ddev_targp)
goto out_close_rtdev;
if (rtdev) {
mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1,
mp->m_fsname);
if (!mp->m_rtdev_targp)
goto out_free_ddev_targ;
}
if (logdev && logdev != ddev) {
mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1,
mp->m_fsname);
if (!mp->m_logdev_targp)
goto out_free_rtdev_targ;
} else {
mp->m_logdev_targp = mp->m_ddev_targp;
}
return 0;
out_free_rtdev_targ:
if (mp->m_rtdev_targp)
xfs_free_buftarg(mp, mp->m_rtdev_targp);
out_free_ddev_targ:
xfs_free_buftarg(mp, mp->m_ddev_targp);
out_close_rtdev:
if (rtdev)
xfs_blkdev_put(rtdev);
out_close_logdev:
if (logdev && logdev != ddev)
xfs_blkdev_put(logdev);
out:
return error;
}
/*
* Setup xfs_mount buffer target pointers based on superblock
*/
STATIC int
xfs_setup_devices(
struct xfs_mount *mp)
{
int error;
error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
mp->m_sb.sb_sectsize);
if (error)
return error;
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
unsigned int log_sector_size = BBSIZE;
if (xfs_sb_version_hassector(&mp->m_sb))
log_sector_size = mp->m_sb.sb_logsectsize;
error = xfs_setsize_buftarg(mp->m_logdev_targp,
mp->m_sb.sb_blocksize,
log_sector_size);
if (error)
return error;
}
if (mp->m_rtdev_targp) {
error = xfs_setsize_buftarg(mp->m_rtdev_targp,
mp->m_sb.sb_blocksize,
mp->m_sb.sb_sectsize);
if (error)
return error;
}
return 0;
}
/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
struct super_block *sb)
{
BUG();
return NULL;
}
/*
* Now that the generic code is guaranteed not to be accessing
* the linux inode, we can reclaim the inode.
*/
STATIC void
xfs_fs_destroy_inode(
struct inode *inode)
{
struct xfs_inode *ip = XFS_I(inode);
trace_xfs_destroy_inode(ip);
XFS_STATS_INC(vn_reclaim);
/* bad inode, get out here ASAP */
if (is_bad_inode(inode))
goto out_reclaim;
xfs_ioend_wait(ip);
ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
/*
* We should never get here with one of the reclaim flags already set.
*/
ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
/*
* We always use background reclaim here because even if the
* inode is clean, it still may be under IO and hence we have
* to take the flush lock. The background reclaim path handles
* this more efficiently than we can here, so simply let background
* reclaim tear down all inodes.
*/
out_reclaim:
xfs_inode_set_reclaim_tag(ip);
}
/*
* Slab object creation initialisation for the XFS inode.
* This covers only the idempotent fields in the XFS inode;
* all other fields need to be initialised on allocation
* from the slab. This avoids the need to repeatedly initialise
* fields in the xfs inode that are left in the initialised state
* when the inode is freed.
*/
STATIC void
xfs_fs_inode_init_once(
void *inode)
{
struct xfs_inode *ip = inode;
memset(ip, 0, sizeof(struct xfs_inode));
/* vfs inode */
inode_init_once(VFS_I(ip));
/* xfs inode */
atomic_set(&ip->i_iocount, 0);
atomic_set(&ip->i_pincount, 0);
spin_lock_init(&ip->i_flags_lock);
init_waitqueue_head(&ip->i_ipin_wait);
/*
* Because we want to use a counting completion, complete
* the flush completion once to allow a single access to
* the flush completion without blocking.
*/
init_completion(&ip->i_flush);
complete(&ip->i_flush);
mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
"xfsino", ip->i_ino);
}
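/*
 * Editorial sketch (hedged, compiled out): the pre-completed counting
 * completion above acts as a trylock-able flush lock. Hypothetical
 * helpers modelled on the real xfs_iflock()/xfs_ifunlock():
 */
#if 0
static void example_iflock(struct xfs_inode *ip)
{
	wait_for_completion(&ip->i_flush);	/* consume the single count */
}

static int example_iflock_nowait(struct xfs_inode *ip)
{
	return try_wait_for_completion(&ip->i_flush);	/* nonzero on success */
}

static void example_ifunlock(struct xfs_inode *ip)
{
	complete(&ip->i_flush);		/* re-arm for the next locker */
}
#endif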
/*
* Dirty the XFS inode when mark_inode_dirty_sync() is called so that
* we catch unlogged VFS level updates to the inode.
*
* We need the barrier() to maintain correct ordering between unlogged
* updates and the transaction commit code that clears the i_update_core
* field. This requires all updates to be completed before marking the
* inode dirty.
*/
STATIC void
xfs_fs_dirty_inode(
struct inode *inode,
int flags)
{
barrier();
XFS_I(inode)->i_update_core = 1;
}
STATIC int
xfs_fs_write_inode(
struct inode *inode,
struct writeback_control *wbc)
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
int error = EAGAIN;
trace_xfs_write_inode(ip);
if (XFS_FORCED_SHUTDOWN(mp))
return -XFS_ERROR(EIO);
if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) {
/*
* Make sure the inode has made it into the log. Instead
* of forcing it all the way to stable storage using a
* synchronous transaction we let the log force inside the
* ->sync_fs call do that for us, which reduces the number
* of synchronous log forces dramatically.
*/
xfs_ioend_wait(ip);
error = xfs_log_dirty_inode(ip, NULL, 0);
if (error)
goto out;
return 0;
} else {
if (!ip->i_update_core)
return 0;
/*
* We make this non-blocking if the inode is contended, return
* EAGAIN to indicate to the caller that they did not succeed.
* This prevents the flush path from blocking on inodes inside
* another operation right now, they get caught later by
* xfs_sync.
*/
if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
goto out;
if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
goto out_unlock;
/*
* Now we have the flush lock and the inode is not pinned, we
* can check if the inode is really clean as we know that
* there are no pending transaction completions, it is not
* waiting on the delayed write queue and there is no IO in
* progress.
*/
if (xfs_inode_clean(ip)) {
xfs_ifunlock(ip);
error = 0;
goto out_unlock;
}
error = xfs_iflush(ip, SYNC_TRYLOCK);
}
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_SHARED);
out:
/*
* if we failed to write out the inode then mark
* it dirty again so we'll try again later.
*/
if (error)
xfs_mark_inode_dirty_sync(ip);
return -error;
}
STATIC void
xfs_fs_evict_inode(
struct inode *inode)
{
xfs_inode_t *ip = XFS_I(inode);
trace_xfs_evict_inode(ip);
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
XFS_STATS_INC(vn_rele);
XFS_STATS_INC(vn_remove);
XFS_STATS_DEC(vn_active);
/*
* The iolock is used by the file system to coordinate reads,
* writes, and block truncates. Up to this point the lock
* protected concurrent accesses by users of the inode. But
* from here forward we're doing some final processing of the
* inode because we're done with it, and although we reuse the
* iolock for protection it is really a distinct lock class
* (in the lockdep sense) from before. To keep lockdep happy
* (and basically indicate what we are doing), we explicitly
* re-init the iolock here.
*/
ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
xfs_inactive(ip);
}
STATIC void
xfs_free_fsname(
struct xfs_mount *mp)
{
kfree(mp->m_fsname);
kfree(mp->m_rtname);
kfree(mp->m_logname);
}
STATIC void
xfs_fs_put_super(
struct super_block *sb)
{
struct xfs_mount *mp = XFS_M(sb);
/*
* Unregister the memory shrinker before we tear down the mount
* structure so we don't have memory reclaim racing with us here.
*/
xfs_inode_shrinker_unregister(mp);
xfs_syncd_stop(mp);
/*
* Blow away any referenced inode in the filestreams cache.
* This can and will cause log traffic as inodes go inactive
* here.
*/
xfs_filestream_unmount(mp);
XFS_bflush(mp->m_ddev_targp);
xfs_unmountfs(mp);
xfs_freesb(mp);
xfs_icsb_destroy_counters(mp);
xfs_close_devices(mp);
xfs_free_fsname(mp);
kfree(mp);
}
STATIC int
xfs_fs_sync_fs(
struct super_block *sb,
int wait)
{
struct xfs_mount *mp = XFS_M(sb);
int error;
/*
* Not much we can do for the first async pass. Writing out the
* superblock would be counter-productive as we are going to redirty it
* when writing out other data and metadata (and writing out a single
* block is quite fast anyway).
*
* Try to asynchronously kick off quota syncing at least.
*/
if (!wait) {
xfs_qm_sync(mp, SYNC_TRYLOCK);
return 0;
}
error = xfs_quiesce_data(mp);
if (error)
return -error;
if (laptop_mode) {
/*
* The disk must be active because we're syncing.
* We schedule xfssyncd now (now that the disk is
* active) instead of later (when it might not be).
*/
flush_delayed_work_sync(&mp->m_sync_work);
}
return 0;
}
STATIC int
xfs_fs_statfs(
struct dentry *dentry,
struct kstatfs *statp)
{
struct xfs_mount *mp = XFS_M(dentry->d_sb);
xfs_sb_t *sbp = &mp->m_sb;
struct xfs_inode *ip = XFS_I(dentry->d_inode);
__uint64_t fakeinos, id;
xfs_extlen_t lsize;
__int64_t ffree;
statp->f_type = XFS_SB_MAGIC;
statp->f_namelen = MAXNAMELEN - 1;
id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
statp->f_fsid.val[0] = (u32)id;
statp->f_fsid.val[1] = (u32)(id >> 32);
xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
spin_lock(&mp->m_sb_lock);
statp->f_bsize = sbp->sb_blocksize;
lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
statp->f_blocks = sbp->sb_dblocks - lsize;
statp->f_bfree = statp->f_bavail =
sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
fakeinos = statp->f_bfree << sbp->sb_inopblog;
statp->f_files =
MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
if (mp->m_maxicount)
statp->f_files = min_t(typeof(statp->f_files),
statp->f_files,
mp->m_maxicount);
/* make sure statp->f_ffree does not underflow */
ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
statp->f_ffree = max_t(__int64_t, ffree, 0);
spin_unlock(&mp->m_sb_lock);
if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
(XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
xfs_qm_statvfs(ip, statp);
return 0;
}
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
__uint64_t resblks = 0;
mp->m_resblks_save = mp->m_resblks;
xfs_reserve_blocks(mp, &resblks, NULL);
}
STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
__uint64_t resblks;
if (mp->m_resblks_save) {
resblks = mp->m_resblks_save;
mp->m_resblks_save = 0;
} else
resblks = xfs_default_resblks(mp);
xfs_reserve_blocks(mp, &resblks, NULL);
}
STATIC int
xfs_fs_remount(
struct super_block *sb,
int *flags,
char *options)
{
struct xfs_mount *mp = XFS_M(sb);
substring_t args[MAX_OPT_ARGS];
char *p;
int error;
while ((p = strsep(&options, ",")) != NULL) {
int token;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_barrier:
mp->m_flags |= XFS_MOUNT_BARRIER;
break;
case Opt_nobarrier:
mp->m_flags &= ~XFS_MOUNT_BARRIER;
break;
default:
/*
* Logically we would return an error here to prevent
* users from believing they might have changed
* mount options using remount which can't be changed.
*
* But unfortunately mount(8) adds all options from
* mtab and fstab to the mount arguments in some cases
* so we can't blindly reject options, but have to
* check for each specified option if it actually
* differs from the currently set option and only
* reject it if that's the case.
*
* Until that is implemented we return success for
* every remount request, and silently ignore all
* options that we can't actually change.
*/
#if 0
xfs_info(mp,
"mount option \"%s\" not supported for remount\n", p);
return -EINVAL;
#else
break;
#endif
}
}
/* ro -> rw */
if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
mp->m_flags &= ~XFS_MOUNT_RDONLY;
/*
* If this is the first remount to writeable state we
* might have some superblock changes to update.
*/
if (mp->m_update_flags) {
error = xfs_mount_log_sb(mp, mp->m_update_flags);
if (error) {
xfs_warn(mp, "failed to write sb changes");
return error;
}
mp->m_update_flags = 0;
}
/*
* Fill out the reserve pool if it is empty. Use the stashed
* value if it is non-zero, otherwise go with the default.
*/
xfs_restore_resvblks(mp);
}
/* rw -> ro */
if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
/*
* After we have synced the data but before we sync the
* metadata, we need to free up the reserve block pool so that
* the used block count in the superblock on disk is correct at
* the end of the remount. Stash the current reserve pool size
* so that if we get remounted rw, we can return it to the same
* size.
*/
xfs_quiesce_data(mp);
xfs_save_resvblks(mp);
xfs_quiesce_attr(mp);
mp->m_flags |= XFS_MOUNT_RDONLY;
}
return 0;
}
/*
* Second stage of a freeze. The data is already frozen so we only
* need to take care of the metadata. Once that's done write a dummy
* record to dirty the log in case of a crash while frozen.
*/
STATIC int
xfs_fs_freeze(
struct super_block *sb)
{
struct xfs_mount *mp = XFS_M(sb);
xfs_save_resvblks(mp);
xfs_quiesce_attr(mp);
return -xfs_fs_log_dummy(mp);
}
STATIC int
xfs_fs_unfreeze(
struct super_block *sb)
{
struct xfs_mount *mp = XFS_M(sb);
xfs_restore_resvblks(mp);
return 0;
}
STATIC int
xfs_fs_show_options(
struct seq_file *m,
struct vfsmount *mnt)
{
return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
}
/*
* This function fills in xfs_mount_t fields based on mount args.
* Note: the superblock _has_ now been read in.
*/
STATIC int
xfs_finish_flags(
struct xfs_mount *mp)
{
int ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
/* Fail a mount where the logbuf is smaller than the log stripe */
if (xfs_sb_version_haslogv2(&mp->m_sb)) {
if (mp->m_logbsize <= 0 &&
mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
mp->m_logbsize = mp->m_sb.sb_logsunit;
} else if (mp->m_logbsize > 0 &&
mp->m_logbsize < mp->m_sb.sb_logsunit) {
xfs_warn(mp,
"logbuf size must be greater than or equal to log stripe size");
return XFS_ERROR(EINVAL);
}
} else {
/* Fail a mount if the logbuf is larger than 32K */
if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
xfs_warn(mp,
"logbuf size for version 1 logs must be 16K or 32K");
return XFS_ERROR(EINVAL);
}
}
/*
* mkfs'ed attr2 will turn on attr2 mount unless explicitly
* told by noattr2 to turn it off
*/
if (xfs_sb_version_hasattr2(&mp->m_sb) &&
!(mp->m_flags & XFS_MOUNT_NOATTR2))
mp->m_flags |= XFS_MOUNT_ATTR2;
/*
* prohibit r/w mounts of read-only filesystems
*/
if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
xfs_warn(mp,
"cannot mount a read-only filesystem as read-write");
return XFS_ERROR(EROFS);
}
return 0;
}
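/*
 * Worked example of the v2 log check above (illustrative numbers):
 * with sb_logsunit = 64k and no logbsize= option, m_logbsize is bumped
 * up to 64k; an explicit logbsize=32k is rejected with EINVAL because
 * 32k < 64k. For v1 logs the cap is XLOG_BIG_RECORD_BSIZE (32k).
 */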
STATIC int
xfs_fs_fill_super(
struct super_block *sb,
void *data,
int silent)
{
struct inode *root;
struct xfs_mount *mp = NULL;
int flags = 0, error = ENOMEM;
mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
if (!mp)
goto out;
spin_lock_init(&mp->m_sb_lock);
mutex_init(&mp->m_growlock);
atomic_set(&mp->m_active_trans, 0);
mp->m_super = sb;
sb->s_fs_info = mp;
error = xfs_parseargs(mp, (char *)data);
if (error)
goto out_free_fsname;
sb_min_blocksize(sb, BBSIZE);
sb->s_xattr = xfs_xattr_handlers;
sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
sb->s_qcop = &xfs_quotactl_operations;
#endif
sb->s_op = &xfs_super_operations;
if (silent)
flags |= XFS_MFSI_QUIET;
error = xfs_open_devices(mp);
if (error)
goto out_free_fsname;
error = xfs_icsb_init_counters(mp);
if (error)
goto out_close_devices;
error = xfs_readsb(mp, flags);
if (error)
goto out_destroy_counters;
error = xfs_finish_flags(mp);
if (error)
goto out_free_sb;
error = xfs_setup_devices(mp);
if (error)
goto out_free_sb;
error = xfs_filestream_mount(mp);
if (error)
goto out_free_sb;
/*
* we must configure the block size in the superblock before we run the
* full mount process as the mount process can look up and cache inodes.
* For the same reason we must also initialise the syncd and register
* the inode cache shrinker so that inodes can be reclaimed during
* operations like a quotacheck that iterate all inodes in the
* filesystem.
*/
sb->s_magic = XFS_SB_MAGIC;
sb->s_blocksize = mp->m_sb.sb_blocksize;
sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
sb->s_time_gran = 1;
set_posix_acl_flag(sb);
xfs_inode_shrinker_register(mp);
error = xfs_mountfs(mp);
if (error)
goto out_filestream_unmount;
error = xfs_syncd_init(mp);
if (error)
goto out_unmount;
root = igrab(VFS_I(mp->m_rootip));
if (!root) {
error = ENOENT;
goto out_syncd_stop;
}
if (is_bad_inode(root)) {
error = EINVAL;
goto out_syncd_stop;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
error = ENOMEM;
goto out_iput;
}
return 0;
out_filestream_unmount:
xfs_inode_shrinker_unregister(mp);
xfs_filestream_unmount(mp);
out_free_sb:
xfs_freesb(mp);
out_destroy_counters:
xfs_icsb_destroy_counters(mp);
out_close_devices:
xfs_close_devices(mp);
out_free_fsname:
xfs_free_fsname(mp);
kfree(mp);
out:
return -error;
out_iput:
iput(root);
out_syncd_stop:
xfs_syncd_stop(mp);
out_unmount:
xfs_inode_shrinker_unregister(mp);
/*
* Blow away any referenced inode in the filestreams cache.
* This can and will cause log traffic as inodes go inactive
* here.
*/
xfs_filestream_unmount(mp);
XFS_bflush(mp->m_ddev_targp);
xfs_unmountfs(mp);
goto out_free_sb;
}
STATIC struct dentry *
xfs_fs_mount(
struct file_system_type *fs_type,
int flags,
const char *dev_name,
void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}
static const struct super_operations xfs_super_operations = {
.alloc_inode = xfs_fs_alloc_inode,
.destroy_inode = xfs_fs_destroy_inode,
.dirty_inode = xfs_fs_dirty_inode,
.write_inode = xfs_fs_write_inode,
.evict_inode = xfs_fs_evict_inode,
.put_super = xfs_fs_put_super,
.sync_fs = xfs_fs_sync_fs,
.freeze_fs = xfs_fs_freeze,
.unfreeze_fs = xfs_fs_unfreeze,
.statfs = xfs_fs_statfs,
.remount_fs = xfs_fs_remount,
.show_options = xfs_fs_show_options,
};
static struct file_system_type xfs_fs_type = {
.owner = THIS_MODULE,
.name = "xfs",
.mount = xfs_fs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
STATIC int __init
xfs_init_zones(void)
{
xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
if (!xfs_ioend_zone)
goto out;
xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
xfs_ioend_zone);
if (!xfs_ioend_pool)
goto out_destroy_ioend_zone;
xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
"xfs_log_ticket");
if (!xfs_log_ticket_zone)
goto out_destroy_ioend_pool;
xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
"xfs_bmap_free_item");
if (!xfs_bmap_free_item_zone)
goto out_destroy_log_ticket_zone;
xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
"xfs_btree_cur");
if (!xfs_btree_cur_zone)
goto out_destroy_bmap_free_item_zone;
xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
"xfs_da_state");
if (!xfs_da_state_zone)
goto out_destroy_btree_cur_zone;
xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
if (!xfs_dabuf_zone)
goto out_destroy_da_state_zone;
xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
if (!xfs_ifork_zone)
goto out_destroy_dabuf_zone;
xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
if (!xfs_trans_zone)
goto out_destroy_ifork_zone;
xfs_log_item_desc_zone =
kmem_zone_init(sizeof(struct xfs_log_item_desc),
"xfs_log_item_desc");
if (!xfs_log_item_desc_zone)
goto out_destroy_trans_zone;
/*
* The size of the zone allocated buf log item is the maximum
* size possible under XFS. This wastes a little bit of memory,
* but it is much faster.
*/
xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
(((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
NBWORD) * sizeof(int))), "xfs_buf_item");
if (!xfs_buf_item_zone)
goto out_destroy_log_item_desc_zone;
xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
((XFS_EFD_MAX_FAST_EXTENTS - 1) *
sizeof(xfs_extent_t))), "xfs_efd_item");
if (!xfs_efd_zone)
goto out_destroy_buf_item_zone;
xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
((XFS_EFI_MAX_FAST_EXTENTS - 1) *
sizeof(xfs_extent_t))), "xfs_efi_item");
if (!xfs_efi_zone)
goto out_destroy_efd_zone;
xfs_inode_zone =
kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
xfs_fs_inode_init_once);
if (!xfs_inode_zone)
goto out_destroy_efi_zone;
xfs_ili_zone =
kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
KM_ZONE_SPREAD, NULL);
if (!xfs_ili_zone)
goto out_destroy_inode_zone;
return 0;
out_destroy_inode_zone:
kmem_zone_destroy(xfs_inode_zone);
out_destroy_efi_zone:
kmem_zone_destroy(xfs_efi_zone);
out_destroy_efd_zone:
kmem_zone_destroy(xfs_efd_zone);
out_destroy_buf_item_zone:
kmem_zone_destroy(xfs_buf_item_zone);
out_destroy_log_item_desc_zone:
kmem_zone_destroy(xfs_log_item_desc_zone);
out_destroy_trans_zone:
kmem_zone_destroy(xfs_trans_zone);
out_destroy_ifork_zone:
kmem_zone_destroy(xfs_ifork_zone);
out_destroy_dabuf_zone:
kmem_zone_destroy(xfs_dabuf_zone);
out_destroy_da_state_zone:
kmem_zone_destroy(xfs_da_state_zone);
out_destroy_btree_cur_zone:
kmem_zone_destroy(xfs_btree_cur_zone);
out_destroy_bmap_free_item_zone:
kmem_zone_destroy(xfs_bmap_free_item_zone);
out_destroy_log_ticket_zone:
kmem_zone_destroy(xfs_log_ticket_zone);
out_destroy_ioend_pool:
mempool_destroy(xfs_ioend_pool);
out_destroy_ioend_zone:
kmem_zone_destroy(xfs_ioend_zone);
out:
return -ENOMEM;
}
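/*
 * Worked example of the buf item zone sizing above, assuming the usual
 * constants (XFS_MAX_BLOCKSIZE = 64k, XFS_BLF_CHUNK = 128, NBWORD = 32):
 * the per-item dirty bitmap needs (65536 / 128) / 32 = 16 extra ints on
 * top of sizeof(xfs_buf_log_item_t).
 */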
STATIC void
xfs_destroy_zones(void)
{
kmem_zone_destroy(xfs_ili_zone);
kmem_zone_destroy(xfs_inode_zone);
kmem_zone_destroy(xfs_efi_zone);
kmem_zone_destroy(xfs_efd_zone);
kmem_zone_destroy(xfs_buf_item_zone);
kmem_zone_destroy(xfs_log_item_desc_zone);
kmem_zone_destroy(xfs_trans_zone);
kmem_zone_destroy(xfs_ifork_zone);
kmem_zone_destroy(xfs_dabuf_zone);
kmem_zone_destroy(xfs_da_state_zone);
kmem_zone_destroy(xfs_btree_cur_zone);
kmem_zone_destroy(xfs_bmap_free_item_zone);
kmem_zone_destroy(xfs_log_ticket_zone);
mempool_destroy(xfs_ioend_pool);
kmem_zone_destroy(xfs_ioend_zone);
}
STATIC int __init
xfs_init_workqueues(void)
{
/*
* max_active is set to 8 to give enough concurrency to allow
* multiple work operations on each CPU to run. This allows multiple
* filesystems to be running sync work concurrently, and scales with
* the number of CPUs in the system.
*/
xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
if (!xfs_syncd_wq)
return -ENOMEM;
return 0;
}
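/*
 * Illustrative sketch (disabled) of how sync work is queued on this
 * workqueue; the real caller lives in xfs_sync.c, and the member and
 * sysctl names below are assumptions taken from that file.
 */
#if 0
static void
sketch_queue_sync_work(struct xfs_mount *mp)
{
	/* re-arm the periodic sync, xfs_syncd_centisecs from now */
	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
			   msecs_to_jiffies(xfs_syncd_centisecs * 10));
}
#endif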
STATIC void
xfs_destroy_workqueues(void)
{
destroy_workqueue(xfs_syncd_wq);
}
STATIC int __init
init_xfs_fs(void)
{
int error;
printk(KERN_INFO XFS_VERSION_STRING " with "
XFS_BUILD_OPTIONS " enabled\n");
xfs_ioend_init();
xfs_dir_startup();
error = xfs_init_zones();
if (error)
goto out;
error = xfs_init_workqueues();
if (error)
goto out_destroy_zones;
error = xfs_mru_cache_init();
if (error)
goto out_destroy_wq;
error = xfs_filestream_init();
if (error)
goto out_mru_cache_uninit;
error = xfs_buf_init();
if (error)
goto out_filestream_uninit;
error = xfs_init_procfs();
if (error)
goto out_buf_terminate;
error = xfs_sysctl_register();
if (error)
goto out_cleanup_procfs;
vfs_initquota();
error = register_filesystem(&xfs_fs_type);
if (error)
goto out_sysctl_unregister;
return 0;
out_sysctl_unregister:
xfs_sysctl_unregister();
out_cleanup_procfs:
xfs_cleanup_procfs();
out_buf_terminate:
xfs_buf_terminate();
out_filestream_uninit:
xfs_filestream_uninit();
out_mru_cache_uninit:
xfs_mru_cache_uninit();
out_destroy_wq:
xfs_destroy_workqueues();
out_destroy_zones:
xfs_destroy_zones();
out:
return error;
}
STATIC void __exit
exit_xfs_fs(void)
{
vfs_exitquota();
unregister_filesystem(&xfs_fs_type);
xfs_sysctl_unregister();
xfs_cleanup_procfs();
xfs_buf_terminate();
xfs_filestream_uninit();
xfs_mru_cache_uninit();
xfs_destroy_workqueues();
xfs_destroy_zones();
}
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");
| gpl-2.0 |
bigzz/kernel_msm | drivers/gpu/drm/drm_pci.c | 1990 | 13046 | /* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
* \file drm_pci.c
* \brief Functions and ioctls to manage PCI memory
*
* \warning These interfaces aren't stable yet.
*
* \todo Implement the remaining ioctl's for the PCI pools.
* \todo The wrappers here are so thin that they would be better off inlined.
*
* \author José Fonseca <jrfonseca@tungstengraphics.com>
* \author Leif Delgass <ldelgass@retinalburn.net>
*/
/*
* Copyright 2003 José Fonseca.
* Copyright 2003 Leif Delgass.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>
/**********************************************************************/
/** \name PCI memory */
/*@{*/
/**
* \brief Allocate a PCI consistent memory block, for DMA.
*/
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
#if 1
unsigned long addr;
size_t sz;
#endif
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
* Return NULL here for now to make sure nobody tries for larger alignment
*/
if (align > size)
return NULL;
dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
if (!dmah)
return NULL;
dmah->size = size;
dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
if (dmah->vaddr == NULL) {
kfree(dmah);
return NULL;
}
memset(dmah->vaddr, 0, size);
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Reserve */
for (addr = (unsigned long)dmah->vaddr, sz = size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
SetPageReserved(virt_to_page(addr));
}
return dmah;
}
EXPORT_SYMBOL(drm_pci_alloc);
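/*
 * Illustrative caller pattern (disabled; not part of this file): pair
 * every drm_pci_alloc() with a drm_pci_free() and use the two views of
 * the buffer carried by the handle.
 */
#if 0
static int sketch_use_pci_dma(struct drm_device *dev)
{
	drm_dma_handle_t *dmah;

	dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
	if (dmah == NULL)
		return -ENOMEM;
	/* dmah->vaddr is the CPU view, dmah->busaddr the device view */
	drm_pci_free(dev, dmah);
	return 0;
}
#endif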
/**
* \brief Free a PCI consistent memory block without freeing its descriptor.
*
* This function is for internal use in the Linux-specific DRM core code.
*/
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
#if 1
unsigned long addr;
size_t sz;
#endif
if (dmah->vaddr) {
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Unreserve */
for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
}
dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
dmah->busaddr);
}
}
/**
* \brief Free a PCI consistent memory block
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
__drm_pci_free(dev, dmah);
kfree(dmah);
}
EXPORT_SYMBOL(drm_pci_free);
#ifdef CONFIG_PCI
static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
/* For historical reasons, drm_get_pci_domain() is busticated
* on most archs and has to remain so for userspace interface
* < 1.4, except on alpha which was right from the beginning
*/
if (dev->if_version < 0x10004)
return 0;
#endif /* __alpha__ */
return pci_domain_nr(dev->pdev->bus);
}
static int drm_pci_get_irq(struct drm_device *dev)
{
return dev->pdev->irq;
}
static const char *drm_pci_get_name(struct drm_device *dev)
{
struct pci_driver *pdriver = dev->driver->kdriver.pci;
return pdriver->name;
}
static int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
int len, ret;
struct pci_driver *pdriver = dev->driver->kdriver.pci;
master->unique_len = 40;
master->unique_size = master->unique_len;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (master->unique == NULL)
return -ENOMEM;
len = snprintf(master->unique, master->unique_len,
"pci:%04x:%02x:%02x.%d",
drm_get_pci_domain(dev),
dev->pdev->bus->number,
PCI_SLOT(dev->pdev->devfn),
PCI_FUNC(dev->pdev->devfn));
if (len >= master->unique_len) {
DRM_ERROR("buffer overflow");
ret = -EINVAL;
goto err;
} else
master->unique_len = len;
dev->devname =
kmalloc(strlen(pdriver->name) +
master->unique_len + 2, GFP_KERNEL);
if (dev->devname == NULL) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", pdriver->name,
master->unique);
return 0;
err:
return ret;
}
static int drm_pci_set_unique(struct drm_device *dev,
struct drm_master *master,
struct drm_unique *u)
{
int domain, bus, slot, func, ret;
const char *bus_name;
master->unique_len = u->unique_len;
master->unique_size = u->unique_len + 1;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
if (!master->unique) {
ret = -ENOMEM;
goto err;
}
if (copy_from_user(master->unique, u->unique, master->unique_len)) {
ret = -EFAULT;
goto err;
}
master->unique[master->unique_len] = '\0';
bus_name = dev->driver->bus->get_name(dev);
dev->devname = kmalloc(strlen(bus_name) +
strlen(master->unique) + 2, GFP_KERNEL);
if (!dev->devname) {
ret = -ENOMEM;
goto err;
}
sprintf(dev->devname, "%s@%s", bus_name,
master->unique);
/* Return error if the busid submitted doesn't match the device's actual
* busid.
*/
ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3) {
ret = -EINVAL;
goto err;
}
domain = bus >> 8;
bus &= 0xff;
if ((domain != drm_get_pci_domain(dev)) ||
(bus != dev->pdev->bus->number) ||
(slot != PCI_SLOT(dev->pdev->devfn)) ||
(func != PCI_FUNC(dev->pdev->devfn))) {
ret = -EINVAL;
goto err;
}
return 0;
err:
return ret;
}
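/*
 * Illustrative helper (disabled): the inverse of the decode above.
 * Userspace folds the PCI domain into the top bits of busnum, so
 * domain 1, bus 2 arrives as "PCI:258:slot:func".
 */
#if 0
static int sketch_encode_busnum(int domain, int bus)
{
	/* mirrors the decode above: domain = bus >> 8, bus &= 0xff */
	return (domain << 8) | (bus & 0xff);
}
#endif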
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
(p->busnum & 0xff) != dev->pdev->bus->number ||
p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
p->irq = dev->pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
return 0;
}
static int drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_has_AGP(dev)) {
if (drm_pci_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
&& (dev->agp == NULL)) {
DRM_ERROR("Cannot initialize the agpgart module.\n");
return -EINVAL;
}
if (drm_core_has_MTRR(dev)) {
if (dev->agp)
dev->agp->agp_mtrr =
mtrr_add(dev->agp->agp_info.aper_base,
dev->agp->agp_info.aper_size *
1024 * 1024, MTRR_TYPE_WRCOMB, 1);
}
}
return 0;
}
static struct drm_bus drm_pci_bus = {
.bus_type = DRIVER_BUS_PCI,
.get_irq = drm_pci_get_irq,
.get_name = drm_pci_get_name,
.set_busid = drm_pci_set_busid,
.set_unique = drm_pci_set_unique,
.irq_by_busid = drm_pci_irq_by_busid,
.agp_init = drm_pci_agp_init,
};
/**
* Register.
*
* \param pdev - PCI device structure
* \param ent entry from the PCI ID table with device type flags
* \return zero on success or a negative number on failure.
*
* Attempts to get inter-module "drm" information. If we are first,
* registers the character device and inter-module information.
* Tries to register; if registration fails, backs out the previous work.
*/
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
DRM_DEBUG("\n");
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
ret = pci_enable_device(pdev);
if (ret)
goto err_g1;
dev->pdev = pdev;
dev->dev = &pdev->dev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
#ifdef __alpha__
dev->hose = pdev->sysdata;
#endif
mutex_lock(&drm_global_mutex);
if ((ret = drm_fill_in_dev(dev, ent, driver))) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g2;
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
pci_set_drvdata(pdev, dev);
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
if (ret)
goto err_g2;
}
if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
goto err_g3;
if (dev->driver->load) {
ret = dev->driver->load(dev, ent->driver_data);
if (ret)
goto err_g4;
}
/* setup the grouping for the legacy output */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = drm_mode_group_init_legacy_group(dev,
&dev->primary->mode_group);
if (ret)
goto err_g4;
}
list_add_tail(&dev->driver_item, &driver->device_list);
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, pci_name(pdev), dev->primary->index);
mutex_unlock(&drm_global_mutex);
return 0;
err_g4:
drm_put_minor(&dev->primary);
err_g3:
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
err_g2:
pci_disable_device(pdev);
err_g1:
kfree(dev);
mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
/**
* PCI device initialization. Called directly from modules at load time.
*
* \return zero on success or a negative number on failure.
*
* Initializes a drm_device structure, registering the
* stubs and initializing the AGP device.
*
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
*/
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
int i;
DRM_DEBUG("\n");
INIT_LIST_HEAD(&driver->device_list);
driver->kdriver.pci = pdriver;
driver->bus = &drm_pci_bus;
if (driver->driver_features & DRIVER_MODESET)
return pci_register_driver(pdriver);
/* If not using KMS, fall back to stealth mode manual scanning. */
for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
pid = &pdriver->id_table[i];
/* Loop around setting up a DRM device for each PCI device
* matching our ID and device class. If we had the internal
* function that pci_get_subsys and pci_get_class used, we'd
* be able to just pass pid in instead of doing a two-stage
* thing.
*/
pdev = NULL;
while ((pdev =
pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
pid->subdevice, pdev)) != NULL) {
if ((pdev->class & pid->class_mask) != pid->class)
continue;
/* stealth mode requires a manual probe */
pci_dev_get(pdev);
drm_get_pci_dev(pdev, pid, driver);
}
}
return 0;
}
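/*
 * Illustrative module-init pattern for a KMS driver built on
 * drm_pci_init() (disabled; every name below is a placeholder, not a
 * real driver).
 */
#if 0
static struct pci_driver sketch_pci_driver = {
	.name	= "sketch",
	/* .id_table, .probe, .remove, ... */
};

static struct drm_driver sketch_drm_driver = {
	.driver_features = DRIVER_MODESET,
	/* .fops, .load, ... */
};

static int __init sketch_init(void)
{
	/* with DRIVER_MODESET set this reduces to pci_register_driver() */
	return drm_pci_init(&sketch_drm_driver, &sketch_pci_driver);
}
#endif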
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
u32 lnkcap, lnkcap2;
*mask = 0;
if (!dev->pdev)
return -EINVAL;
root = dev->pdev->bus->self;
/* we've been informed that VIA and ServerWorks bridges don't make the cut */
if (root->vendor == PCI_VENDOR_ID_VIA ||
root->vendor == PCI_VENDOR_ID_SERVERWORKS)
return -EINVAL;
pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);
if (lnkcap2) { /* PCIe r3.0-compliant */
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
*mask |= DRM_PCIE_SPEED_50;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
*mask |= DRM_PCIE_SPEED_80;
} else { /* pre-r3.0 */
if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
*mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
}
DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
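/*
 * Illustrative caller (disabled): a driver checking whether the slot
 * supports 5 GT/s, in the style of the radeon/nouveau users of this
 * helper.
 */
#if 0
static bool sketch_supports_5gt(struct drm_device *dev)
{
	u32 mask;

	if (drm_pcie_get_speed_cap_mask(dev, &mask))
		return false;
	return (mask & DRM_PCIE_SPEED_50) != 0;
}
#endif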
#else
int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
return -1;
}
#endif
EXPORT_SYMBOL(drm_pci_init);
/*@}*/
void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct drm_device *dev, *tmp;
DRM_DEBUG("\n");
if (driver->driver_features & DRIVER_MODESET) {
pci_unregister_driver(pdriver);
} else {
list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
drm_put_dev(dev);
}
DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_pci_exit);
| gpl-2.0 |
Herna1994/android_kernel_cyanogen_msm8916 | sound/isa/opl3sa2.c | 2246 | 29761 | /*
* Driver for Yamaha OPL3-SA[2,3] soundcards
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <linux/err.h>
#include <linux/isa.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/pnp.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/wss.h>
#include <sound/mpu401.h>
#include <sound/opl3.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include <asm/io.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Yamaha OPL3SA2+");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Yamaha,YMF719E-S},"
"{Genius,Sound Maker 3DX},"
"{Yamaha,OPL3SA3},"
"{Intel,AL440LX sound},"
"{NeoMagic,MagicWave 3DX}}");
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */
#ifdef CONFIG_PNP
static bool isapnp[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1};
#endif
static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0xf86,0x370,0x100 */
static long sb_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0x220,0x240,0x260 */
static long wss_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;/* 0x530,0xe80,0xf40,0x604 */
static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0x388 */
static long midi_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;/* 0x330,0x300 */
static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 0,1,3,5,9,11,12,15 */
static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 1,3,5,6,7 */
static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 1,3,5,6,7 */
static int opl3sa3_ymode[SNDRV_CARDS]; /* 0,1,2,3 */ /*SL Added*/
module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for OPL3-SA soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for OPL3-SA soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable OPL3-SA soundcard.");
#ifdef CONFIG_PNP
module_param_array(isapnp, bool, NULL, 0444);
MODULE_PARM_DESC(isapnp, "PnP detection for specified soundcard.");
#endif
module_param_array(port, long, NULL, 0444);
MODULE_PARM_DESC(port, "Port # for OPL3-SA driver.");
module_param_array(sb_port, long, NULL, 0444);
MODULE_PARM_DESC(sb_port, "SB port # for OPL3-SA driver.");
module_param_array(wss_port, long, NULL, 0444);
MODULE_PARM_DESC(wss_port, "WSS port # for OPL3-SA driver.");
module_param_array(fm_port, long, NULL, 0444);
MODULE_PARM_DESC(fm_port, "FM port # for OPL3-SA driver.");
module_param_array(midi_port, long, NULL, 0444);
MODULE_PARM_DESC(midi_port, "MIDI port # for OPL3-SA driver.");
module_param_array(irq, int, NULL, 0444);
MODULE_PARM_DESC(irq, "IRQ # for OPL3-SA driver.");
module_param_array(dma1, int, NULL, 0444);
MODULE_PARM_DESC(dma1, "DMA1 # for OPL3-SA driver.");
module_param_array(dma2, int, NULL, 0444);
MODULE_PARM_DESC(dma2, "DMA2 # for OPL3-SA driver.");
module_param_array(opl3sa3_ymode, int, NULL, 0444);
MODULE_PARM_DESC(opl3sa3_ymode, "Speaker size selection for 3D Enhancement mode: Desktop/Large Notebook/Small Notebook/HiFi.");
#ifdef CONFIG_PNP
static int isa_registered;
static int pnp_registered;
static int pnpc_registered;
#endif
/* control ports */
#define OPL3SA2_PM_CTRL 0x01
#define OPL3SA2_SYS_CTRL 0x02
#define OPL3SA2_IRQ_CONFIG 0x03
#define OPL3SA2_IRQ_STATUS 0x04
#define OPL3SA2_DMA_CONFIG 0x06
#define OPL3SA2_MASTER_LEFT 0x07
#define OPL3SA2_MASTER_RIGHT 0x08
#define OPL3SA2_MIC 0x09
#define OPL3SA2_MISC 0x0A
/* opl3sa3 only */
#define OPL3SA3_DGTL_DOWN 0x12
#define OPL3SA3_ANLG_DOWN 0x13
#define OPL3SA3_WIDE 0x14
#define OPL3SA3_BASS 0x15
#define OPL3SA3_TREBLE 0x16
/* power management bits */
#define OPL3SA2_PM_ADOWN 0x20
#define OPL3SA2_PM_PSV 0x04
#define OPL3SA2_PM_PDN 0x02
#define OPL3SA2_PM_PDX 0x01
#define OPL3SA2_PM_D0 0x00
#define OPL3SA2_PM_D3 (OPL3SA2_PM_ADOWN|OPL3SA2_PM_PSV|OPL3SA2_PM_PDN|OPL3SA2_PM_PDX)
struct snd_opl3sa2 {
int version; /* 2 or 3 */
unsigned long port; /* control port */
struct resource *res_port; /* control port resource */
int irq;
int single_dma;
spinlock_t reg_lock;
struct snd_hwdep *synth;
struct snd_rawmidi *rmidi;
struct snd_wss *wss;
unsigned char ctlregs[0x20];
int ymode; /* SL added */
struct snd_kcontrol *master_switch;
struct snd_kcontrol *master_volume;
};
#define PFX "opl3sa2: "
#ifdef CONFIG_PNP
static struct pnp_device_id snd_opl3sa2_pnpbiosids[] = {
{ .id = "YMH0021" },
{ .id = "NMX2210" }, /* Gateway Solo 2500 */
{ .id = "" } /* end */
};
MODULE_DEVICE_TABLE(pnp, snd_opl3sa2_pnpbiosids);
static struct pnp_card_device_id snd_opl3sa2_pnpids[] = {
/* Yamaha YMF719E-S (Genius Sound Maker 3DX) */
{ .id = "YMH0020", .devs = { { "YMH0021" } } },
/* Yamaha OPL3-SA3 (integrated on Intel's Pentium II AL440LX motherboard) */
{ .id = "YMH0030", .devs = { { "YMH0021" } } },
/* Yamaha OPL3-SA2 */
{ .id = "YMH0800", .devs = { { "YMH0021" } } },
/* Yamaha OPL3-SA2 */
{ .id = "YMH0801", .devs = { { "YMH0021" } } },
/* NeoMagic MagicWave 3DX */
{ .id = "NMX2200", .devs = { { "YMH2210" } } },
/* NeoMagic MagicWave 3D */
{ .id = "NMX2200", .devs = { { "NMX2210" } } },
/* --- */
{ .id = "" } /* end */
};
MODULE_DEVICE_TABLE(pnp_card, snd_opl3sa2_pnpids);
#endif /* CONFIG_PNP */
/* read control port (w/o spinlock) */
static unsigned char __snd_opl3sa2_read(struct snd_opl3sa2 *chip, unsigned char reg)
{
unsigned char result;
#if 0
outb(0x1d, chip->port); /* password */
printk(KERN_DEBUG "read [0x%lx] = 0x%x\n",
       chip->port, inb(chip->port));
#endif
outb(reg, chip->port); /* register */
result = inb(chip->port + 1);
#if 0
printk(KERN_DEBUG "read [0x%lx] = 0x%x [0x%x]\n",
port, result, inb(port));
#endif
return result;
}
/* read control port (with spinlock) */
static unsigned char snd_opl3sa2_read(struct snd_opl3sa2 *chip, unsigned char reg)
{
unsigned long flags;
unsigned char result;
spin_lock_irqsave(&chip->reg_lock, flags);
result = __snd_opl3sa2_read(chip, reg);
spin_unlock_irqrestore(&chip->reg_lock, flags);
return result;
}
/* write control port (w/o spinlock) */
static void __snd_opl3sa2_write(struct snd_opl3sa2 *chip, unsigned char reg, unsigned char value)
{
#if 0
outb(0x1d, chip->port); /* password */
#endif
outb(reg, chip->port); /* register */
outb(value, chip->port + 1);
chip->ctlregs[reg] = value;
}
/* write control port (with spinlock) */
static void snd_opl3sa2_write(struct snd_opl3sa2 *chip, unsigned char reg, unsigned char value)
{
unsigned long flags;
spin_lock_irqsave(&chip->reg_lock, flags);
__snd_opl3sa2_write(chip, reg, value);
spin_unlock_irqrestore(&chip->reg_lock, flags);
}
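/*
 * Illustrative sketch (disabled): a read-modify-write of a control
 * register must hold reg_lock across both halves, so it uses the
 * unlocked accessors, exactly like the mixer put callbacks below.
 * The register and bit chosen here are examples only.
 */
#if 0
static void sketch_rmw_misc(struct snd_opl3sa2 *chip)
{
	unsigned long flags;
	unsigned char tmp;

	spin_lock_irqsave(&chip->reg_lock, flags);
	tmp = __snd_opl3sa2_read(chip, OPL3SA2_MISC);
	__snd_opl3sa2_write(chip, OPL3SA2_MISC, tmp | 0x80);
	spin_unlock_irqrestore(&chip->reg_lock, flags);
}
#endif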
static int snd_opl3sa2_detect(struct snd_card *card)
{
struct snd_opl3sa2 *chip = card->private_data;
unsigned long port;
unsigned char tmp, tmp1;
char str[2];
port = chip->port;
if ((chip->res_port = request_region(port, 2, "OPL3-SA control")) == NULL) {
snd_printk(KERN_ERR PFX "can't grab port 0x%lx\n", port);
return -EBUSY;
}
/*
snd_printk(KERN_DEBUG "REG 0A = 0x%x\n",
snd_opl3sa2_read(chip, 0x0a));
*/
chip->version = 0;
tmp = snd_opl3sa2_read(chip, OPL3SA2_MISC);
if (tmp == 0xff) {
snd_printd("OPL3-SA [0x%lx] detect = 0x%x\n", port, tmp);
return -ENODEV;
}
switch (tmp & 0x07) {
case 0x01:
chip->version = 2; /* YMF711 */
break;
default:
chip->version = 3;
/* 0x02 - standard */
/* 0x03 - YM715B */
/* 0x04 - YM719 - OPL-SA4? */
/* 0x05 - OPL3-SA3 - Libretto 100 */
/* 0x07 - unknown - Neomagic MagicWave 3D */
break;
}
str[0] = chip->version + '0';
str[1] = 0;
strcat(card->shortname, str);
snd_opl3sa2_write(chip, OPL3SA2_MISC, tmp ^ 7);
if ((tmp1 = snd_opl3sa2_read(chip, OPL3SA2_MISC)) != tmp) {
snd_printd("OPL3-SA [0x%lx] detect (1) = 0x%x (0x%x)\n", port, tmp, tmp1);
return -ENODEV;
}
/* try if the MIC register is accessible */
tmp = snd_opl3sa2_read(chip, OPL3SA2_MIC);
snd_opl3sa2_write(chip, OPL3SA2_MIC, 0x8a);
if (((tmp1 = snd_opl3sa2_read(chip, OPL3SA2_MIC)) & 0x9f) != 0x8a) {
snd_printd("OPL3-SA [0x%lx] detect (2) = 0x%x (0x%x)\n", port, tmp, tmp1);
return -ENODEV;
}
snd_opl3sa2_write(chip, OPL3SA2_MIC, 0x9f);
/* initialization */
/* Power Management - full on */
snd_opl3sa2_write(chip, OPL3SA2_PM_CTRL, OPL3SA2_PM_D0);
if (chip->version > 2) {
/* ymode is bits 4&5 (of 0 to 7) on all but opl3sa2 versions */
snd_opl3sa2_write(chip, OPL3SA2_SYS_CTRL, (chip->ymode << 4));
} else {
/* default for opl3sa2 versions */
snd_opl3sa2_write(chip, OPL3SA2_SYS_CTRL, 0x00);
}
snd_opl3sa2_write(chip, OPL3SA2_IRQ_CONFIG, 0x0d); /* Interrupt Channel Configuration - IRQ A = OPL3 + MPU + WSS */
if (chip->single_dma) {
snd_opl3sa2_write(chip, OPL3SA2_DMA_CONFIG, 0x03); /* DMA Configuration - DMA A = WSS-R + WSS-P */
} else {
snd_opl3sa2_write(chip, OPL3SA2_DMA_CONFIG, 0x21); /* DMA Configuration - DMA B = WSS-R, DMA A = WSS-P */
}
snd_opl3sa2_write(chip, OPL3SA2_MISC, 0x80 | (tmp & 7)); /* Miscellaneous - default */
if (chip->version > 2) {
snd_opl3sa2_write(chip, OPL3SA3_DGTL_DOWN, 0x00); /* Digital Block Partial Power Down - default */
snd_opl3sa2_write(chip, OPL3SA3_ANLG_DOWN, 0x00); /* Analog Block Partial Power Down - default */
}
return 0;
}
static irqreturn_t snd_opl3sa2_interrupt(int irq, void *dev_id)
{
unsigned short status;
struct snd_card *card = dev_id;
struct snd_opl3sa2 *chip;
int handled = 0;
if (card == NULL)
return IRQ_NONE;
chip = card->private_data;
status = snd_opl3sa2_read(chip, OPL3SA2_IRQ_STATUS);
if (status & 0x20) {
handled = 1;
snd_opl3_interrupt(chip->synth);
}
if ((status & 0x10) && chip->rmidi != NULL) {
handled = 1;
snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data);
}
if (status & 0x07) { /* TI,CI,PI */
handled = 1;
snd_wss_interrupt(irq, chip->wss);
}
if (status & 0x40) { /* hardware volume change */
handled = 1;
/* reading from Master Lch register at 0x07 clears this bit */
snd_opl3sa2_read(chip, OPL3SA2_MASTER_RIGHT);
snd_opl3sa2_read(chip, OPL3SA2_MASTER_LEFT);
if (chip->master_switch && chip->master_volume) {
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
&chip->master_switch->id);
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
&chip->master_volume->id);
}
}
return IRQ_RETVAL(handled);
}
#define OPL3SA2_SINGLE(xname, xindex, reg, shift, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
.info = snd_wss_info_single, \
.get = snd_opl3sa2_get_single, .put = snd_opl3sa2_put_single, \
.private_value = reg | (shift << 8) | (mask << 16) | (invert << 24) }
#define OPL3SA2_SINGLE_TLV(xname, xindex, reg, shift, mask, invert, xtlv) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \
.name = xname, .index = xindex, \
.info = snd_wss_info_single, \
.get = snd_opl3sa2_get_single, .put = snd_opl3sa2_put_single, \
.private_value = reg | (shift << 8) | (mask << 16) | (invert << 24), \
.tlv = { .p = (xtlv) } }
static int snd_opl3sa2_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_opl3sa2 *chip = snd_kcontrol_chip(kcontrol);
unsigned long flags;
int reg = kcontrol->private_value & 0xff;
int shift = (kcontrol->private_value >> 8) & 0xff;
int mask = (kcontrol->private_value >> 16) & 0xff;
int invert = (kcontrol->private_value >> 24) & 0xff;
spin_lock_irqsave(&chip->reg_lock, flags);
ucontrol->value.integer.value[0] = (chip->ctlregs[reg] >> shift) & mask;
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (invert)
ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0];
return 0;
}
static int snd_opl3sa2_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_opl3sa2 *chip = snd_kcontrol_chip(kcontrol);
unsigned long flags;
int reg = kcontrol->private_value & 0xff;
int shift = (kcontrol->private_value >> 8) & 0xff;
int mask = (kcontrol->private_value >> 16) & 0xff;
int invert = (kcontrol->private_value >> 24) & 0xff;
int change;
unsigned short val, oval;
val = (ucontrol->value.integer.value[0] & mask);
if (invert)
val = mask - val;
val <<= shift;
spin_lock_irqsave(&chip->reg_lock, flags);
oval = chip->ctlregs[reg];
val = (oval & ~(mask << shift)) | val;
change = val != oval;
__snd_opl3sa2_write(chip, reg, val);
spin_unlock_irqrestore(&chip->reg_lock, flags);
return change;
}
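/*
 * Worked example of the private_value packing decoded above: the
 * "Mic Playback Volume" control defined below uses reg 0x09, shift 0,
 * mask 31, invert 1, so its private_value is
 * 0x09 | (0 << 8) | (31 << 16) | (1 << 24) == 0x011f0009.
 */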
#define OPL3SA2_DOUBLE(xname, xindex, left_reg, right_reg, shift_left, shift_right, mask, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
.info = snd_wss_info_double, \
.get = snd_opl3sa2_get_double, .put = snd_opl3sa2_put_double, \
.private_value = left_reg | (right_reg << 8) | (shift_left << 16) | (shift_right << 19) | (mask << 24) | (invert << 22) }
#define OPL3SA2_DOUBLE_TLV(xname, xindex, left_reg, right_reg, shift_left, shift_right, mask, invert, xtlv) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \
.name = xname, .index = xindex, \
.info = snd_wss_info_double, \
.get = snd_opl3sa2_get_double, .put = snd_opl3sa2_put_double, \
.private_value = left_reg | (right_reg << 8) | (shift_left << 16) | (shift_right << 19) | (mask << 24) | (invert << 22), \
.tlv = { .p = (xtlv) } }
static int snd_opl3sa2_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_opl3sa2 *chip = snd_kcontrol_chip(kcontrol);
unsigned long flags;
int left_reg = kcontrol->private_value & 0xff;
int right_reg = (kcontrol->private_value >> 8) & 0xff;
int shift_left = (kcontrol->private_value >> 16) & 0x07;
int shift_right = (kcontrol->private_value >> 19) & 0x07;
int mask = (kcontrol->private_value >> 24) & 0xff;
int invert = (kcontrol->private_value >> 22) & 1;
spin_lock_irqsave(&chip->reg_lock, flags);
ucontrol->value.integer.value[0] = (chip->ctlregs[left_reg] >> shift_left) & mask;
ucontrol->value.integer.value[1] = (chip->ctlregs[right_reg] >> shift_right) & mask;
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (invert) {
ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0];
ucontrol->value.integer.value[1] = mask - ucontrol->value.integer.value[1];
}
return 0;
}
static int snd_opl3sa2_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
struct snd_opl3sa2 *chip = snd_kcontrol_chip(kcontrol);
unsigned long flags;
int left_reg = kcontrol->private_value & 0xff;
int right_reg = (kcontrol->private_value >> 8) & 0xff;
int shift_left = (kcontrol->private_value >> 16) & 0x07;
int shift_right = (kcontrol->private_value >> 19) & 0x07;
int mask = (kcontrol->private_value >> 24) & 0xff;
int invert = (kcontrol->private_value >> 22) & 1;
int change;
unsigned short val1, val2, oval1, oval2;
val1 = ucontrol->value.integer.value[0] & mask;
val2 = ucontrol->value.integer.value[1] & mask;
if (invert) {
val1 = mask - val1;
val2 = mask - val2;
}
val1 <<= shift_left;
val2 <<= shift_right;
spin_lock_irqsave(&chip->reg_lock, flags);
if (left_reg != right_reg) {
oval1 = chip->ctlregs[left_reg];
oval2 = chip->ctlregs[right_reg];
val1 = (oval1 & ~(mask << shift_left)) | val1;
val2 = (oval2 & ~(mask << shift_right)) | val2;
change = val1 != oval1 || val2 != oval2;
__snd_opl3sa2_write(chip, left_reg, val1);
__snd_opl3sa2_write(chip, right_reg, val2);
} else {
oval1 = chip->ctlregs[left_reg];
val1 = (oval1 & ~((mask << shift_left) | (mask << shift_right))) | val1 | val2;
change = val1 != oval1;
__snd_opl3sa2_write(chip, left_reg, val1);
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
return change;
}
static const DECLARE_TLV_DB_SCALE(db_scale_master, -3000, 200, 0);
static const DECLARE_TLV_DB_SCALE(db_scale_5bit_12db_max, -3450, 150, 0);
static struct snd_kcontrol_new snd_opl3sa2_controls[] = {
OPL3SA2_DOUBLE("Master Playback Switch", 0, 0x07, 0x08, 7, 7, 1, 1),
OPL3SA2_DOUBLE_TLV("Master Playback Volume", 0, 0x07, 0x08, 0, 0, 15, 1,
db_scale_master),
OPL3SA2_SINGLE("Mic Playback Switch", 0, 0x09, 7, 1, 1),
OPL3SA2_SINGLE_TLV("Mic Playback Volume", 0, 0x09, 0, 31, 1,
db_scale_5bit_12db_max),
OPL3SA2_SINGLE("ZV Port Switch", 0, 0x02, 0, 1, 0),
};
static struct snd_kcontrol_new snd_opl3sa2_tone_controls[] = {
OPL3SA2_DOUBLE("3D Control - Wide", 0, 0x14, 0x14, 4, 0, 7, 0),
OPL3SA2_DOUBLE("Tone Control - Bass", 0, 0x15, 0x15, 4, 0, 7, 0),
OPL3SA2_DOUBLE("Tone Control - Treble", 0, 0x16, 0x16, 4, 0, 7, 0)
};
static void snd_opl3sa2_master_free(struct snd_kcontrol *kcontrol)
{
struct snd_opl3sa2 *chip = snd_kcontrol_chip(kcontrol);
chip->master_switch = NULL;
chip->master_volume = NULL;
}
static int snd_opl3sa2_mixer(struct snd_card *card)
{
struct snd_opl3sa2 *chip = card->private_data;
struct snd_ctl_elem_id id1, id2;
struct snd_kcontrol *kctl;
unsigned int idx;
int err;
memset(&id1, 0, sizeof(id1));
memset(&id2, 0, sizeof(id2));
id1.iface = id2.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
/* reassign AUX0 to CD */
strcpy(id1.name, "Aux Playback Switch");
strcpy(id2.name, "CD Playback Switch");
if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) {
snd_printk(KERN_ERR "Cannot rename opl3sa2 control\n");
return err;
}
strcpy(id1.name, "Aux Playback Volume");
strcpy(id2.name, "CD Playback Volume");
if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) {
snd_printk(KERN_ERR "Cannot rename opl3sa2 control\n");
return err;
}
/* reassign AUX1 to FM */
strcpy(id1.name, "Aux Playback Switch"); id1.index = 1;
strcpy(id2.name, "FM Playback Switch");
if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) {
snd_printk(KERN_ERR "Cannot rename opl3sa2 control\n");
return err;
}
strcpy(id1.name, "Aux Playback Volume");
strcpy(id2.name, "FM Playback Volume");
if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) {
snd_printk(KERN_ERR "Cannot rename opl3sa2 control\n");
return err;
}
/* add OPL3SA2 controls */
for (idx = 0; idx < ARRAY_SIZE(snd_opl3sa2_controls); idx++) {
if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_opl3sa2_controls[idx], chip))) < 0)
return err;
switch (idx) {
case 0: chip->master_switch = kctl; kctl->private_free = snd_opl3sa2_master_free; break;
case 1: chip->master_volume = kctl; kctl->private_free = snd_opl3sa2_master_free; break;
}
}
if (chip->version > 2) {
for (idx = 0; idx < ARRAY_SIZE(snd_opl3sa2_tone_controls); idx++)
if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_opl3sa2_tone_controls[idx], chip))) < 0)
return err;
}
return 0;
}
/* Power Management support functions */
#ifdef CONFIG_PM
static int snd_opl3sa2_suspend(struct snd_card *card, pm_message_t state)
{
if (card) {
struct snd_opl3sa2 *chip = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
chip->wss->suspend(chip->wss);
/* power down */
snd_opl3sa2_write(chip, OPL3SA2_PM_CTRL, OPL3SA2_PM_D3);
}
return 0;
}
static int snd_opl3sa2_resume(struct snd_card *card)
{
struct snd_opl3sa2 *chip;
int i;
if (!card)
return 0;
chip = card->private_data;
/* power up */
snd_opl3sa2_write(chip, OPL3SA2_PM_CTRL, OPL3SA2_PM_D0);
/* restore registers */
for (i = 2; i <= 0x0a; i++) {
if (i != OPL3SA2_IRQ_STATUS)
snd_opl3sa2_write(chip, i, chip->ctlregs[i]);
}
if (chip->version > 2) {
for (i = 0x12; i <= 0x16; i++)
snd_opl3sa2_write(chip, i, chip->ctlregs[i]);
}
/* restore wss */
chip->wss->resume(chip->wss);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PNP
static int snd_opl3sa2_pnp(int dev, struct snd_opl3sa2 *chip,
struct pnp_dev *pdev)
{
if (pnp_activate_dev(pdev) < 0) {
snd_printk(KERN_ERR "PnP configure failure (out of resources?)\n");
return -EBUSY;
}
sb_port[dev] = pnp_port_start(pdev, 0);
wss_port[dev] = pnp_port_start(pdev, 1);
fm_port[dev] = pnp_port_start(pdev, 2);
midi_port[dev] = pnp_port_start(pdev, 3);
port[dev] = pnp_port_start(pdev, 4);
dma1[dev] = pnp_dma(pdev, 0);
dma2[dev] = pnp_dma(pdev, 1);
irq[dev] = pnp_irq(pdev, 0);
snd_printdd("%sPnP OPL3-SA: sb port=0x%lx, wss port=0x%lx, fm port=0x%lx, midi port=0x%lx\n",
pnp_device_is_pnpbios(pdev) ? "BIOS" : "ISA", sb_port[dev], wss_port[dev], fm_port[dev], midi_port[dev]);
snd_printdd("%sPnP OPL3-SA: control port=0x%lx, dma1=%i, dma2=%i, irq=%i\n",
pnp_device_is_pnpbios(pdev) ? "BIOS" : "ISA", port[dev], dma1[dev], dma2[dev], irq[dev]);
return 0;
}
#endif /* CONFIG_PNP */
static void snd_opl3sa2_free(struct snd_card *card)
{
struct snd_opl3sa2 *chip = card->private_data;
if (chip->irq >= 0)
free_irq(chip->irq, card);
release_and_free_resource(chip->res_port);
}
static int snd_opl3sa2_card_new(int dev, struct snd_card **cardp)
{
struct snd_card *card;
struct snd_opl3sa2 *chip;
int err;
err = snd_card_create(index[dev], id[dev], THIS_MODULE,
sizeof(struct snd_opl3sa2), &card);
if (err < 0)
return err;
strcpy(card->driver, "OPL3SA2");
strcpy(card->shortname, "Yamaha OPL3-SA");
chip = card->private_data;
spin_lock_init(&chip->reg_lock);
chip->irq = -1;
card->private_free = snd_opl3sa2_free;
*cardp = card;
return 0;
}
static int snd_opl3sa2_probe(struct snd_card *card, int dev)
{
int xirq, xdma1, xdma2;
struct snd_opl3sa2 *chip;
struct snd_wss *wss;
struct snd_opl3 *opl3;
int err;
/* initialise this card from supplied (or default) parameters */
chip = card->private_data;
chip->ymode = opl3sa3_ymode[dev] & 0x03;
chip->port = port[dev];
xirq = irq[dev];
xdma1 = dma1[dev];
xdma2 = dma2[dev];
if (xdma2 < 0)
chip->single_dma = 1;
err = snd_opl3sa2_detect(card);
if (err < 0)
return err;
err = request_irq(xirq, snd_opl3sa2_interrupt, 0,
"OPL3-SA2", card);
if (err) {
snd_printk(KERN_ERR PFX "can't grab IRQ %d\n", xirq);
return -ENODEV;
}
chip->irq = xirq;
err = snd_wss_create(card,
wss_port[dev] + 4, -1,
xirq, xdma1, xdma2,
WSS_HW_OPL3SA2, WSS_HWSHARE_IRQ, &wss);
if (err < 0) {
snd_printd("Oops, WSS not detected at 0x%lx\n", wss_port[dev] + 4);
return err;
}
chip->wss = wss;
err = snd_wss_pcm(wss, 0, NULL);
if (err < 0)
return err;
err = snd_wss_mixer(wss);
if (err < 0)
return err;
err = snd_opl3sa2_mixer(card);
if (err < 0)
return err;
err = snd_wss_timer(wss, 0, NULL);
if (err < 0)
return err;
if (fm_port[dev] >= 0x340 && fm_port[dev] < 0x400) {
if ((err = snd_opl3_create(card, fm_port[dev],
fm_port[dev] + 2,
OPL3_HW_OPL3, 0, &opl3)) < 0)
return err;
if ((err = snd_opl3_timer_new(opl3, 1, 2)) < 0)
return err;
if ((err = snd_opl3_hwdep_new(opl3, 0, 1, &chip->synth)) < 0)
return err;
}
if (midi_port[dev] >= 0x300 && midi_port[dev] < 0x340) {
if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_OPL3SA2,
midi_port[dev],
MPU401_INFO_IRQ_HOOK, -1,
&chip->rmidi)) < 0)
return err;
}
sprintf(card->longname, "%s at 0x%lx, irq %d, dma %d",
card->shortname, chip->port, xirq, xdma1);
if (xdma2 >= 0)
sprintf(card->longname + strlen(card->longname), "&%d", xdma2);
return snd_card_register(card);
}
#ifdef CONFIG_PNP
static int snd_opl3sa2_pnp_detect(struct pnp_dev *pdev,
const struct pnp_device_id *id)
{
static int dev;
int err;
struct snd_card *card;
if (pnp_device_is_isapnp(pdev))
return -ENOENT; /* we have another procedure - card */
for (; dev < SNDRV_CARDS; dev++) {
if (enable[dev] && isapnp[dev])
break;
}
if (dev >= SNDRV_CARDS)
return -ENODEV;
err = snd_opl3sa2_card_new(dev, &card);
if (err < 0)
return err;
if ((err = snd_opl3sa2_pnp(dev, card->private_data, pdev)) < 0) {
snd_card_free(card);
return err;
}
snd_card_set_dev(card, &pdev->dev);
if ((err = snd_opl3sa2_probe(card, dev)) < 0) {
snd_card_free(card);
return err;
}
pnp_set_drvdata(pdev, card);
dev++;
return 0;
}
static void snd_opl3sa2_pnp_remove(struct pnp_dev *pdev)
{
snd_card_free(pnp_get_drvdata(pdev));
pnp_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
static int snd_opl3sa2_pnp_suspend(struct pnp_dev *pdev, pm_message_t state)
{
return snd_opl3sa2_suspend(pnp_get_drvdata(pdev), state);
}
static int snd_opl3sa2_pnp_resume(struct pnp_dev *pdev)
{
return snd_opl3sa2_resume(pnp_get_drvdata(pdev));
}
#endif
static struct pnp_driver opl3sa2_pnp_driver = {
.name = "snd-opl3sa2-pnpbios",
.id_table = snd_opl3sa2_pnpbiosids,
.probe = snd_opl3sa2_pnp_detect,
.remove = snd_opl3sa2_pnp_remove,
#ifdef CONFIG_PM
.suspend = snd_opl3sa2_pnp_suspend,
.resume = snd_opl3sa2_pnp_resume,
#endif
};
static int snd_opl3sa2_pnp_cdetect(struct pnp_card_link *pcard,
const struct pnp_card_device_id *id)
{
static int dev;
struct pnp_dev *pdev;
int err;
struct snd_card *card;
pdev = pnp_request_card_device(pcard, id->devs[0].id, NULL);
if (pdev == NULL) {
snd_printk(KERN_ERR PFX "can't get pnp device from id '%s'\n",
id->devs[0].id);
return -EBUSY;
}
for (; dev < SNDRV_CARDS; dev++) {
if (enable[dev] && isapnp[dev])
break;
}
if (dev >= SNDRV_CARDS)
return -ENODEV;
err = snd_opl3sa2_card_new(dev, &card);
if (err < 0)
return err;
if ((err = snd_opl3sa2_pnp(dev, card->private_data, pdev)) < 0) {
snd_card_free(card);
return err;
}
snd_card_set_dev(card, &pdev->dev);
if ((err = snd_opl3sa2_probe(card, dev)) < 0) {
snd_card_free(card);
return err;
}
pnp_set_card_drvdata(pcard, card);
dev++;
return 0;
}
static void snd_opl3sa2_pnp_cremove(struct pnp_card_link *pcard)
{
snd_card_free(pnp_get_card_drvdata(pcard));
pnp_set_card_drvdata(pcard, NULL);
}
#ifdef CONFIG_PM
static int snd_opl3sa2_pnp_csuspend(struct pnp_card_link *pcard, pm_message_t state)
{
return snd_opl3sa2_suspend(pnp_get_card_drvdata(pcard), state);
}
static int snd_opl3sa2_pnp_cresume(struct pnp_card_link *pcard)
{
return snd_opl3sa2_resume(pnp_get_card_drvdata(pcard));
}
#endif
static struct pnp_card_driver opl3sa2_pnpc_driver = {
.flags = PNP_DRIVER_RES_DISABLE,
.name = "snd-opl3sa2-cpnp",
.id_table = snd_opl3sa2_pnpids,
.probe = snd_opl3sa2_pnp_cdetect,
.remove = snd_opl3sa2_pnp_cremove,
#ifdef CONFIG_PM
.suspend = snd_opl3sa2_pnp_csuspend,
.resume = snd_opl3sa2_pnp_cresume,
#endif
};
#endif /* CONFIG_PNP */
static int snd_opl3sa2_isa_match(struct device *pdev,
unsigned int dev)
{
if (!enable[dev])
return 0;
#ifdef CONFIG_PNP
if (isapnp[dev])
return 0;
#endif
if (port[dev] == SNDRV_AUTO_PORT) {
snd_printk(KERN_ERR PFX "specify port\n");
return 0;
}
if (wss_port[dev] == SNDRV_AUTO_PORT) {
snd_printk(KERN_ERR PFX "specify wss_port\n");
return 0;
}
if (fm_port[dev] == SNDRV_AUTO_PORT) {
snd_printk(KERN_ERR PFX "specify fm_port\n");
return 0;
}
if (midi_port[dev] == SNDRV_AUTO_PORT) {
snd_printk(KERN_ERR PFX "specify midi_port\n");
return 0;
}
return 1;
}
static int snd_opl3sa2_isa_probe(struct device *pdev,
unsigned int dev)
{
struct snd_card *card;
int err;
err = snd_opl3sa2_card_new(dev, &card);
if (err < 0)
return err;
snd_card_set_dev(card, pdev);
if ((err = snd_opl3sa2_probe(card, dev)) < 0) {
snd_card_free(card);
return err;
}
dev_set_drvdata(pdev, card);
return 0;
}
static int snd_opl3sa2_isa_remove(struct device *devptr,
unsigned int dev)
{
snd_card_free(dev_get_drvdata(devptr));
dev_set_drvdata(devptr, NULL);
return 0;
}
#ifdef CONFIG_PM
static int snd_opl3sa2_isa_suspend(struct device *dev, unsigned int n,
pm_message_t state)
{
return snd_opl3sa2_suspend(dev_get_drvdata(dev), state);
}
static int snd_opl3sa2_isa_resume(struct device *dev, unsigned int n)
{
return snd_opl3sa2_resume(dev_get_drvdata(dev));
}
#endif
#define DEV_NAME "opl3sa2"
static struct isa_driver snd_opl3sa2_isa_driver = {
.match = snd_opl3sa2_isa_match,
.probe = snd_opl3sa2_isa_probe,
.remove = snd_opl3sa2_isa_remove,
#ifdef CONFIG_PM
.suspend = snd_opl3sa2_isa_suspend,
.resume = snd_opl3sa2_isa_resume,
#endif
.driver = {
.name = DEV_NAME
},
};
static int __init alsa_card_opl3sa2_init(void)
{
int err;
err = isa_register_driver(&snd_opl3sa2_isa_driver, SNDRV_CARDS);
#ifdef CONFIG_PNP
if (!err)
isa_registered = 1;
err = pnp_register_driver(&opl3sa2_pnp_driver);
if (!err)
pnp_registered = 1;
err = pnp_register_card_driver(&opl3sa2_pnpc_driver);
if (!err)
pnpc_registered = 1;
if (isa_registered || pnp_registered)
err = 0;
#endif
return err;
}
static void __exit alsa_card_opl3sa2_exit(void)
{
#ifdef CONFIG_PNP
if (pnpc_registered)
pnp_unregister_card_driver(&opl3sa2_pnpc_driver);
if (pnp_registered)
pnp_unregister_driver(&opl3sa2_pnp_driver);
if (isa_registered)
#endif
isa_unregister_driver(&snd_opl3sa2_isa_driver);
}
module_init(alsa_card_opl3sa2_init)
module_exit(alsa_card_opl3sa2_exit)
| gpl-2.0 |
bju2000/Sense7_Kernel_b2wlj | arch/score/kernel/process.c | 3014 | 4350 | /*
* arch/score/kernel/process.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
/* If or when software machine-restart is implemented, add code here. */
void machine_restart(char *command) {}
/* If or when software machine-halt is implemented, add code here. */
void machine_halt(void) {}
/* If or when software machine-power-off is implemented, add code here. */
void machine_power_off(void) {}
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (i.e. sit in a loop waiting for
* somebody to say that they'd like to reschedule)
*/
void __noreturn cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched())
barrier();
schedule_preempt_disabled();
}
}
void ret_from_fork(void);
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
unsigned long status;
/* New thread loses kernel privileges. */
status = regs->cp0_psr & ~(KU_MASK);
status |= KU_USER;
regs->cp0_psr = status;
regs->cp0_epc = pc;
regs->regs[0] = sp;
}
void exit_thread(void) {}
/*
* When a process does an "exec", machine state like FPU and debug
* registers need to be reset. This is a hook function for that.
* Currently we don't have any such state to reset, so this is empty.
*/
void flush_thread(void) {}
/*
* set up the kernel stack and exception frames for a new process
*/
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
p->set_child_tid = NULL;
p->clear_child_tid = NULL;
*childregs = *regs;
childregs->regs[7] = 0; /* Clear error flag */
childregs->regs[4] = 0; /* Child gets zero as return value */
regs->regs[4] = p->pid;
if (childregs->cp0_psr & 0x8) { /* test kernel fork or user fork */
childregs->regs[0] = usp; /* user fork */
} else {
childregs->regs[28] = (unsigned long) ti; /* kernel fork */
childregs->regs[0] = (unsigned long) childregs;
}
p->thread.reg0 = (unsigned long) childregs;
p->thread.reg3 = (unsigned long) ret_from_fork;
p->thread.cp0_psr = 0;
return 0;
}
/* Fill in the fpu structure for a core dump. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
return 1;
}
static void __noreturn
kernel_thread_helper(void *unused0, int (*fn)(void *),
void *arg, void *unused1)
{
do_exit(fn(arg));
}
/*
* Create a kernel thread.
*/
long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
regs.regs[6] = (unsigned long) arg;
regs.regs[5] = (unsigned long) fn;
regs.cp0_epc = (unsigned long) kernel_thread_helper;
regs.cp0_psr = (regs.cp0_psr & ~(0x1|0x4|0x8)) | \
((regs.cp0_psr & 0x3) << 2);
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, \
0, &regs, 0, NULL, NULL);
}
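/*
 * Illustrative use of the helper above (disabled; the thread function
 * and both names are made up for the example).
 */
#if 0
static int sketch_thread_fn(void *arg)
{
	/* runs in kernel mode; the return value becomes the exit code */
	return 0;
}

static void sketch_spawn(void)
{
	kernel_thread(sketch_thread_fn, NULL, 0);
}
#endif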
unsigned long thread_saved_pc(struct task_struct *tsk)
{
return task_pt_regs(tsk)->cp0_epc;
}
unsigned long get_wchan(struct task_struct *task)
{
if (!task || task == current || task->state == TASK_RUNNING)
return 0;
if (!task_stack_page(task))
return 0;
return task_pt_regs(task)->cp0_epc;
}
unsigned long arch_align_stack(unsigned long sp)
{
return sp;
}
| gpl-2.0 |
dhiru1602/android_kernel_samsung_smdk4412 | drivers/ps3/ps3-vuart.c | 4038 | 31356 | /*
* PS3 virtual uart
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <asm/ps3.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include "vuart.h"
MODULE_AUTHOR("Sony Corporation");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PS3 vuart");
/**
* vuart - An inter-partition data link service.
* port 0: PS3 AV Settings.
* port 2: PS3 System Manager.
*
* The vuart provides a bi-directional byte stream data link between logical
* partitions. Its primary role is as a communications link between the guest
* OS and the system policy module. The current HV does not support any
* connections other than those listed.
*/
enum {PORT_COUNT = 3,};
enum vuart_param {
PARAM_TX_TRIGGER = 0,
PARAM_RX_TRIGGER = 1,
PARAM_INTERRUPT_MASK = 2,
PARAM_RX_BUF_SIZE = 3, /* read only */
PARAM_RX_BYTES = 4, /* read only */
PARAM_TX_BUF_SIZE = 5, /* read only */
PARAM_TX_BYTES = 6, /* read only */
PARAM_INTERRUPT_STATUS = 7, /* read only */
};
enum vuart_interrupt_bit {
INTERRUPT_BIT_TX = 0,
INTERRUPT_BIT_RX = 1,
INTERRUPT_BIT_DISCONNECT = 2,
};
enum vuart_interrupt_mask {
INTERRUPT_MASK_TX = 1,
INTERRUPT_MASK_RX = 2,
INTERRUPT_MASK_DISCONNECT = 4,
};
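/*
 * Annotation (not in the original source): each mask value is simply
 * (1 << the corresponding vuart_interrupt_bit), e.g.
 * INTERRUPT_MASK_RX == 1 << INTERRUPT_BIT_RX.
 */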
/**
* struct ps3_vuart_port_priv - private vuart device data.
*/
struct ps3_vuart_port_priv {
u64 interrupt_mask;
struct {
spinlock_t lock;
struct list_head head;
} tx_list;
struct {
struct ps3_vuart_work work;
unsigned long bytes_held;
spinlock_t lock;
struct list_head head;
} rx_list;
struct ps3_vuart_stats stats;
};
static struct ps3_vuart_port_priv *to_port_priv(
struct ps3_system_bus_device *dev)
{
BUG_ON(!dev);
BUG_ON(!dev->driver_priv);
return (struct ps3_vuart_port_priv *)dev->driver_priv;
}
/**
* struct ports_bmp - bitmap indicating ports needing service.
*
* A 256 bit read only bitmap indicating ports needing service. Do not write
* to these bits. Must not cross a page boundary.
*/
struct ports_bmp {
u64 status;
u64 unused[3];
} __attribute__((aligned(32)));
#define dump_ports_bmp(_b) _dump_ports_bmp(_b, __func__, __LINE__)
static void __maybe_unused _dump_ports_bmp(
const struct ports_bmp *bmp, const char *func, int line)
{
pr_debug("%s:%d: ports_bmp: %016llxh\n", func, line, bmp->status);
}
#define dump_port_params(_b) _dump_port_params(_b, __func__, __LINE__)
static void __maybe_unused _dump_port_params(unsigned int port_number,
const char *func, int line)
{
#if defined(DEBUG)
static const char *strings[] = {
"tx_trigger ",
"rx_trigger ",
"interrupt_mask ",
"rx_buf_size ",
"rx_bytes ",
"tx_buf_size ",
"tx_bytes ",
"interrupt_status",
};
int result;
unsigned int i;
u64 value;
for (i = 0; i < ARRAY_SIZE(strings); i++) {
result = lv1_get_virtual_uart_param(port_number, i, &value);
if (result) {
pr_debug("%s:%d: port_%u: %s failed: %s\n", func, line,
port_number, strings[i], ps3_result(result));
continue;
}
pr_debug("%s:%d: port_%u: %s = %lxh\n",
func, line, port_number, strings[i], value);
}
#endif
}
struct vuart_triggers {
unsigned long rx;
unsigned long tx;
};
int ps3_vuart_get_triggers(struct ps3_system_bus_device *dev,
struct vuart_triggers *trig)
{
int result;
u64 size;
u64 val;
u64 tx;
result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_TX_TRIGGER, &tx);
trig->tx = tx;
if (result) {
dev_dbg(&dev->core, "%s:%d: tx_trigger failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_RX_BUF_SIZE, &size);
if (result) {
dev_dbg(&dev->core, "%s:%d: tx_buf_size failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_RX_TRIGGER, &val);
if (result) {
dev_dbg(&dev->core, "%s:%d: rx_trigger failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
trig->rx = size - val;
dev_dbg(&dev->core, "%s:%d: tx %lxh, rx %lxh\n", __func__, __LINE__,
trig->tx, trig->rx);
return result;
}
int ps3_vuart_set_triggers(struct ps3_system_bus_device *dev, unsigned int tx,
unsigned int rx)
{
int result;
u64 size;
result = lv1_set_virtual_uart_param(dev->port_number,
PARAM_TX_TRIGGER, tx);
if (result) {
dev_dbg(&dev->core, "%s:%d: tx_trigger failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_RX_BUF_SIZE, &size);
if (result) {
dev_dbg(&dev->core, "%s:%d: tx_buf_size failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = lv1_set_virtual_uart_param(dev->port_number,
PARAM_RX_TRIGGER, size - rx);
if (result) {
dev_dbg(&dev->core, "%s:%d: rx_trigger failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
dev_dbg(&dev->core, "%s:%d: tx %xh, rx %xh\n", __func__, __LINE__,
tx, rx);
return result;
}
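/*
 * Annotation: the HV stores the rx trigger as a free-space threshold,
 * so the get and set helpers above both convert between that value and
 * a bytes-received count via (rx_buf_size - value).
 */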
static int ps3_vuart_get_rx_bytes_waiting(struct ps3_system_bus_device *dev,
u64 *bytes_waiting)
{
int result;
result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_RX_BYTES, bytes_waiting);
if (result)
dev_dbg(&dev->core, "%s:%d: rx_bytes failed: %s\n",
__func__, __LINE__, ps3_result(result));
dev_dbg(&dev->core, "%s:%d: %llxh\n", __func__, __LINE__,
*bytes_waiting);
return result;
}
/**
* ps3_vuart_set_interrupt_mask - Enable/disable the port interrupt sources.
* @dev: The struct ps3_system_bus_device instance.
* @mask: Logical OR of enum vuart_interrupt_mask values. A zero bit disables.
*/
static int ps3_vuart_set_interrupt_mask(struct ps3_system_bus_device *dev,
unsigned long mask)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
dev_dbg(&dev->core, "%s:%d: %lxh\n", __func__, __LINE__, mask);
priv->interrupt_mask = mask;
result = lv1_set_virtual_uart_param(dev->port_number,
PARAM_INTERRUPT_MASK, priv->interrupt_mask);
if (result)
dev_dbg(&dev->core, "%s:%d: interrupt_mask failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
static int ps3_vuart_get_interrupt_status(struct ps3_system_bus_device *dev,
unsigned long *status)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
u64 tmp;
result = lv1_get_virtual_uart_param(dev->port_number,
PARAM_INTERRUPT_STATUS, &tmp);
if (result)
dev_dbg(&dev->core, "%s:%d: interrupt_status failed: %s\n",
__func__, __LINE__, ps3_result(result));
*status = tmp & priv->interrupt_mask;
dev_dbg(&dev->core, "%s:%d: m %llxh, s %llxh, m&s %lxh\n",
__func__, __LINE__, priv->interrupt_mask, tmp, *status);
return result;
}
int ps3_vuart_enable_interrupt_tx(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
return (priv->interrupt_mask & INTERRUPT_MASK_TX) ? 0
: ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
| INTERRUPT_MASK_TX);
}
int ps3_vuart_enable_interrupt_rx(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
return (priv->interrupt_mask & INTERRUPT_MASK_RX) ? 0
: ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
| INTERRUPT_MASK_RX);
}
int ps3_vuart_enable_interrupt_disconnect(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
return (priv->interrupt_mask & INTERRUPT_MASK_DISCONNECT) ? 0
: ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
| INTERRUPT_MASK_DISCONNECT);
}
int ps3_vuart_disable_interrupt_tx(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
return (priv->interrupt_mask & INTERRUPT_MASK_TX)
? ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
& ~INTERRUPT_MASK_TX) : 0;
}
int ps3_vuart_disable_interrupt_rx(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
return (priv->interrupt_mask & INTERRUPT_MASK_RX)
? ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
& ~INTERRUPT_MASK_RX) : 0;
}
int ps3_vuart_disable_interrupt_disconnect(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
return (priv->interrupt_mask & INTERRUPT_MASK_DISCONNECT)
? ps3_vuart_set_interrupt_mask(dev, priv->interrupt_mask
& ~INTERRUPT_MASK_DISCONNECT) : 0;
}
/**
* ps3_vuart_raw_write - Low level write helper.
* @dev: The struct ps3_system_bus_device instance.
*
* Do not call ps3_vuart_raw_write directly, use ps3_vuart_write.
*/
static int ps3_vuart_raw_write(struct ps3_system_bus_device *dev,
const void *buf, unsigned int bytes, u64 *bytes_written)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
result = lv1_write_virtual_uart(dev->port_number,
ps3_mm_phys_to_lpar(__pa(buf)), bytes, bytes_written);
if (result) {
dev_dbg(&dev->core, "%s:%d: lv1_write_virtual_uart failed: "
"%s\n", __func__, __LINE__, ps3_result(result));
return result;
}
priv->stats.bytes_written += *bytes_written;
dev_dbg(&dev->core, "%s:%d: wrote %llxh/%xh=>%lxh\n", __func__, __LINE__,
*bytes_written, bytes, priv->stats.bytes_written);
return result;
}
/**
* ps3_vuart_raw_read - Low level read helper.
* @dev: The struct ps3_system_bus_device instance.
*
* Do not call ps3_vuart_raw_read directly, use ps3_vuart_read.
*/
static int ps3_vuart_raw_read(struct ps3_system_bus_device *dev, void *buf,
unsigned int bytes, u64 *bytes_read)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
dev_dbg(&dev->core, "%s:%d: %xh\n", __func__, __LINE__, bytes);
result = lv1_read_virtual_uart(dev->port_number,
ps3_mm_phys_to_lpar(__pa(buf)), bytes, bytes_read);
if (result) {
dev_dbg(&dev->core, "%s:%d: lv1_read_virtual_uart failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
priv->stats.bytes_read += *bytes_read;
dev_dbg(&dev->core, "%s:%d: read %llxh/%xh=>%lxh\n", __func__, __LINE__,
*bytes_read, bytes, priv->stats.bytes_read);
return result;
}
/**
* ps3_vuart_clear_rx_bytes - Discard bytes received.
* @dev: The struct ps3_system_bus_device instance.
* @bytes: Max byte count to discard, zero = all pending.
*
* Used to clear pending rx interrupt source. Will not block.
*/
void ps3_vuart_clear_rx_bytes(struct ps3_system_bus_device *dev,
unsigned int bytes)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
u64 bytes_waiting;
void *tmp;
result = ps3_vuart_get_rx_bytes_waiting(dev, &bytes_waiting);
BUG_ON(result);
bytes = bytes ? min(bytes, (unsigned int)bytes_waiting) : bytes_waiting;
dev_dbg(&dev->core, "%s:%d: %u\n", __func__, __LINE__, bytes);
if (!bytes)
return;
/* Add some extra space for recently arrived data. */
bytes += 128;
tmp = kmalloc(bytes, GFP_KERNEL);
if (!tmp)
return;
ps3_vuart_raw_read(dev, tmp, bytes, &bytes_waiting);
kfree(tmp);
/* Don't include these bytes in the stats. */
priv->stats.bytes_read -= bytes_waiting;
}
EXPORT_SYMBOL_GPL(ps3_vuart_clear_rx_bytes);
/**
* struct list_buffer - An element for a port device fifo buffer list.
*/
struct list_buffer {
struct list_head link;
const unsigned char *head;
const unsigned char *tail;
unsigned long dbg_number;
unsigned char data[];
};
/**
* ps3_vuart_write - the entry point for writing data to a port
* @dev: The struct ps3_system_bus_device instance.
*
* If the port is idle on entry, as much of the incoming data is written
* to the port as the port will accept. Otherwise a list buffer is
* created and any remaining incoming data is copied to that buffer. The
* buffer is then enqueued for transmission via the transmit interrupt.
*/
int ps3_vuart_write(struct ps3_system_bus_device *dev, const void *buf,
unsigned int bytes)
{
static unsigned long dbg_number;
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
struct list_buffer *lb;
dev_dbg(&dev->core, "%s:%d: %u(%xh) bytes\n", __func__, __LINE__,
bytes, bytes);
spin_lock_irqsave(&priv->tx_list.lock, flags);
if (list_empty(&priv->tx_list.head)) {
u64 bytes_written;
result = ps3_vuart_raw_write(dev, buf, bytes, &bytes_written);
spin_unlock_irqrestore(&priv->tx_list.lock, flags);
if (result) {
dev_dbg(&dev->core,
"%s:%d: ps3_vuart_raw_write failed\n",
__func__, __LINE__);
return result;
}
if (bytes_written == bytes) {
dev_dbg(&dev->core, "%s:%d: wrote %xh bytes\n",
__func__, __LINE__, bytes);
return 0;
}
bytes -= bytes_written;
buf += bytes_written;
} else
spin_unlock_irqrestore(&priv->tx_list.lock, flags);
lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_KERNEL);
if (!lb)
return -ENOMEM;
memcpy(lb->data, buf, bytes);
lb->head = lb->data;
lb->tail = lb->data + bytes;
lb->dbg_number = ++dbg_number;
spin_lock_irqsave(&priv->tx_list.lock, flags);
list_add_tail(&lb->link, &priv->tx_list.head);
ps3_vuart_enable_interrupt_tx(dev);
spin_unlock_irqrestore(&priv->tx_list.lock, flags);
dev_dbg(&dev->core, "%s:%d: queued buf_%lu, %xh bytes\n",
__func__, __LINE__, lb->dbg_number, bytes);
return 0;
}
EXPORT_SYMBOL_GPL(ps3_vuart_write);
/**
* ps3_vuart_queue_rx_bytes - Queue waiting bytes into the buffer list.
* @dev: The struct ps3_system_bus_device instance.
* @bytes_queued: Number of bytes queued to the buffer list.
*
* Must be called with priv->rx_list.lock held.
*/
static int ps3_vuart_queue_rx_bytes(struct ps3_system_bus_device *dev,
u64 *bytes_queued)
{
static unsigned long dbg_number;
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
struct list_buffer *lb;
u64 bytes;
*bytes_queued = 0;
result = ps3_vuart_get_rx_bytes_waiting(dev, &bytes);
BUG_ON(result);
if (result)
return -EIO;
if (!bytes)
return 0;
/* Add some extra space for recently arrived data. */
bytes += 128;
lb = kmalloc(sizeof(struct list_buffer) + bytes, GFP_ATOMIC);
if (!lb)
return -ENOMEM;
ps3_vuart_raw_read(dev, lb->data, bytes, &bytes);
lb->head = lb->data;
lb->tail = lb->data + bytes;
lb->dbg_number = ++dbg_number;
list_add_tail(&lb->link, &priv->rx_list.head);
priv->rx_list.bytes_held += bytes;
dev_dbg(&dev->core, "%s:%d: buf_%lu: queued %llxh bytes\n",
__func__, __LINE__, lb->dbg_number, bytes);
*bytes_queued = bytes;
return 0;
}
/**
* ps3_vuart_read - The entry point for reading data from a port.
*
* Queue data waiting at the port, and if enough bytes to satisfy the request
* are held in the buffer list those bytes are dequeued and copied to the
* caller's buffer. Emptied list buffers are retired. If the request
* cannot be satisfied by bytes held in the list buffers, -EAGAIN is
* returned.
*/
int ps3_vuart_read(struct ps3_system_bus_device *dev, void *buf,
unsigned int bytes)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
struct list_buffer *lb, *n;
unsigned long bytes_read;
dev_dbg(&dev->core, "%s:%d: %u(%xh) bytes\n", __func__, __LINE__,
bytes, bytes);
spin_lock_irqsave(&priv->rx_list.lock, flags);
/* Queue rx bytes here for polled reads. */
while (priv->rx_list.bytes_held < bytes) {
u64 tmp;
result = ps3_vuart_queue_rx_bytes(dev, &tmp);
if (result || !tmp) {
dev_dbg(&dev->core, "%s:%d: starved for %lxh bytes\n",
__func__, __LINE__,
bytes - priv->rx_list.bytes_held);
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return -EAGAIN;
}
}
list_for_each_entry_safe(lb, n, &priv->rx_list.head, link) {
bytes_read = min((unsigned int)(lb->tail - lb->head), bytes);
memcpy(buf, lb->head, bytes_read);
buf += bytes_read;
bytes -= bytes_read;
priv->rx_list.bytes_held -= bytes_read;
if (bytes_read < lb->tail - lb->head) {
lb->head += bytes_read;
dev_dbg(&dev->core, "%s:%d: buf_%lu: dequeued %lxh "
"bytes\n", __func__, __LINE__, lb->dbg_number,
bytes_read);
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return 0;
}
dev_dbg(&dev->core, "%s:%d: buf_%lu: free, dequeued %lxh "
"bytes\n", __func__, __LINE__, lb->dbg_number,
bytes_read);
list_del(&lb->link);
kfree(lb);
}
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(ps3_vuart_read);
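/*
 * Illustrative sketch, not part of the original file: a simple polled
 * transaction using the two entry points above. The command bytes,
 * reply size and device are hypothetical.
 */
#if 0
static int example_polled_xfer(struct ps3_system_bus_device *dev)
{
static const u8 cmd[4] = { 0x01, 0x02, 0x03, 0x04 };
u8 reply[16];
int result;
result = ps3_vuart_write(dev, cmd, sizeof(cmd));
if (result)
return result;
/* ps3_vuart_read() returns -EAGAIN until enough bytes are held. */
do {
result = ps3_vuart_read(dev, reply, sizeof(reply));
} while (result == -EAGAIN);
return result;
}
#endif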
/**
* ps3_vuart_work - Asynchronous read handler.
*/
static void ps3_vuart_work(struct work_struct *work)
{
struct ps3_system_bus_device *dev =
ps3_vuart_work_to_system_bus_dev(work);
struct ps3_vuart_port_driver *drv =
ps3_system_bus_dev_to_vuart_drv(dev);
BUG_ON(!drv);
drv->work(dev);
}
int ps3_vuart_read_async(struct ps3_system_bus_device *dev, unsigned int bytes)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
if (priv->rx_list.work.trigger) {
dev_dbg(&dev->core, "%s:%d: warning, multiple calls\n",
__func__, __LINE__);
return -EAGAIN;
}
BUG_ON(!bytes);
PREPARE_WORK(&priv->rx_list.work.work, ps3_vuart_work);
spin_lock_irqsave(&priv->rx_list.lock, flags);
if (priv->rx_list.bytes_held >= bytes) {
dev_dbg(&dev->core, "%s:%d: schedule_work %xh bytes\n",
__func__, __LINE__, bytes);
schedule_work(&priv->rx_list.work.work);
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return 0;
}
priv->rx_list.work.trigger = bytes;
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
dev_dbg(&dev->core, "%s:%d: waiting for %u(%xh) bytes\n", __func__,
__LINE__, bytes, bytes);
return 0;
}
EXPORT_SYMBOL_GPL(ps3_vuart_read_async);
void ps3_vuart_cancel_async(struct ps3_system_bus_device *dev)
{
to_port_priv(dev)->rx_list.work.trigger = 0;
}
EXPORT_SYMBOL_GPL(ps3_vuart_cancel_async);
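/*
 * Illustrative sketch: the intended async pattern. A port driver arms
 * a one-shot notification with ps3_vuart_read_async() and, from its
 * work handler, either consumes the bytes or re-arms on -EAGAIN. All
 * names here are hypothetical.
 */
#if 0
static void example_work_handler(struct ps3_system_bus_device *dev)
{
u8 buf[8];
if (ps3_vuart_read(dev, buf, sizeof(buf)) == -EAGAIN) {
ps3_vuart_read_async(dev, sizeof(buf)); /* re-arm */
return;
}
/* ... process buf ... */
}
#endif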
/**
* ps3_vuart_handle_interrupt_tx - third stage transmit interrupt handler
*
* Services the transmit interrupt for the port. Writes as much data from the
* buffer list as the port will accept. Retires any emptied list buffers and
* adjusts the final list buffer state for a partial write.
*/
static int ps3_vuart_handle_interrupt_tx(struct ps3_system_bus_device *dev)
{
int result = 0;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
struct list_buffer *lb, *n;
unsigned long bytes_total = 0;
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
spin_lock_irqsave(&priv->tx_list.lock, flags);
list_for_each_entry_safe(lb, n, &priv->tx_list.head, link) {
u64 bytes_written;
result = ps3_vuart_raw_write(dev, lb->head, lb->tail - lb->head,
&bytes_written);
if (result) {
dev_dbg(&dev->core,
"%s:%d: ps3_vuart_raw_write failed\n",
__func__, __LINE__);
break;
}
bytes_total += bytes_written;
if (bytes_written < lb->tail - lb->head) {
lb->head += bytes_written;
dev_dbg(&dev->core,
"%s:%d cleared buf_%lu, %llxh bytes\n",
__func__, __LINE__, lb->dbg_number,
bytes_written);
goto port_full;
}
dev_dbg(&dev->core, "%s:%d free buf_%lu\n", __func__, __LINE__,
lb->dbg_number);
list_del(&lb->link);
kfree(lb);
}
ps3_vuart_disable_interrupt_tx(dev);
port_full:
spin_unlock_irqrestore(&priv->tx_list.lock, flags);
dev_dbg(&dev->core, "%s:%d wrote %lxh bytes total\n",
__func__, __LINE__, bytes_total);
return result;
}
/**
* ps3_vuart_handle_interrupt_rx - third stage receive interrupt handler
*
* Services the receive interrupt for the port. Creates a list buffer and
* copies all waiting port data to that buffer and enqueues the buffer in the
* buffer list. Buffer list data is dequeued via ps3_vuart_read.
*/
static int ps3_vuart_handle_interrupt_rx(struct ps3_system_bus_device *dev)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long flags;
u64 bytes;
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
spin_lock_irqsave(&priv->rx_list.lock, flags);
result = ps3_vuart_queue_rx_bytes(dev, &bytes);
if (result) {
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return result;
}
if (priv->rx_list.work.trigger && priv->rx_list.bytes_held
>= priv->rx_list.work.trigger) {
dev_dbg(&dev->core, "%s:%d: schedule_work %lxh bytes\n",
__func__, __LINE__, priv->rx_list.work.trigger);
priv->rx_list.work.trigger = 0;
schedule_work(&priv->rx_list.work.work);
}
spin_unlock_irqrestore(&priv->rx_list.lock, flags);
return result;
}
static int ps3_vuart_handle_interrupt_disconnect(
struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
BUG_ON("no support");
return -1;
}
/**
* ps3_vuart_handle_port_interrupt - second stage interrupt handler
*
* Services any pending interrupt types for the port. Passes control to the
* third stage type specific interrupt handler. Returns control to the first
* stage handler after one iteration.
*/
static int ps3_vuart_handle_port_interrupt(struct ps3_system_bus_device *dev)
{
int result;
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
unsigned long status;
result = ps3_vuart_get_interrupt_status(dev, &status);
if (result)
return result;
dev_dbg(&dev->core, "%s:%d: status: %lxh\n", __func__, __LINE__,
status);
if (status & INTERRUPT_MASK_DISCONNECT) {
priv->stats.disconnect_interrupts++;
result = ps3_vuart_handle_interrupt_disconnect(dev);
if (result)
ps3_vuart_disable_interrupt_disconnect(dev);
}
if (status & INTERRUPT_MASK_TX) {
priv->stats.tx_interrupts++;
result = ps3_vuart_handle_interrupt_tx(dev);
if (result)
ps3_vuart_disable_interrupt_tx(dev);
}
if (status & INTERRUPT_MASK_RX) {
priv->stats.rx_interrupts++;
result = ps3_vuart_handle_interrupt_rx(dev);
if (result)
ps3_vuart_disable_interrupt_rx(dev);
}
return 0;
}
struct vuart_bus_priv {
struct ports_bmp *bmp;
unsigned int virq;
struct mutex probe_mutex;
int use_count;
struct ps3_system_bus_device *devices[PORT_COUNT];
} static vuart_bus_priv;
/**
* ps3_vuart_irq_handler - first stage interrupt handler
*
* Loops finding any interrupting port and its associated instance data.
* Passes control to the second stage port specific interrupt handler. Loops
* until all outstanding interrupts are serviced.
*/
static irqreturn_t ps3_vuart_irq_handler(int irq, void *_private)
{
struct vuart_bus_priv *bus_priv = _private;
BUG_ON(!bus_priv);
while (1) {
unsigned int port;
dump_ports_bmp(bus_priv->bmp);
port = (BITS_PER_LONG - 1) - __ilog2(bus_priv->bmp->status);
if (port == BITS_PER_LONG)
break;
BUG_ON(port >= PORT_COUNT);
BUG_ON(!bus_priv->devices[port]);
ps3_vuart_handle_port_interrupt(bus_priv->devices[port]);
}
return IRQ_HANDLED;
}
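/*
 * Annotation: bmp->status sets bit (BITS_PER_LONG - 1 - port) for a
 * port needing service, so the highest set bit maps to the lowest port
 * number. With BITS_PER_LONG == 64, a status with only bit 62 set
 * yields port = 63 - 62 = 1. On powerpc, __ilog2(0) evaluates to -1,
 * making port == BITS_PER_LONG, which terminates the loop.
 */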
static int ps3_vuart_bus_interrupt_get(void)
{
int result;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
vuart_bus_priv.use_count++;
BUG_ON(vuart_bus_priv.use_count > 2);
if (vuart_bus_priv.use_count != 1)
return 0;
BUG_ON(vuart_bus_priv.bmp);
vuart_bus_priv.bmp = kzalloc(sizeof(struct ports_bmp), GFP_KERNEL);
if (!vuart_bus_priv.bmp) {
pr_debug("%s:%d: kzalloc failed.\n", __func__, __LINE__);
result = -ENOMEM;
goto fail_bmp_malloc;
}
result = ps3_vuart_irq_setup(PS3_BINDING_CPU_ANY, vuart_bus_priv.bmp,
&vuart_bus_priv.virq);
if (result) {
pr_debug("%s:%d: ps3_vuart_irq_setup failed (%d)\n",
__func__, __LINE__, result);
result = -EPERM;
goto fail_alloc_irq;
}
result = request_irq(vuart_bus_priv.virq, ps3_vuart_irq_handler,
IRQF_DISABLED, "vuart", &vuart_bus_priv);
if (result) {
pr_debug("%s:%d: request_irq failed (%d)\n",
__func__, __LINE__, result);
goto fail_request_irq;
}
pr_debug(" <- %s:%d: ok\n", __func__, __LINE__);
return result;
fail_request_irq:
ps3_vuart_irq_destroy(vuart_bus_priv.virq);
vuart_bus_priv.virq = NO_IRQ;
fail_alloc_irq:
kfree(vuart_bus_priv.bmp);
vuart_bus_priv.bmp = NULL;
fail_bmp_malloc:
vuart_bus_priv.use_count--;
pr_debug(" <- %s:%d: failed\n", __func__, __LINE__);
return result;
}
static int ps3_vuart_bus_interrupt_put(void)
{
pr_debug(" -> %s:%d\n", __func__, __LINE__);
vuart_bus_priv.use_count--;
BUG_ON(vuart_bus_priv.use_count < 0);
if (vuart_bus_priv.use_count != 0)
return 0;
free_irq(vuart_bus_priv.virq, &vuart_bus_priv);
ps3_vuart_irq_destroy(vuart_bus_priv.virq);
vuart_bus_priv.virq = NO_IRQ;
kfree(vuart_bus_priv.bmp);
vuart_bus_priv.bmp = NULL;
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return 0;
}
static int ps3_vuart_probe(struct ps3_system_bus_device *dev)
{
int result;
struct ps3_vuart_port_driver *drv;
struct ps3_vuart_port_priv *priv = NULL;
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
drv = ps3_system_bus_dev_to_vuart_drv(dev);
BUG_ON(!drv);
dev_dbg(&dev->core, "%s:%d: (%s)\n", __func__, __LINE__,
drv->core.core.name);
if (dev->port_number >= PORT_COUNT) {
BUG();
return -EINVAL;
}
mutex_lock(&vuart_bus_priv.probe_mutex);
result = ps3_vuart_bus_interrupt_get();
if (result)
goto fail_setup_interrupt;
if (vuart_bus_priv.devices[dev->port_number]) {
dev_dbg(&dev->core, "%s:%d: port busy (%d)\n", __func__,
__LINE__, dev->port_number);
result = -EBUSY;
goto fail_busy;
}
vuart_bus_priv.devices[dev->port_number] = dev;
/* Setup dev->driver_priv. */
dev->driver_priv = kzalloc(sizeof(struct ps3_vuart_port_priv),
GFP_KERNEL);
if (!dev->driver_priv) {
result = -ENOMEM;
goto fail_dev_malloc;
}
priv = to_port_priv(dev);
INIT_LIST_HEAD(&priv->tx_list.head);
spin_lock_init(&priv->tx_list.lock);
INIT_LIST_HEAD(&priv->rx_list.head);
spin_lock_init(&priv->rx_list.lock);
INIT_WORK(&priv->rx_list.work.work, NULL);
priv->rx_list.work.trigger = 0;
priv->rx_list.work.dev = dev;
/* clear stale pending interrupts */
ps3_vuart_clear_rx_bytes(dev, 0);
ps3_vuart_set_interrupt_mask(dev, INTERRUPT_MASK_RX);
ps3_vuart_set_triggers(dev, 1, 1);
if (drv->probe)
result = drv->probe(dev);
else {
result = 0;
dev_info(&dev->core, "%s:%d: no probe method\n", __func__,
__LINE__);
}
if (result) {
dev_dbg(&dev->core, "%s:%d: drv->probe failed\n",
__func__, __LINE__);
goto fail_probe;
}
mutex_unlock(&vuart_bus_priv.probe_mutex);
return result;
fail_probe:
ps3_vuart_set_interrupt_mask(dev, 0);
kfree(dev->driver_priv);
dev->driver_priv = NULL;
fail_dev_malloc:
vuart_bus_priv.devices[dev->port_number] = NULL;
fail_busy:
ps3_vuart_bus_interrupt_put();
fail_setup_interrupt:
mutex_unlock(&vuart_bus_priv.probe_mutex);
dev_dbg(&dev->core, "%s:%d: failed\n", __func__, __LINE__);
return result;
}
/**
* ps3_vuart_cleanup - common cleanup helper.
* @dev: The struct ps3_system_bus_device instance.
*
* Cleans interrupts and HV resources. Must be called with
* vuart_bus_priv.probe_mutex held. Used by ps3_vuart_remove and
* ps3_vuart_shutdown. After this call, polled reading will still work.
*/
static int ps3_vuart_cleanup(struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, "%s:%d\n", __func__, __LINE__);
ps3_vuart_cancel_async(dev);
ps3_vuart_set_interrupt_mask(dev, 0);
ps3_vuart_bus_interrupt_put();
return 0;
}
/**
* ps3_vuart_remove - Completely clean the device instance.
* @dev: The struct ps3_system_bus_device instance.
*
* Cleans all memory, interrupts and HV resources. After this call the
* device can no longer be used.
*/
static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
struct ps3_vuart_port_driver *drv;
BUG_ON(!dev);
mutex_lock(&vuart_bus_priv.probe_mutex);
dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
dev->match_id);
if (!dev->core.driver) {
dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
__LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
return 0;
}
drv = ps3_system_bus_dev_to_vuart_drv(dev);
BUG_ON(!drv);
if (drv->remove) {
drv->remove(dev);
} else {
dev_dbg(&dev->core, "%s:%d: no remove method\n", __func__,
__LINE__);
BUG();
}
ps3_vuart_cleanup(dev);
vuart_bus_priv.devices[dev->port_number] = NULL;
kfree(priv);
priv = NULL;
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
return 0;
}
/**
* ps3_vuart_shutdown - Cleans interrupts and HV resources.
* @dev: The struct ps3_system_bus_device instance.
*
* Cleans interrupts and HV resources. After this call the
* device can still be used in polling mode. This behavior is required
* by the sys-manager to be able to complete the device power operation
* sequence.
*/
static int ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_driver *drv;
BUG_ON(!dev);
mutex_lock(&vuart_bus_priv.probe_mutex);
dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
dev->match_id);
if (!dev->core.driver) {
dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
__LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
return 0;
}
drv = ps3_system_bus_dev_to_vuart_drv(dev);
BUG_ON(!drv);
if (drv->shutdown)
drv->shutdown(dev);
else if (drv->remove) {
dev_dbg(&dev->core, "%s:%d: no shutdown, calling remove\n",
__func__, __LINE__);
drv->remove(dev);
} else {
dev_dbg(&dev->core, "%s:%d: no shutdown method\n", __func__,
__LINE__);
BUG();
}
ps3_vuart_cleanup(dev);
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
return 0;
}
static int __init ps3_vuart_bus_init(void)
{
pr_debug("%s:%d:\n", __func__, __LINE__);
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
mutex_init(&vuart_bus_priv.probe_mutex);
return 0;
}
static void __exit ps3_vuart_bus_exit(void)
{
pr_debug("%s:%d:\n", __func__, __LINE__);
}
core_initcall(ps3_vuart_bus_init);
module_exit(ps3_vuart_bus_exit);
/**
* ps3_vuart_port_driver_register - Add a vuart port device driver.
*/
int ps3_vuart_port_driver_register(struct ps3_vuart_port_driver *drv)
{
int result;
pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.core.name);
BUG_ON(!drv->core.match_id);
BUG_ON(!drv->core.core.name);
drv->core.probe = ps3_vuart_probe;
drv->core.remove = ps3_vuart_remove;
drv->core.shutdown = ps3_vuart_shutdown;
result = ps3_system_bus_driver_register(&drv->core);
return result;
}
EXPORT_SYMBOL_GPL(ps3_vuart_port_driver_register);
/**
* ps3_vuart_port_driver_unregister - Remove a vuart port device driver.
*/
void ps3_vuart_port_driver_unregister(struct ps3_vuart_port_driver *drv)
{
pr_debug("%s:%d: (%s)\n", __func__, __LINE__, drv->core.core.name);
ps3_system_bus_driver_unregister(&drv->core);
}
EXPORT_SYMBOL_GPL(ps3_vuart_port_driver_unregister);
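/*
 * Illustrative sketch, not part of the original file: the minimal
 * shape of a vuart port driver built on the register call above. The
 * names and the match id choice are hypothetical; see the PS3 AV
 * settings and system manager drivers for real users.
 */
#if 0
static int example_vuart_probe(struct ps3_system_bus_device *dev)
{
return 0;
}
static int example_vuart_remove(struct ps3_system_bus_device *dev)
{
return 0;
}
static struct ps3_vuart_port_driver example_vuart_driver = {
.core.match_id = PS3_MATCH_ID_AV_SETTINGS, /* hypothetical choice */
.core.core.name = "example_vuart_driver",
.probe = example_vuart_probe,
.remove = example_vuart_remove,
};
/* module init would call: ps3_vuart_port_driver_register(&example_vuart_driver); */
#endif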
| gpl-2.0 |
zeroprobe/ZeroGolf-Overclocked | arch/powerpc/platforms/86xx/gef_ppc9a.c | 4038 | 5803 | /*
* GE PPC9A board support
*
* Author: Martyn Welch <martyn.welch@ge.com>
*
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Based on: mpc86xx_hpcn.c (MPC86xx HPCN board specific routines)
* Copyright 2006 Freescale Semiconductor Inc.
*
* NEC fixup adapted from arch/mips/pci/fixup-lm2e.c
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/nvram.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
#include "mpc86xx.h"
#include "gef_pic.h"
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(KERN_ERR "PPC9A: " fmt); } while (0)
#else
#define DBG(fmt...) do { } while (0)
#endif
void __iomem *ppc9a_regs;
static void __init gef_ppc9a_init_irq(void)
{
struct device_node *cascade_node = NULL;
mpc86xx_init_irq();
/*
* There is a simple interrupt handler in the main FPGA, this needs
* to be cascaded into the MPIC
*/
cascade_node = of_find_compatible_node(NULL, NULL, "gef,fpga-pic-1.00");
if (!cascade_node) {
printk(KERN_WARNING "PPC9A: No FPGA PIC\n");
return;
}
gef_pic_init(cascade_node);
of_node_put(cascade_node);
}
static void __init gef_ppc9a_setup_arch(void)
{
struct device_node *regs;
#ifdef CONFIG_PCI
struct device_node *np;
for_each_compatible_node(np, "pci", "fsl,mpc8641-pcie") {
fsl_add_bridge(np, 1);
}
#endif
printk(KERN_INFO "GE Intelligent Platforms PPC9A 6U VME SBC\n");
#ifdef CONFIG_SMP
mpc86xx_smp_init();
#endif
/* Remap basic board registers */
regs = of_find_compatible_node(NULL, NULL, "gef,ppc9a-fpga-regs");
if (regs) {
ppc9a_regs = of_iomap(regs, 0);
if (ppc9a_regs == NULL)
printk(KERN_WARNING "Unable to map board registers\n");
of_node_put(regs);
}
#if defined(CONFIG_MMIO_NVRAM)
mmio_nvram_init();
#endif
}
/* Return the PCB revision */
static unsigned int gef_ppc9a_get_pcb_rev(void)
{
unsigned int reg;
reg = ioread32be(ppc9a_regs);
return (reg >> 16) & 0xff;
}
/* Return the board (software) revision */
static unsigned int gef_ppc9a_get_board_rev(void)
{
unsigned int reg;
reg = ioread32be(ppc9a_regs);
return (reg >> 8) & 0xff;
}
/* Return the FPGA revision */
static unsigned int gef_ppc9a_get_fpga_rev(void)
{
unsigned int reg;
reg = ioread32be(ppc9a_regs);
return reg & 0xf;
}
/* Return VME Geographical Address */
static unsigned int gef_ppc9a_get_vme_geo_addr(void)
{
unsigned int reg;
reg = ioread32be(ppc9a_regs + 0x4);
return reg & 0x1f;
}
/* Return VME System Controller Status */
static unsigned int gef_ppc9a_get_vme_is_syscon(void)
{
unsigned int reg;
reg = ioread32be(ppc9a_regs + 0x4);
return (reg >> 9) & 0x1;
}
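/*
 * Annotation: the accessors above decode two 32-bit big-endian FPGA
 * registers - offset 0x0 holds the PCB revision (bits 16-23), board
 * revision (bits 8-15) and FPGA revision (bits 0-3); offset 0x4 holds
 * the VME geographical address (bits 0-4) and the system controller
 * flag (bit 9).
 */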
static void gef_ppc9a_show_cpuinfo(struct seq_file *m)
{
uint svid = mfspr(SPRN_SVR);
seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n");
seq_printf(m, "Revision\t: %u%c\n", gef_ppc9a_get_pcb_rev(),
('A' + gef_ppc9a_get_board_rev()));
seq_printf(m, "FPGA Revision\t: %u\n", gef_ppc9a_get_fpga_rev());
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
seq_printf(m, "VME geo. addr\t: %u\n", gef_ppc9a_get_vme_geo_addr());
seq_printf(m, "VME syscon\t: %s\n",
gef_ppc9a_get_vme_is_syscon() ? "yes" : "no");
}
static void __init gef_ppc9a_nec_fixup(struct pci_dev *pdev)
{
unsigned int val;
/* Do not do the fixup on other platforms! */
if (!machine_is(gef_ppc9a))
return;
printk(KERN_INFO "Running NEC uPD720101 Fixup\n");
/* Ensure ports 1, 2, 3, 4 & 5 are enabled */
pci_read_config_dword(pdev, 0xe0, &val);
pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x5);
/* System clock is 48-MHz Oscillator and EHCI Enabled. */
pci_write_config_dword(pdev, 0xe4, 1 << 5);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
gef_ppc9a_nec_fixup);
/*
* Called very early, device-tree isn't unflattened
*
* This function is called to determine whether the BSP is compatible with the
* supplied device-tree, which is assumed to be the correct one for the actual
* board. It is expected that, in the future, a kernel may support multiple
* boards.
*/
static int __init gef_ppc9a_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (of_flat_dt_is_compatible(root, "gef,ppc9a"))
return 1;
return 0;
}
static long __init mpc86xx_time_init(void)
{
unsigned int temp;
/* Set the time base to zero */
mtspr(SPRN_TBWL, 0);
mtspr(SPRN_TBWU, 0);
temp = mfspr(SPRN_HID0);
temp |= HID0_TBEN;
mtspr(SPRN_HID0, temp);
asm volatile("isync");
return 0;
}
static __initdata struct of_device_id of_bus_ids[] = {
{ .compatible = "simple-bus", },
{ .compatible = "gianfar", },
{},
};
static int __init declare_of_platform_devices(void)
{
printk(KERN_DEBUG "Probe platform devices\n");
of_platform_bus_probe(NULL, of_bus_ids, NULL);
return 0;
}
machine_device_initcall(gef_ppc9a, declare_of_platform_devices);
define_machine(gef_ppc9a) {
.name = "GE PPC9A",
.probe = gef_ppc9a_probe,
.setup_arch = gef_ppc9a_setup_arch,
.init_IRQ = gef_ppc9a_init_irq,
.show_cpuinfo = gef_ppc9a_show_cpuinfo,
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
.time_init = mpc86xx_time_init,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
};
| gpl-2.0 |
mifl/android_kernel_pantech_vegapvw | net/netfilter/nf_conntrack_acct.c | 4550 | 3147 | /* Accouting handling for netfilter. */
/*
* (C) 2008 Krzysztof Piotr Oledzki <ole@ans.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
static bool nf_ct_acct __read_mostly;
module_param_named(acct, nf_ct_acct, bool, 0644);
MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting.");
#ifdef CONFIG_SYSCTL
static struct ctl_table acct_sysctl_table[] = {
{
.procname = "nf_conntrack_acct",
.data = &init_net.ct.sysctl_acct,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{}
};
#endif /* CONFIG_SYSCTL */
unsigned int
seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
{
struct nf_conn_counter *acct;
acct = nf_conn_acct_find(ct);
if (!acct)
return 0;
return seq_printf(s, "packets=%llu bytes=%llu ",
(unsigned long long)atomic64_read(&acct[dir].packets),
(unsigned long long)atomic64_read(&acct[dir].bytes));
};
EXPORT_SYMBOL_GPL(seq_print_acct);
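/*
 * Illustrative sketch, not part of the original file: how a conntrack
 * user would fetch the per-direction counters that seq_print_acct()
 * formats above.
 */
#if 0
static u64 example_orig_bytes(const struct nf_conn *ct)
{
struct nf_conn_counter *acct = nf_conn_acct_find(ct);
return acct ? atomic64_read(&acct[IP_CT_DIR_ORIGINAL].bytes) : 0;
}
#endif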
static struct nf_ct_ext_type acct_extend __read_mostly = {
.len = sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]),
.align = __alignof__(struct nf_conn_counter[IP_CT_DIR_MAX]),
.id = NF_CT_EXT_ACCT,
};
#ifdef CONFIG_SYSCTL
static int nf_conntrack_acct_init_sysctl(struct net *net)
{
struct ctl_table *table;
table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
GFP_KERNEL);
if (!table)
goto out;
table[0].data = &net->ct.sysctl_acct;
net->ct.acct_sysctl_header = register_net_sysctl_table(net,
nf_net_netfilter_sysctl_path, table);
if (!net->ct.acct_sysctl_header) {
printk(KERN_ERR "nf_conntrack_acct: can't register to sysctl.\n");
goto out_register;
}
return 0;
out_register:
kfree(table);
out:
return -ENOMEM;
}
static void nf_conntrack_acct_fini_sysctl(struct net *net)
{
struct ctl_table *table;
table = net->ct.acct_sysctl_header->ctl_table_arg;
unregister_net_sysctl_table(net->ct.acct_sysctl_header);
kfree(table);
}
#else
static int nf_conntrack_acct_init_sysctl(struct net *net)
{
return 0;
}
static void nf_conntrack_acct_fini_sysctl(struct net *net)
{
}
#endif
int nf_conntrack_acct_init(struct net *net)
{
int ret;
net->ct.sysctl_acct = nf_ct_acct;
if (net_eq(net, &init_net)) {
ret = nf_ct_extend_register(&acct_extend);
if (ret < 0) {
printk(KERN_ERR "nf_conntrack_acct: Unable to register extension\n");
goto out_extend_register;
}
}
ret = nf_conntrack_acct_init_sysctl(net);
if (ret < 0)
goto out_sysctl;
return 0;
out_sysctl:
if (net_eq(net, &init_net))
nf_ct_extend_unregister(&acct_extend);
out_extend_register:
return ret;
}
void nf_conntrack_acct_fini(struct net *net)
{
nf_conntrack_acct_fini_sysctl(net);
if (net_eq(net, &init_net))
nf_ct_extend_unregister(&acct_extend);
}
| gpl-2.0 |
Tkkg1994/IronKernel | net/atm/pppoatm.c | 4806 | 11092 | /* net/atm/pppoatm.c - RFC2364 PPP over ATM/AAL5 */
/* Copyright 1999-2000 by Mitchell Blank Jr */
/* Based on clip.c; 1995-1999 by Werner Almesberger, EPFL LRC/ICA */
/* And on ppp_async.c; Copyright 1999 Paul Mackerras */
/* And help from Jens Axboe */
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* This driver provides the encapsulation and framing for sending
* and receiving PPP frames in ATM AAL5 PDUs.
*/
/*
* One shortcoming of this driver is that it does not comply with
* section 8 of RFC2364 - we are supposed to detect a change
* in encapsulation and immediately abort the connection (in order
* to avoid a black-hole being created if our peer loses state
* and changes encapsulation unilaterally). However, since the
* ppp_generic layer actually does the decapsulation, we need
* a way of notifying it when we _think_ there might be a problem.
* There are two cases:
* 1. LLC-encapsulation was missing when it was enabled. In
* this case, we should tell the upper layer "tear down
* this session if this skb looks ok to you"
* 2. LLC-encapsulation was present when it was disabled. Then
* we need to tell the upper layer "this packet may be
* ok, but if it's in error tear down the session"
* These hooks are not yet available in ppp_generic
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/capability.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/atmppp.h>
#include "common.h"
enum pppoatm_encaps {
e_autodetect = PPPOATM_ENCAPS_AUTODETECT,
e_vc = PPPOATM_ENCAPS_VC,
e_llc = PPPOATM_ENCAPS_LLC,
};
struct pppoatm_vcc {
struct atm_vcc *atmvcc; /* VCC descriptor */
void (*old_push)(struct atm_vcc *, struct sk_buff *);
void (*old_pop)(struct atm_vcc *, struct sk_buff *);
/* keep old push/pop for detaching */
enum pppoatm_encaps encaps;
int flags; /* SC_COMP_PROT - compress protocol */
struct ppp_channel chan; /* interface to generic ppp layer */
struct tasklet_struct wakeup_tasklet;
};
/*
* Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol
* ID (0xC021) used in autodetection
*/
static const unsigned char pppllc[6] = { 0xFE, 0xFE, 0x03, 0xCF, 0xC0, 0x21 };
#define LLC_LEN (4)
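/*
 * Annotation: the first four bytes are the RFC2364 LLC header for PPP
 * (DSAP 0xFE, SSAP 0xFE, control 0x03, NLPID 0xCF); the trailing
 * 0xC0 0x21 is the PPP protocol field for LCP, the first frame a
 * well-behaved peer sends, which is what autodetection keys on.
 */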
static inline struct pppoatm_vcc *atmvcc_to_pvcc(const struct atm_vcc *atmvcc)
{
return (struct pppoatm_vcc *) (atmvcc->user_back);
}
static inline struct pppoatm_vcc *chan_to_pvcc(const struct ppp_channel *chan)
{
return (struct pppoatm_vcc *) (chan->private);
}
/*
* We can't do this directly from our _pop handler, since the ppp code
* doesn't want to be called in interrupt context, so we do it from
* a tasklet
*/
static void pppoatm_wakeup_sender(unsigned long arg)
{
ppp_output_wakeup((struct ppp_channel *) arg);
}
/*
* This gets called every time the ATM card has finished sending our
* skb. The ->old_pop will take care of normal atm flow control,
* but we also need to wake up the device if we blocked it
*/
static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
pvcc->old_pop(atmvcc, skb);
/*
* We don't really always want to do this since it's
* really inefficient - it would be much better if we could
* test if we had actually throttled the generic layer.
* Unfortunately then there would be a nasty SMP race where
* we could clear that flag just as we refuse another packet.
* For now we do the safe thing.
*/
tasklet_schedule(&pvcc->wakeup_tasklet);
}
/*
* Unbind from PPP - currently we only do this when closing the socket,
* but we could put this into an ioctl if need be
*/
static void pppoatm_unassign_vcc(struct atm_vcc *atmvcc)
{
struct pppoatm_vcc *pvcc;
pvcc = atmvcc_to_pvcc(atmvcc);
atmvcc->push = pvcc->old_push;
atmvcc->pop = pvcc->old_pop;
tasklet_kill(&pvcc->wakeup_tasklet);
ppp_unregister_channel(&pvcc->chan);
atmvcc->user_back = NULL;
kfree(pvcc);
/* Gee, I hope we have the big kernel lock here... */
module_put(THIS_MODULE);
}
/* Called when an AAL5 PDU comes in */
static void pppoatm_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
pr_debug("\n");
if (skb == NULL) { /* VCC was closed */
pr_debug("removing ATMPPP VCC %p\n", pvcc);
pppoatm_unassign_vcc(atmvcc);
atmvcc->push(atmvcc, NULL); /* Pass along bad news */
return;
}
atm_return(atmvcc, skb->truesize);
switch (pvcc->encaps) {
case e_llc:
if (skb->len < LLC_LEN ||
memcmp(skb->data, pppllc, LLC_LEN))
goto error;
skb_pull(skb, LLC_LEN);
break;
case e_autodetect:
if (pvcc->chan.ppp == NULL) { /* Not bound yet! */
kfree_skb(skb);
return;
}
if (skb->len >= sizeof(pppllc) &&
!memcmp(skb->data, pppllc, sizeof(pppllc))) {
pvcc->encaps = e_llc;
skb_pull(skb, LLC_LEN);
break;
}
if (skb->len >= (sizeof(pppllc) - LLC_LEN) &&
!memcmp(skb->data, &pppllc[LLC_LEN],
sizeof(pppllc) - LLC_LEN)) {
pvcc->encaps = e_vc;
pvcc->chan.mtu += LLC_LEN;
break;
}
pr_debug("Couldn't autodetect yet (skb: %02X %02X %02X %02X %02X %02X)\n",
skb->data[0], skb->data[1], skb->data[2],
skb->data[3], skb->data[4], skb->data[5]);
goto error;
case e_vc:
break;
}
ppp_input(&pvcc->chan, skb);
return;
error:
kfree_skb(skb);
ppp_input_error(&pvcc->chan, 0);
}
/*
* Called by the ppp_generic.c to send a packet - returns true if packet
* was accepted. If we return false, then it's our job to call
* ppp_output_wakeup(chan) when we're feeling more up to it.
* Note that in the ENOMEM case (as opposed to the !atm_may_send case)
* we should really drop the packet, but the generic layer doesn't
* support this yet. We just return 'DROP_PACKET' which we actually define
* as success, just to be clear what we're really doing.
*/
#define DROP_PACKET 1
static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
{
struct pppoatm_vcc *pvcc = chan_to_pvcc(chan);
ATM_SKB(skb)->vcc = pvcc->atmvcc;
pr_debug("(skb=0x%p, vcc=0x%p)\n", skb, pvcc->atmvcc);
if (skb->data[0] == '\0' && (pvcc->flags & SC_COMP_PROT))
(void) skb_pull(skb, 1);
switch (pvcc->encaps) { /* LLC encapsulation needed */
case e_llc:
if (skb_headroom(skb) < LLC_LEN) {
struct sk_buff *n;
n = skb_realloc_headroom(skb, LLC_LEN);
if (n != NULL &&
!atm_may_send(pvcc->atmvcc, n->truesize)) {
kfree_skb(n);
goto nospace;
}
kfree_skb(skb);
skb = n;
if (skb == NULL)
return DROP_PACKET;
} else if (!atm_may_send(pvcc->atmvcc, skb->truesize))
goto nospace;
memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
break;
case e_vc:
if (!atm_may_send(pvcc->atmvcc, skb->truesize))
goto nospace;
break;
case e_autodetect:
pr_debug("Trying to send without setting encaps!\n");
kfree_skb(skb);
return 1;
}
atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
return ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
? DROP_PACKET : 1;
nospace:
/*
* We don't have space to send this SKB now, but we might have
* already applied SC_COMP_PROT compression, so may need to undo
*/
if ((pvcc->flags & SC_COMP_PROT) && skb_headroom(skb) > 0 &&
skb->data[-1] == '\0')
(void) skb_push(skb, 1);
return 0;
}
/* This handles ioctls sent to the /dev/ppp interface */
static int pppoatm_devppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
unsigned long arg)
{
switch (cmd) {
case PPPIOCGFLAGS:
return put_user(chan_to_pvcc(chan)->flags, (int __user *) arg)
? -EFAULT : 0;
case PPPIOCSFLAGS:
return get_user(chan_to_pvcc(chan)->flags, (int __user *) arg)
? -EFAULT : 0;
}
return -ENOTTY;
}
static const struct ppp_channel_ops pppoatm_ops = {
.start_xmit = pppoatm_send,
.ioctl = pppoatm_devppp_ioctl,
};
static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
{
struct atm_backend_ppp be;
struct pppoatm_vcc *pvcc;
int err;
/*
* Each PPPoATM instance has its own tasklet - this is just a
* prototypical one used to initialize them
*/
static const DECLARE_TASKLET(tasklet_proto, pppoatm_wakeup_sender, 0);
if (copy_from_user(&be, arg, sizeof be))
return -EFAULT;
if (be.encaps != PPPOATM_ENCAPS_AUTODETECT &&
be.encaps != PPPOATM_ENCAPS_VC && be.encaps != PPPOATM_ENCAPS_LLC)
return -EINVAL;
pvcc = kzalloc(sizeof(*pvcc), GFP_KERNEL);
if (pvcc == NULL)
return -ENOMEM;
pvcc->atmvcc = atmvcc;
pvcc->old_push = atmvcc->push;
pvcc->old_pop = atmvcc->pop;
pvcc->encaps = (enum pppoatm_encaps) be.encaps;
pvcc->chan.private = pvcc;
pvcc->chan.ops = &pppoatm_ops;
pvcc->chan.mtu = atmvcc->qos.txtp.max_sdu - PPP_HDRLEN -
(be.encaps == e_vc ? 0 : LLC_LEN);
pvcc->wakeup_tasklet = tasklet_proto;
pvcc->wakeup_tasklet.data = (unsigned long) &pvcc->chan;
err = ppp_register_channel(&pvcc->chan);
if (err != 0) {
kfree(pvcc);
return err;
}
atmvcc->user_back = pvcc;
atmvcc->push = pppoatm_push;
atmvcc->pop = pppoatm_pop;
__module_get(THIS_MODULE);
/* re-process everything received between connection setup and
backend setup */
vcc_process_recv_queue(atmvcc);
return 0;
}
/*
* This handles ioctls actually performed on our vcc - we must return
* -ENOIOCTLCMD for any unrecognized ioctl
*/
static int pppoatm_ioctl(struct socket *sock, unsigned int cmd,
unsigned long arg)
{
struct atm_vcc *atmvcc = ATM_SD(sock);
void __user *argp = (void __user *)arg;
if (cmd != ATM_SETBACKEND && atmvcc->push != pppoatm_push)
return -ENOIOCTLCMD;
switch (cmd) {
case ATM_SETBACKEND: {
atm_backend_t b;
if (get_user(b, (atm_backend_t __user *) argp))
return -EFAULT;
if (b != ATM_BACKEND_PPP)
return -ENOIOCTLCMD;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
return pppoatm_assign_vcc(atmvcc, argp);
}
case PPPIOCGCHAN:
return put_user(ppp_channel_index(&atmvcc_to_pvcc(atmvcc)->
chan), (int __user *) argp) ? -EFAULT : 0;
case PPPIOCGUNIT:
return put_user(ppp_unit_number(&atmvcc_to_pvcc(atmvcc)->
chan), (int __user *) argp) ? -EFAULT : 0;
}
return -ENOIOCTLCMD;
}
static struct atm_ioctl pppoatm_ioctl_ops = {
.owner = THIS_MODULE,
.ioctl = pppoatm_ioctl,
};
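/*
 * Illustrative sketch (userspace, not part of this module): binding
 * the PPPoATM backend to an open ATM socket, roughly as pppd's
 * pppoatm plugin does. Error handling is omitted.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/atmppp.h>
static int example_bind_backend(int atm_fd)
{
struct atm_backend_ppp be = {
.backend_num = ATM_BACKEND_PPP,
.encaps = PPPOATM_ENCAPS_AUTODETECT,
};
return ioctl(atm_fd, ATM_SETBACKEND, &be);
}
#endif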
static int __init pppoatm_init(void)
{
register_atm_ioctl(&pppoatm_ioctl_ops);
return 0;
}
static void __exit pppoatm_exit(void)
{
deregister_atm_ioctl(&pppoatm_ioctl_ops);
}
module_init(pppoatm_init);
module_exit(pppoatm_exit);
MODULE_AUTHOR("Mitchell Blank Jr <mitch@sfgoth.com>");
MODULE_DESCRIPTION("RFC2364 PPP over ATM/AAL5");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ZdrowyGosciu/kernel_lge | drivers/staging/mei/interface.c | 4806 | 11130 | /*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#include <linux/pci.h>
#include "mei_dev.h"
#include "mei.h"
#include "interface.h"
/**
* mei_hcsr_set - writes H_CSR register to the mei device,
* and ignores the H_IS bit for it is write-one-to-zero.
*
* @dev: the device structure
*/
void mei_hcsr_set(struct mei_device *dev)
{
if ((dev->host_hw_state & H_IS) == H_IS)
dev->host_hw_state &= ~H_IS;
mei_reg_write(dev, H_CSR, dev->host_hw_state);
dev->host_hw_state = mei_hcsr_read(dev);
}
/**
* mei_enable_interrupts - enables mei device interrupts
*
* @dev: the device structure
*/
void mei_enable_interrupts(struct mei_device *dev)
{
dev->host_hw_state |= H_IE;
mei_hcsr_set(dev);
}
/**
* mei_disable_interrupts - disables mei device interrupts
*
* @dev: the device structure
*/
void mei_disable_interrupts(struct mei_device *dev)
{
dev->host_hw_state &= ~H_IE;
mei_hcsr_set(dev);
}
/**
* _host_get_filled_slots - gets number of device filled buffer slots
*
* @dev: the device structure
*
* returns number of filled slots
*/
static unsigned char _host_get_filled_slots(const struct mei_device *dev)
{
char read_ptr, write_ptr;
read_ptr = (char) ((dev->host_hw_state & H_CBRP) >> 8);
write_ptr = (char) ((dev->host_hw_state & H_CBWP) >> 16);
return (unsigned char) (write_ptr - read_ptr);
}
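/*
 * Annotation: H_CSR packs the circular buffer read pointer into bits
 * 8-15 and the write pointer into bits 16-23. The subtraction is done
 * on signed chars so that an 8-bit wrap of the pointers still yields
 * the right fill count, e.g. write_ptr 0x02, read_ptr 0xFE gives
 * (unsigned char)(0x02 - 0xFE) = 4 filled slots.
 */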
/**
* mei_host_buffer_is_empty - checks if host buffer is empty.
*
* @dev: the device structure
*
* returns 1 if empty, 0 - otherwise.
*/
int mei_host_buffer_is_empty(struct mei_device *dev)
{
unsigned char filled_slots;
dev->host_hw_state = mei_hcsr_read(dev);
filled_slots = _host_get_filled_slots(dev);
if (filled_slots == 0)
return 1;
return 0;
}
/**
* mei_count_empty_write_slots - counts write empty slots.
*
* @dev: the device structure
*
* returns -EOVERFLOW if overflow, otherwise empty slots count
*/
int mei_count_empty_write_slots(struct mei_device *dev)
{
unsigned char buffer_depth, filled_slots, empty_slots;
dev->host_hw_state = mei_hcsr_read(dev);
buffer_depth = (unsigned char) ((dev->host_hw_state & H_CBD) >> 24);
filled_slots = _host_get_filled_slots(dev);
empty_slots = buffer_depth - filled_slots;
/* check for overflow */
if (filled_slots > buffer_depth)
return -EOVERFLOW;
return empty_slots;
}
/**
* mei_write_message - writes a message to mei device.
*
* @dev: the device structure
* @header: header of message
* @write_buffer: message buffer will be written
* @write_length: message size will be written
*
* This function returns -EIO if write has failed
*/
int mei_write_message(struct mei_device *dev,
struct mei_msg_hdr *header,
unsigned char *write_buffer,
unsigned long write_length)
{
u32 temp_msg = 0;
unsigned long bytes_written = 0;
unsigned char buffer_depth, filled_slots, empty_slots;
unsigned long dw_to_write;
dev->host_hw_state = mei_hcsr_read(dev);
dev_dbg(&dev->pdev->dev,
"host_hw_state = 0x%08x.\n",
dev->host_hw_state);
dev_dbg(&dev->pdev->dev,
"mei_write_message header=%08x.\n",
*((u32 *) header));
buffer_depth = (unsigned char) ((dev->host_hw_state & H_CBD) >> 24);
filled_slots = _host_get_filled_slots(dev);
empty_slots = buffer_depth - filled_slots;
dev_dbg(&dev->pdev->dev,
"filled = %hu, empty = %hu.\n",
filled_slots, empty_slots);
dw_to_write = ((write_length + 3) / 4);
if (dw_to_write > empty_slots)
return -EIO;
mei_reg_write(dev, H_CB_WW, *((u32 *) header));
while (write_length >= 4) {
mei_reg_write(dev, H_CB_WW,
*(u32 *) (write_buffer + bytes_written));
bytes_written += 4;
write_length -= 4;
}
if (write_length > 0) {
memcpy(&temp_msg, &write_buffer[bytes_written], write_length);
mei_reg_write(dev, H_CB_WW, temp_msg);
}
dev->host_hw_state |= H_IG;
mei_hcsr_set(dev);
dev->me_hw_state = mei_mecsr_read(dev);
if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
return -EIO;
return 0;
}
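/*
 * Annotation: the payload is written in whole 32-bit slots, so
 * write_length is rounded up via (write_length + 3) / 4 before the
 * space check, and any trailing 1-3 bytes are staged through temp_msg
 * so the final register write is still a full dword.
 */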
/**
* mei_count_full_read_slots - counts read full slots.
*
* @dev: the device structure
*
* returns -EOVERFLOW if overflow, otherwise filled slots count
*/
int mei_count_full_read_slots(struct mei_device *dev)
{
char read_ptr, write_ptr;
unsigned char buffer_depth, filled_slots;
dev->me_hw_state = mei_mecsr_read(dev);
buffer_depth = (unsigned char)((dev->me_hw_state & ME_CBD_HRA) >> 24);
read_ptr = (char) ((dev->me_hw_state & ME_CBRP_HRA) >> 8);
write_ptr = (char) ((dev->me_hw_state & ME_CBWP_HRA) >> 16);
filled_slots = (unsigned char) (write_ptr - read_ptr);
/* check for overflow */
if (filled_slots > buffer_depth)
return -EOVERFLOW;
dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
return (int)filled_slots;
}
/**
* mei_read_slots - reads a message from mei device.
*
* @dev: the device structure
* @buffer: message buffer will be written
* @buffer_length: message size will be read
*/
void mei_read_slots(struct mei_device *dev, unsigned char *buffer,
unsigned long buffer_length)
{
u32 *reg_buf = (u32 *)buffer;
for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
*reg_buf++ = mei_mecbrw_read(dev);
if (buffer_length > 0) {
u32 reg = mei_mecbrw_read(dev);
memcpy(reg_buf, &reg, buffer_length);
}
dev->host_hw_state |= H_IG;
mei_hcsr_set(dev);
}
/**
* mei_flow_ctrl_creds - checks flow_control credentials.
*
* @dev: the device structure
* @cl: private data of the file object
*
* returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
* -ENOENT if mei_cl is not present
* -EINVAL if single_recv_buf == 0
*/
int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl)
{
int i;
if (!dev->me_clients_num)
return 0;
if (cl->mei_flow_ctrl_creds > 0)
return 1;
for (i = 0; i < dev->me_clients_num; i++) {
struct mei_me_client *me_cl = &dev->me_clients[i];
if (me_cl->client_id == cl->me_client_id) {
if (me_cl->mei_flow_ctrl_creds) {
if (WARN_ON(me_cl->props.single_recv_buf == 0))
return -EINVAL;
return 1;
} else {
return 0;
}
}
}
return -ENOENT;
}
/**
* mei_flow_ctrl_reduce - reduces flow_control.
*
* @dev: the device structure
* @cl: private data of the file object
* returns
* 0 on success
* -ENOENT when me client is not found
* -EINVAL when ctrl credits are <= 0
*/
int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
{
int i;
if (!dev->me_clients_num)
return -ENOENT;
for (i = 0; i < dev->me_clients_num; i++) {
struct mei_me_client *me_cl = &dev->me_clients[i];
if (me_cl->client_id == cl->me_client_id) {
if (me_cl->props.single_recv_buf != 0) {
if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
return -EINVAL;
dev->me_clients[i].mei_flow_ctrl_creds--;
} else {
if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
return -EINVAL;
cl->mei_flow_ctrl_creds--;
}
return 0;
}
}
return -ENOENT;
}
/**
* mei_send_flow_control - sends flow control to fw.
*
* @dev: the device structure
* @cl: private data of the file object
*
* This function returns -EIO on write failure
*/
int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr *mei_hdr;
struct hbm_flow_control *mei_flow_control;
mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
mei_hdr->host_addr = 0;
mei_hdr->me_addr = 0;
mei_hdr->length = sizeof(struct hbm_flow_control);
mei_hdr->msg_complete = 1;
mei_hdr->reserved = 0;
mei_flow_control = (struct hbm_flow_control *) &dev->wr_msg_buf[1];
memset(mei_flow_control, 0, sizeof(*mei_flow_control));
mei_flow_control->host_addr = cl->host_client_id;
mei_flow_control->me_addr = cl->me_client_id;
mei_flow_control->hbm_cmd = MEI_FLOW_CONTROL_CMD;
memset(mei_flow_control->reserved, 0,
sizeof(mei_flow_control->reserved));
dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
cl->host_client_id, cl->me_client_id);
return mei_write_message(dev, mei_hdr,
(unsigned char *) mei_flow_control,
sizeof(struct hbm_flow_control));
}
/**
* mei_other_client_is_connecting - checks if other
* client with the same client id is connected.
*
* @dev: the device structure
* @cl: private data of the file object
*
* returns 1 if other client is connected, 0 - otherwise.
*/
int mei_other_client_is_connecting(struct mei_device *dev,
struct mei_cl *cl)
{
struct mei_cl *cl_pos = NULL;
struct mei_cl *cl_next = NULL;
list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
if ((cl_pos->state == MEI_FILE_CONNECTING) &&
(cl_pos != cl) &&
cl->me_client_id == cl_pos->me_client_id)
return 1;
}
return 0;
}
/**
* mei_disconnect - sends disconnect message to fw.
*
* @dev: the device structure
* @cl: private data of the file object
*
* This function returns -EIO on write failure
*/
int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr *mei_hdr;
struct hbm_client_disconnect_request *mei_cli_disconnect;
mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
mei_hdr->host_addr = 0;
mei_hdr->me_addr = 0;
mei_hdr->length = sizeof(struct hbm_client_disconnect_request);
mei_hdr->msg_complete = 1;
mei_hdr->reserved = 0;
mei_cli_disconnect =
(struct hbm_client_disconnect_request *) &dev->wr_msg_buf[1];
memset(mei_cli_disconnect, 0, sizeof(*mei_cli_disconnect));
mei_cli_disconnect->host_addr = cl->host_client_id;
mei_cli_disconnect->me_addr = cl->me_client_id;
mei_cli_disconnect->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD;
mei_cli_disconnect->reserved[0] = 0;
return mei_write_message(dev, mei_hdr,
(unsigned char *) mei_cli_disconnect,
sizeof(struct hbm_client_disconnect_request));
}
/**
* mei_connect - sends connect message to fw.
*
* @dev: the device structure
* @cl: private data of the file object
*
* This function returns -EIO on write failure
*/
int mei_connect(struct mei_device *dev, struct mei_cl *cl)
{
struct mei_msg_hdr *mei_hdr;
struct hbm_client_connect_request *mei_cli_connect;
mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
mei_hdr->host_addr = 0;
mei_hdr->me_addr = 0;
mei_hdr->length = sizeof(struct hbm_client_connect_request);
mei_hdr->msg_complete = 1;
mei_hdr->reserved = 0;
mei_cli_connect =
(struct hbm_client_connect_request *) &dev->wr_msg_buf[1];
mei_cli_connect->host_addr = cl->host_client_id;
mei_cli_connect->me_addr = cl->me_client_id;
mei_cli_connect->hbm_cmd = CLIENT_CONNECT_REQ_CMD;
mei_cli_connect->reserved = 0;
return mei_write_message(dev, mei_hdr,
(unsigned char *) mei_cli_connect,
sizeof(struct hbm_client_connect_request));
}
| gpl-2.0 |
lawnn/Linux_3.4.y | arch/sh/kernel/cpu/sh3/setup-sh7705.c | 5062 | 5837 | /*
* SH7705 Setup
*
* Copyright (C) 2006 - 2009 Paul Mundt
* Copyright (C) 2007 Nobuhiro Iwamatsu
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
#include <asm/rtc.h>
#include <cpu/serial.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5,
PINT07, PINT815,
DMAC, SCIF0, SCIF2, ADC_ADI, USB,
TPU0, TPU1, TPU2, TPU3,
TMU0, TMU1, TMU2,
RTC, WDT, REF_RCMI,
};
static struct intc_vect vectors[] __initdata = {
/* IRQ0->5 are handled in setup-sh3.c */
INTC_VECT(PINT07, 0x700), INTC_VECT(PINT815, 0x720),
INTC_VECT(DMAC, 0x800), INTC_VECT(DMAC, 0x820),
INTC_VECT(DMAC, 0x840), INTC_VECT(DMAC, 0x860),
INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0),
INTC_VECT(SCIF0, 0x8e0),
INTC_VECT(SCIF2, 0x900), INTC_VECT(SCIF2, 0x920),
INTC_VECT(SCIF2, 0x960),
INTC_VECT(ADC_ADI, 0x980),
INTC_VECT(USB, 0xa20), INTC_VECT(USB, 0xa40),
INTC_VECT(TPU0, 0xc00), INTC_VECT(TPU1, 0xc20),
INTC_VECT(TPU2, 0xc80), INTC_VECT(TPU3, 0xca0),
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460),
INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0),
INTC_VECT(RTC, 0x4c0),
INTC_VECT(WDT, 0x560),
INTC_VECT(REF_RCMI, 0x580),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } },
{ 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, 0, 0 } },
{ 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } },
{ 0xa4000018, 0, 16, 4, /* IPRD */ { PINT07, PINT815, IRQ5, IRQ4 } },
{ 0xa400001a, 0, 16, 4, /* IPRE */ { DMAC, SCIF0, SCIF2, ADC_ADI } },
{ 0xa4080000, 0, 16, 4, /* IPRF */ { 0, 0, USB } },
{ 0xa4080002, 0, 16, 4, /* IPRG */ { TPU0, TPU1 } },
{ 0xa4080004, 0, 16, 4, /* IPRH */ { TPU2, TPU3 } },
};
static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, NULL,
NULL, prio_registers, NULL);
static struct plat_sci_port scif0_platform_data = {
.mapbase = 0xa4410000,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_TIE | SCSCR_RIE | SCSCR_TE |
SCSCR_RE | SCSCR_CKE1 | SCSCR_CKE0,
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.irqs = { 56, 56, 56 },
.ops = &sh770x_sci_port_ops,
.regtype = SCIx_SH7705_SCIF_REGTYPE,
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct plat_sci_port scif1_platform_data = {
.mapbase = 0xa4400000,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_TIE | SCSCR_RIE | SCSCR_TE | SCSCR_RE,
.scbrr_algo_id = SCBRR_ALGO_4,
.type = PORT_SCIF,
.irqs = { 52, 52, 52 },
.ops = &sh770x_sci_port_ops,
.regtype = SCIx_SH7705_SCIF_REGTYPE,
};
static struct platform_device scif1_device = {
.name = "sh-sci",
.id = 1,
.dev = {
.platform_data = &scif1_platform_data,
},
};
static struct resource rtc_resources[] = {
[0] = {
.start = 0xfffffec0,
.end = 0xfffffec0 + 0x1e,
.flags = IORESOURCE_IO,
},
[1] = {
.start = 20,
.flags = IORESOURCE_IRQ,
},
};
static struct sh_rtc_platform_info rtc_info = {
.capabilities = RTC_CAP_4_DIGIT_YEAR,
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
.dev = {
.platform_data = &rtc_info,
},
};
static struct sh_timer_config tmu0_platform_data = {
.channel_offset = 0x02,
.timer_bit = 0,
.clockevent_rating = 200,
};
static struct resource tmu0_resources[] = {
[0] = {
.start = 0xfffffe94,
.end = 0xfffffe9f,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 16,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu0_device = {
.name = "sh_tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct sh_timer_config tmu1_platform_data = {
.channel_offset = 0xe,
.timer_bit = 1,
.clocksource_rating = 200,
};
static struct resource tmu1_resources[] = {
[0] = {
.start = 0xfffffea0,
.end = 0xfffffeab,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 17,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu1_device = {
.name = "sh_tmu",
.id = 1,
.dev = {
.platform_data = &tmu1_platform_data,
},
.resource = tmu1_resources,
.num_resources = ARRAY_SIZE(tmu1_resources),
};
static struct sh_timer_config tmu2_platform_data = {
.channel_offset = 0x1a,
.timer_bit = 2,
};
static struct resource tmu2_resources[] = {
[0] = {
.start = 0xfffffeac,
.end = 0xfffffebb,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 18,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device tmu2_device = {
.name = "sh_tmu",
.id = 2,
.dev = {
.platform_data = &tmu2_platform_data,
},
.resource = tmu2_resources,
.num_resources = ARRAY_SIZE(tmu2_resources),
};
static struct platform_device *sh7705_devices[] __initdata = {
&scif0_device,
&scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
&rtc_device,
};
static int __init sh7705_devices_setup(void)
{
return platform_add_devices(sh7705_devices,
ARRAY_SIZE(sh7705_devices));
}
arch_initcall(sh7705_devices_setup);
static struct platform_device *sh7705_early_devices[] __initdata = {
&scif0_device,
&scif1_device,
&tmu0_device,
&tmu1_device,
&tmu2_device,
};
void __init plat_early_device_setup(void)
{
early_platform_add_devices(sh7705_early_devices,
ARRAY_SIZE(sh7705_early_devices));
}
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
plat_irq_setup_sh3();
}
| gpl-2.0 |
ausdim/GE-Edition-I9505-jfltexx | arch/sh/drivers/pci/fixups-landisk.c | 5062 | 1508 | /*
* arch/sh/drivers/pci/fixups-landisk.c
*
* PCI initialization for the I-O DATA Device, Inc. LANDISK board
*
* Copyright (C) 2006 kogiidena
* Copyright (C) 2010 Nobuhiro Iwamatsu
*
* May be copied or modified under the terms of the GNU General Public
* License. See linux/COPYING for more information.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include "pci-sh4.h"
#define PCIMCR_MRSET_OFF 0xBFFFFFFF
#define PCIMCR_RFSH_OFF 0xFFFFFFFB
int pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
/*
* slot0: pin1-4 = irq5,6,7,8
* slot1: pin1-4 = irq6,7,8,5
* slot2: pin1-4 = irq7,8,5,6
* slot3: pin1-4 = irq8,5,6,7
*/
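/*
 * Worked example: slot 2, pin 3 yields ((2 + 3 - 1) & 0x3) + 5 = 5,
 * matching the "slot2: pin1-4 = irq7,8,5,6" row above.
 */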
int irq = ((slot + pin - 1) & 0x3) + 5;
if ((slot | (pin - 1)) > 0x3) {
printk(KERN_WARNING "PCI: Bad IRQ mapping request for slot %d pin %c\n",
slot, pin - 1 + 'A');
return -1;
}
return irq;
}
int pci_fixup_pcic(struct pci_channel *chan)
{
unsigned long bcr1, mcr;
bcr1 = __raw_readl(SH7751_BCR1);
bcr1 |= 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */
pci_write_reg(chan, bcr1, SH4_PCIBCR1);
mcr = __raw_readl(SH7751_MCR);
mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF;
pci_write_reg(chan, mcr, SH4_PCIMCR);
pci_write_reg(chan, 0x0c000000, SH7751_PCICONF5);
pci_write_reg(chan, 0xd0000000, SH7751_PCICONF6);
pci_write_reg(chan, 0x0c000000, SH4_PCILAR0);
pci_write_reg(chan, 0x00000000, SH4_PCILAR1);
return 0;
}
| gpl-2.0 |
aidfarh/lightning-kernel | sound/core/memalloc.c | 5062 | 13762 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Takashi Iwai <tiwai@suse.de>
*
* Generic memory allocators
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/memalloc.h>
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");
/*
*/
static DEFINE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);
/* buffer preservation list */
struct snd_mem_list {
struct snd_dma_buffer buffer;
unsigned int id;
struct list_head list;
};
/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1
/*
*
* Generic memory allocators
*
*/
static long snd_allocated_pages; /* holding the number of allocated pages */
static inline void inc_snd_pages(int order)
{
snd_allocated_pages += 1 << order;
}
static inline void dec_snd_pages(int order)
{
snd_allocated_pages -= 1 << order;
}
/**
* snd_malloc_pages - allocate pages with the given size
* @size: the size to allocate in bytes
* @gfp_flags: the allocation conditions, GFP_XXX
*
* Allocates the physically contiguous pages with the given size.
*
* Returns a pointer to the buffer, or NULL if there is not enough memory.
*/
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
int pg;
void *res;
if (WARN_ON(!size))
return NULL;
if (WARN_ON(!gfp_flags))
return NULL;
gfp_flags |= __GFP_COMP; /* compound page lets parts be mapped */
pg = get_order(size);
if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
inc_snd_pages(pg);
return res;
}
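/*
 * Illustrative usage only (not called in this file): allocate and
 * release a physically contiguous 4 KiB buffer.
 *
 *	void *buf = snd_malloc_pages(4096, GFP_KERNEL);
 *	if (buf)
 *		snd_free_pages(buf, 4096);
 */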
/**
* snd_free_pages - release the pages
* @ptr: the buffer pointer to release
* @size: the allocated buffer size
*
* Releases the buffer allocated via snd_malloc_pages().
*/
void snd_free_pages(void *ptr, size_t size)
{
int pg;
if (ptr == NULL)
return;
pg = get_order(size);
dec_snd_pages(pg);
free_pages((unsigned long) ptr, pg);
}
/*
*
* Bus-specific memory allocators
*
*/
#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
int pg;
void *res;
gfp_t gfp_flags;
if (WARN_ON(!dma))
return NULL;
pg = get_order(size);
gfp_flags = GFP_KERNEL
| __GFP_COMP /* compound page lets parts be mapped */
| __GFP_NORETRY /* don't trigger OOM-killer */
| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
if (res != NULL)
inc_snd_pages(pg);
return res;
}
/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
dma_addr_t dma)
{
int pg;
if (ptr == NULL)
return;
pg = get_order(size);
dec_snd_pages(pg);
dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}
#endif /* CONFIG_HAS_DMA */
/*
*
* ALSA generic memory management
*
*/
/**
* snd_dma_alloc_pages - allocate the buffer area according to the given type
* @type: the DMA buffer type
* @device: the device pointer
* @size: the buffer size to allocate
* @dmab: buffer allocation record to store the allocated data
*
* Calls the memory-allocator function for the corresponding
* buffer type.
*
* Returns zero if the buffer with the given size is allocated successfully,
* otherwise a negative value on error.
*/
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
if (WARN_ON(!size))
return -ENXIO;
if (WARN_ON(!dmab))
return -ENXIO;
dmab->dev.type = type;
dmab->dev.dev = device;
dmab->bytes = 0;
switch (type) {
case SNDRV_DMA_TYPE_CONTINUOUS:
dmab->area = snd_malloc_pages(size,
(__force gfp_t)(unsigned long)device);
dmab->addr = 0;
break;
#ifdef CONFIG_HAS_DMA
case SNDRV_DMA_TYPE_DEV:
dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
snd_malloc_sgbuf_pages(device, size, dmab, NULL);
break;
#endif
default:
printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
dmab->area = NULL;
dmab->addr = 0;
return -ENXIO;
}
if (! dmab->area)
return -ENOMEM;
dmab->bytes = size;
return 0;
}
/**
* snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
* @type: the DMA buffer type
* @device: the device pointer
* @size: the buffer size to allocate
* @dmab: buffer allocation record to store the allocated data
*
* Calls the memory-allocator function for the corresponding
* buffer type. When no space is left, this function reduces the size and
* tries to allocate again. The size actually allocated is stored in
* dmab->bytes.
*
* Returns zero if the buffer with the given size is allocated successfully,
* otherwise a negative value on error.
*/
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
int err;
while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
size_t aligned_size;
if (err != -ENOMEM)
return err;
if (size <= PAGE_SIZE)
return -ENOMEM;
aligned_size = PAGE_SIZE << get_order(size);
if (size != aligned_size)
size = aligned_size;
else
size >>= 1;
}
if (! dmab->area)
return -ENOMEM;
return 0;
}
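/*
 * Worked example of the fallback sequence: a 192 KiB request that fails
 * is first rounded up to its page-order boundary (256 KiB), then halved
 * on every further failure: 192K -> 256K -> 128K -> 64K -> ... until an
 * allocation succeeds or the size drops to PAGE_SIZE.
 */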
/**
* snd_dma_free_pages - release the allocated buffer
* @dmab: the buffer allocation record to release
*
* Releases the allocated buffer via snd_dma_alloc_pages().
*/
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
switch (dmab->dev.type) {
case SNDRV_DMA_TYPE_CONTINUOUS:
snd_free_pages(dmab->area, dmab->bytes);
break;
#ifdef CONFIG_HAS_DMA
case SNDRV_DMA_TYPE_DEV:
snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
snd_free_sgbuf_pages(dmab);
break;
#endif
default:
printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
}
}
/**
* snd_dma_get_reserved_buf - get the reserved buffer for the given device
* @dmab: the buffer allocation record to store
* @id: the buffer id
*
* Searches the reserved-buffer list and re-uses a matching buffer if one
* is found. A matched buffer is removed from the list.
*
* Returns the size of the buffer if it is found, or zero if not found.
*/
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
struct snd_mem_list *mem;
if (WARN_ON(!dmab))
return 0;
mutex_lock(&list_mutex);
list_for_each_entry(mem, &mem_list_head, list) {
if (mem->id == id &&
(mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
struct device *dev = dmab->dev.dev;
list_del(&mem->list);
*dmab = mem->buffer;
if (dmab->dev.dev == NULL)
dmab->dev.dev = dev;
kfree(mem);
mutex_unlock(&list_mutex);
return dmab->bytes;
}
}
mutex_unlock(&list_mutex);
return 0;
}
/**
* snd_dma_reserve_buf - reserve the buffer
* @dmab: the buffer to reserve
* @id: the buffer id
*
* Reserves the given buffer as a reserved buffer.
*
* Returns zero if successful, or a negative code at error.
*/
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
struct snd_mem_list *mem;
if (WARN_ON(!dmab))
return -EINVAL;
mem = kmalloc(sizeof(*mem), GFP_KERNEL);
if (! mem)
return -ENOMEM;
mutex_lock(&list_mutex);
mem->buffer = *dmab;
mem->id = id;
list_add_tail(&mem->list, &mem_list_head);
mutex_unlock(&list_mutex);
return 0;
}
/*
* purge all reserved buffers
*/
static void free_all_reserved_pages(void)
{
struct list_head *p;
struct snd_mem_list *mem;
mutex_lock(&list_mutex);
while (! list_empty(&mem_list_head)) {
p = mem_list_head.next;
mem = list_entry(p, struct snd_mem_list, list);
list_del(p);
snd_dma_free_pages(&mem->buffer);
kfree(mem);
}
mutex_unlock(&list_mutex);
}
#ifdef CONFIG_PROC_FS
/*
* proc file interface
*/
#define SND_MEM_PROC_FILE "driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;
static int snd_mem_proc_read(struct seq_file *seq, void *offset)
{
long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
struct snd_mem_list *mem;
int devno;
static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG" };
mutex_lock(&list_mutex);
seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
devno = 0;
list_for_each_entry(mem, &mem_list_head, list) {
devno++;
seq_printf(seq, "buffer %d : ID %08x : type %s\n",
devno, mem->id, types[mem->buffer.dev.type]);
seq_printf(seq, " addr = 0x%lx, size = %d bytes\n",
(unsigned long)mem->buffer.addr,
(int)mem->buffer.bytes);
}
mutex_unlock(&list_mutex);
return 0;
}
static int snd_mem_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, snd_mem_proc_read, NULL);
}
/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")
static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
size_t count, loff_t * ppos)
{
char buf[128];
char *token, *p;
if (count > sizeof(buf) - 1)
return -EINVAL;
if (copy_from_user(buf, buffer, count))
return -EFAULT;
buf[count] = '\0';
p = buf;
token = gettoken(&p);
if (! token || *token == '#')
return count;
if (strcmp(token, "add") == 0) {
char *endp;
int vendor, device, size, buffers;
long mask;
int i, alloced;
struct pci_dev *pci;
if ((token = gettoken(&p)) == NULL ||
(vendor = simple_strtol(token, NULL, 0)) <= 0 ||
(token = gettoken(&p)) == NULL ||
(device = simple_strtol(token, NULL, 0)) <= 0 ||
(token = gettoken(&p)) == NULL ||
(mask = simple_strtol(token, NULL, 0)) < 0 ||
(token = gettoken(&p)) == NULL ||
(size = memparse(token, &endp)) < 64*1024 ||
size > 16*1024*1024 /* too big */ ||
(token = gettoken(&p)) == NULL ||
(buffers = simple_strtol(token, NULL, 0)) <= 0 ||
buffers > 4) {
printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
return count;
}
vendor &= 0xffff;
device &= 0xffff;
alloced = 0;
pci = NULL;
while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
if (mask > 0 && mask < 0xffffffff) {
if (pci_set_dma_mask(pci, mask) < 0 ||
pci_set_consistent_dma_mask(pci, mask) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
pci_dev_put(pci);
return count;
}
}
for (i = 0; i < buffers; i++) {
struct snd_dma_buffer dmab;
memset(&dmab, 0, sizeof(dmab));
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
size, &dmab) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
pci_dev_put(pci);
return count;
}
snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
}
alloced++;
}
if (! alloced) {
for (i = 0; i < buffers; i++) {
struct snd_dma_buffer dmab;
memset(&dmab, 0, sizeof(dmab));
/* FIXME: We can allocate only in ZONE_DMA
* without a device pointer!
*/
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
size, &dmab) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
break;
}
snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
}
}
} else if (strcmp(token, "erase") == 0)
/* FIXME: need for releasing each buffer chunk? */
free_all_reserved_pages();
else
printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
return count;
}
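/*
 * Illustrative proc usage (the grammar follows the parser above; the
 * device IDs and sizes are examples only):
 *
 *	# reserve two 512 kB DMA buffers for PCI device 8086:2668
 *	echo "add 0x8086 0x2668 0 512k 2" > /proc/driver/snd-page-alloc
 *	# release all reserved buffers
 *	echo "erase" > /proc/driver/snd-page-alloc
 */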
#endif /* CONFIG_PCI */
static const struct file_operations snd_mem_proc_fops = {
.owner = THIS_MODULE,
.open = snd_mem_proc_open,
.read = seq_read,
#ifdef CONFIG_PCI
.write = snd_mem_proc_write,
#endif
.llseek = seq_lseek,
.release = single_release,
};
#endif /* CONFIG_PROC_FS */
/*
* module entry
*/
static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
&snd_mem_proc_fops);
#endif
return 0;
}
static void __exit snd_mem_exit(void)
{
remove_proc_entry(SND_MEM_PROC_FILE, NULL);
free_all_reserved_pages();
if (snd_allocated_pages > 0)
printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
}
module_init(snd_mem_init)
module_exit(snd_mem_exit)
/*
* exports
*/
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);
EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);
EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);
| gpl-2.0 |
MoKee/android_kernel_bn_omap | drivers/staging/easycap/easycap_testcard.c | 8134 | 3628 | /******************************************************************************
* *
* easycap_testcard.c *
* *
******************************************************************************/
/*
*
* Copyright (C) 2010 R.M. Thomas <rmthomas@sciolus.org>
*
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*****************************************************************************/
#include "easycap.h"
/*****************************************************************************/
#define TESTCARD_BYTESPERLINE (2 * 720)
void
easycap_testcard(struct easycap *peasycap, int field)
{
int total;
int y, u, v, r, g, b;
unsigned char uyvy[4];
int i1, line, k, m, n, more, much, barwidth, barheight;
unsigned char bfbar[TESTCARD_BYTESPERLINE / 8], *p1, *p2;
struct data_buffer *pfield_buffer;
if (!peasycap) {
SAY("ERROR: peasycap is NULL\n");
return;
}
JOM(8, "%i=field\n", field);
switch (peasycap->width) {
case 720:
case 360: {
barwidth = (2 * 720) / 8;
break;
}
case 704:
case 352: {
barwidth = (2 * 704) / 8;
break;
}
case 640:
case 320: {
barwidth = (2 * 640) / 8;
break;
}
default: {
SAM("ERROR: cannot set barwidth\n");
return;
}
}
if (TESTCARD_BYTESPERLINE < barwidth) {
SAM("ERROR: barwidth is too large\n");
return;
}
switch (peasycap->height) {
case 576:
case 288: {
barheight = 576;
break;
}
case 480:
case 240: {
barheight = 480;
break;
}
default: {
SAM("ERROR: cannot set barheight\n");
return;
}
}
total = 0;
k = field;
m = 0;
n = 0;
for (line = 0; line < (barheight / 2); line++) {
for (i1 = 0; i1 < 8; i1++) {
r = (i1 * 256)/8;
g = (i1 * 256)/8;
b = (i1 * 256)/8;
y = 299*r/1000 + 587*g/1000 + 114*b/1000 ;
u = -147*r/1000 - 289*g/1000 + 436*b/1000 ;
u = u + 128;
v = 615*r/1000 - 515*g/1000 - 100*b/1000 ;
v = v + 128;
uyvy[0] = 0xFF & u ;
uyvy[1] = 0xFF & y ;
uyvy[2] = 0xFF & v ;
uyvy[3] = 0xFF & y ;
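/*
 * Worked example of the integer RGB->YUV math above for the mid-grey
 * bar i1 = 4 (r = g = b = 128): y = 38 + 75 + 14 = 127,
 * u = -18 - 36 + 55 + 128 = 129, v = 78 - 65 - 12 + 128 = 129,
 * i.e. near-neutral chroma, as expected for grey.
 */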
p1 = &bfbar[0];
while (p1 < &bfbar[barwidth]) {
*p1++ = uyvy[0] ;
*p1++ = uyvy[1] ;
*p1++ = uyvy[2] ;
*p1++ = uyvy[3] ;
total += 4;
}
p1 = &bfbar[0];
more = barwidth;
while (more) {
if ((FIELD_BUFFER_SIZE/PAGE_SIZE) <= m) {
SAM("ERROR: bad m reached\n");
return;
}
if (PAGE_SIZE < n) {
SAM("ERROR: bad n reached\n");
return;
}
if (0 > more) {
SAM("ERROR: internal fault\n");
return;
}
much = PAGE_SIZE - n;
if (much > more)
much = more;
pfield_buffer = &peasycap->field_buffer[k][m];
p2 = pfield_buffer->pgo + n;
memcpy(p2, p1, much);
p1 += much;
n += much;
more -= much;
if (PAGE_SIZE == n) {
m++;
n = 0;
}
}
}
}
return;
}
| gpl-2.0 |
skipisz/linux | arch/x86/mm/pat_rbtree.c | 8902 | 6025 | /*
* Handle caching attributes in page tables (PAT)
*
* Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
* Suresh B Siddha <suresh.b.siddha@intel.com>
*
* Interval tree (augmented rbtree) used to store the PAT memory type
* reservations.
*/
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include "pat_internal.h"
/*
* The memtype tree keeps track of memory type for specific
* physical memory areas. Without proper tracking, conflicting memory
* types in different mappings can cause CPU cache corruption.
*
* The tree is an interval tree (augmented rbtree) with tree ordered
* on starting address. Tree can contain multiple entries for
* different regions which overlap. All the aliases have the same
* cache attributes of course.
*
* memtype_lock protects the rbtree.
*/
static struct rb_root memtype_rbroot = RB_ROOT;
static int is_node_overlap(struct memtype *node, u64 start, u64 end)
{
if (node->start >= end || node->end <= start)
return 0;
return 1;
}
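/*
 * Note the half-open [start, end) semantics above: ranges 0x1000-0x2000
 * and 0x2000-0x3000 share the boundary address but are not treated as
 * overlapping.
 */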
static u64 get_subtree_max_end(struct rb_node *node)
{
u64 ret = 0;
if (node) {
struct memtype *data = container_of(node, struct memtype, rb);
ret = data->subtree_max_end;
}
return ret;
}
/* Update 'subtree_max_end' for a node, based on node and its children */
static void memtype_rb_augment_cb(struct rb_node *node, void *__unused)
{
struct memtype *data;
u64 max_end, child_max_end;
if (!node)
return;
data = container_of(node, struct memtype, rb);
max_end = data->end;
child_max_end = get_subtree_max_end(node->rb_right);
if (child_max_end > max_end)
max_end = child_max_end;
child_max_end = get_subtree_max_end(node->rb_left);
if (child_max_end > max_end)
max_end = child_max_end;
data->subtree_max_end = max_end;
}
/* Find the first (lowest start addr) overlapping range from rb tree */
static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
u64 start, u64 end)
{
struct rb_node *node = root->rb_node;
struct memtype *last_lower = NULL;
while (node) {
struct memtype *data = container_of(node, struct memtype, rb);
if (get_subtree_max_end(node->rb_left) > start) {
/* Lowest overlap if any must be on left side */
node = node->rb_left;
} else if (is_node_overlap(data, start, end)) {
last_lower = data;
break;
} else if (start >= data->start) {
/* Lowest overlap if any must be on right side */
node = node->rb_right;
} else {
break;
}
}
return last_lower; /* Returns NULL if there is no overlap */
}
static struct memtype *memtype_rb_exact_match(struct rb_root *root,
u64 start, u64 end)
{
struct memtype *match;
match = memtype_rb_lowest_match(root, start, end);
while (match != NULL && match->start < end) {
struct rb_node *node;
if (match->start == start && match->end == end)
return match;
node = rb_next(&match->rb);
if (node)
match = container_of(node, struct memtype, rb);
else
match = NULL;
}
return NULL; /* Returns NULL if there is no exact match */
}
static int memtype_rb_check_conflict(struct rb_root *root,
u64 start, u64 end,
unsigned long reqtype, unsigned long *newtype)
{
struct rb_node *node;
struct memtype *match;
int found_type = reqtype;
match = memtype_rb_lowest_match(root, start, end);
if (match == NULL)
goto success;
if (match->type != found_type && newtype == NULL)
goto failure;
dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
found_type = match->type;
node = rb_next(&match->rb);
while (node) {
match = container_of(node, struct memtype, rb);
if (match->start >= end) /* Checked all possible matches */
goto success;
if (is_node_overlap(match, start, end) &&
match->type != found_type) {
goto failure;
}
node = rb_next(&match->rb);
}
success:
if (newtype)
*newtype = found_type;
return 0;
failure:
printk(KERN_INFO "%s:%d conflicting memory types "
"%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
end, cattr_name(found_type), cattr_name(match->type));
return -EBUSY;
}
static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
{
struct rb_node **node = &(root->rb_node);
struct rb_node *parent = NULL;
while (*node) {
struct memtype *data = container_of(*node, struct memtype, rb);
parent = *node;
if (newdata->start <= data->start)
node = &((*node)->rb_left);
else if (newdata->start > data->start)
node = &((*node)->rb_right);
}
rb_link_node(&newdata->rb, parent, node);
rb_insert_color(&newdata->rb, root);
rb_augment_insert(&newdata->rb, memtype_rb_augment_cb, NULL);
}
int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
{
int err = 0;
err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end,
new->type, ret_type);
if (!err) {
if (ret_type)
new->type = *ret_type;
new->subtree_max_end = new->end;
memtype_rb_insert(&memtype_rbroot, new);
}
return err;
}
struct memtype *rbt_memtype_erase(u64 start, u64 end)
{
struct rb_node *deepest;
struct memtype *data;
data = memtype_rb_exact_match(&memtype_rbroot, start, end);
if (!data)
goto out;
deepest = rb_augment_erase_begin(&data->rb);
rb_erase(&data->rb, &memtype_rbroot);
rb_augment_erase_end(deepest, memtype_rb_augment_cb, NULL);
out:
return data;
}
struct memtype *rbt_memtype_lookup(u64 addr)
{
struct memtype *data;
data = memtype_rb_lowest_match(&memtype_rbroot, addr, addr + PAGE_SIZE);
return data;
}
#if defined(CONFIG_DEBUG_FS)
int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
{
struct rb_node *node;
int i = 1;
node = rb_first(&memtype_rbroot);
while (node && pos != i) {
node = rb_next(node);
i++;
}
if (node) { /* pos == i */
struct memtype *this = container_of(node, struct memtype, rb);
*out = *this;
return 0;
} else {
return 1;
}
}
#endif
| gpl-2.0 |
ndmsystems/linux-2.6.36 | drivers/pci/pcie/aer/ecrc.c | 14022 | 3302 | /*
* Enables/disables PCIe ECRC checking.
*
* (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
* Andrew Patterson <andrew.patterson@hp.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
* 02111-1307, USA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/errno.h>
#include "../../pci.h"
#define ECRC_POLICY_DEFAULT 0 /* ECRC set by BIOS */
#define ECRC_POLICY_OFF 1 /* ECRC off for performance */
#define ECRC_POLICY_ON 2 /* ECRC on for data integrity */
static int ecrc_policy = ECRC_POLICY_DEFAULT;
static const char *ecrc_policy_str[] = {
[ECRC_POLICY_DEFAULT] = "bios",
[ECRC_POLICY_OFF] = "off",
[ECRC_POLICY_ON] = "on"
};
/**
* enable_ecrc_checking - enable PCIe ECRC checking for a device
* @dev: the PCI device
*
* Returns 0 on success, or negative on failure.
*/
static int enable_ecrc_checking(struct pci_dev *dev)
{
int pos;
u32 reg32;
if (!pci_is_pcie(dev))
return -ENODEV;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos)
return -ENODEV;
pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
if (reg32 & PCI_ERR_CAP_ECRC_GENC)
reg32 |= PCI_ERR_CAP_ECRC_GENE;
if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
reg32 |= PCI_ERR_CAP_ECRC_CHKE;
pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
return 0;
}
/**
* disable_ecrc_checking - disables PCIe ECRC checking for a device
* @dev: the PCI device
*
* Returns 0 on success, or negative on failure.
*/
static int disable_ecrc_checking(struct pci_dev *dev)
{
int pos;
u32 reg32;
if (!pci_is_pcie(dev))
return -ENODEV;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (!pos)
return -ENODEV;
pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
return 0;
}
/**
* pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
* @dev: the PCI device
*/
void pcie_set_ecrc_checking(struct pci_dev *dev)
{
switch (ecrc_policy) {
case ECRC_POLICY_DEFAULT:
return;
case ECRC_POLICY_OFF:
disable_ecrc_checking(dev);
break;
case ECRC_POLICY_ON:
enable_ecrc_checking(dev);
break;
default:
return;
}
}
/**
* pcie_ecrc_get_policy - parse kernel command-line ecrc option
*/
void pcie_ecrc_get_policy(char *str)
{
int i;
for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
if (!strncmp(str, ecrc_policy_str[i],
strlen(ecrc_policy_str[i])))
break;
if (i >= ARRAY_SIZE(ecrc_policy_str))
return;
ecrc_policy = i;
}
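/*
 * For context (the hookup lives outside this file): this parser is
 * reached via the "pci=ecrc=" boot option, so booting with e.g.
 * "pci=ecrc=on" selects ECRC_POLICY_ON.
 */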
| gpl-2.0 |
nadanomics/linux | drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 199 | 53972 | /*
* Copyright (c) 2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mlx5/flow_table.h>
#include "en.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
};
struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
u16 max_inline;
};
struct mlx5e_cq_param {
u32 cqc[MLX5_ST_SZ_DW(cqc)];
struct mlx5_wq_param wq;
u16 eq_ix;
};
struct mlx5e_channel_param {
struct mlx5e_rq_param rq;
struct mlx5e_sq_param sq;
struct mlx5e_cq_param rx_cq;
struct mlx5e_cq_param tx_cq;
};
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
u8 port_state;
port_state = mlx5_query_vport_state(mdev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
if (port_state == VPORT_STATE_UP)
netif_carrier_on(priv->netdev);
else
netif_carrier_off(priv->netdev);
}
static void mlx5e_update_carrier_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
update_carrier_work);
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_update_carrier(priv);
mutex_unlock(&priv->state_lock);
}
static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_pport_stats *s = &priv->stats.pport;
u32 *in;
u32 *out;
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
in = mlx5_vzalloc(sz);
out = mlx5_vzalloc(sz);
if (!in || !out)
goto free_out;
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out,
sz, MLX5_REG_PPCNT, 0, 0);
memcpy(s->IEEE_802_3_counters,
MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
sizeof(s->IEEE_802_3_counters));
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out,
sz, MLX5_REG_PPCNT, 0, 0);
memcpy(s->RFC_2863_counters,
MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
sizeof(s->RFC_2863_counters));
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out,
sz, MLX5_REG_PPCNT, 0, 0);
memcpy(s->RFC_2819_counters,
MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
sizeof(s->RFC_2819_counters));
free_out:
kvfree(in);
kvfree(out);
}
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_vport_stats *s = &priv->stats.vport;
struct mlx5e_rq_stats *rq_stats;
struct mlx5e_sq_stats *sq_stats;
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u64 tx_offload_none;
int i, j;
out = mlx5_vzalloc(outlen);
if (!out)
return;
/* Collect the SW counters first and then the HW counters, for consistency */
s->tso_packets = 0;
s->tso_bytes = 0;
s->tx_queue_stopped = 0;
s->tx_queue_wake = 0;
s->tx_queue_dropped = 0;
tx_offload_none = 0;
s->lro_packets = 0;
s->lro_bytes = 0;
s->rx_csum_none = 0;
s->rx_csum_sw = 0;
s->rx_wqe_err = 0;
for (i = 0; i < priv->params.num_channels; i++) {
rq_stats = &priv->channel[i]->rq.stats;
s->lro_packets += rq_stats->lro_packets;
s->lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_sw += rq_stats->csum_sw;
s->rx_wqe_err += rq_stats->wqe_err;
for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
s->tso_packets += sq_stats->tso_packets;
s->tso_bytes += sq_stats->tso_bytes;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
tx_offload_none += sq_stats->csum_offload_none;
}
}
/* HW counters */
memset(in, 0, sizeof(in));
MLX5_SET(query_vport_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
MLX5_SET(query_vport_counter_in, in, other_vport, 0);
memset(out, 0, outlen);
if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
goto free_out;
#define MLX5_GET_CTR(p, x) \
MLX5_GET64(query_vport_counter_out, p, x)
s->rx_error_packets =
MLX5_GET_CTR(out, received_errors.packets);
s->rx_error_bytes =
MLX5_GET_CTR(out, received_errors.octets);
s->tx_error_packets =
MLX5_GET_CTR(out, transmit_errors.packets);
s->tx_error_bytes =
MLX5_GET_CTR(out, transmit_errors.octets);
s->rx_unicast_packets =
MLX5_GET_CTR(out, received_eth_unicast.packets);
s->rx_unicast_bytes =
MLX5_GET_CTR(out, received_eth_unicast.octets);
s->tx_unicast_packets =
MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
s->tx_unicast_bytes =
MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
s->rx_multicast_packets =
MLX5_GET_CTR(out, received_eth_multicast.packets);
s->rx_multicast_bytes =
MLX5_GET_CTR(out, received_eth_multicast.octets);
s->tx_multicast_packets =
MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
s->tx_multicast_bytes =
MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
s->rx_broadcast_packets =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
s->rx_broadcast_bytes =
MLX5_GET_CTR(out, received_eth_broadcast.octets);
s->tx_broadcast_packets =
MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
s->tx_broadcast_bytes =
MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
s->rx_packets =
s->rx_unicast_packets +
s->rx_multicast_packets +
s->rx_broadcast_packets;
s->rx_bytes =
s->rx_unicast_bytes +
s->rx_multicast_bytes +
s->rx_broadcast_bytes;
s->tx_packets =
s->tx_unicast_packets +
s->tx_multicast_packets +
s->tx_broadcast_packets;
s->tx_bytes =
s->tx_unicast_bytes +
s->tx_multicast_bytes +
s->tx_broadcast_bytes;
/* Update calculated offload counters */
s->tx_csum_offload = s->tx_packets - tx_offload_none;
s->rx_csum_good = s->rx_packets - s->rx_csum_none -
s->rx_csum_sw;
mlx5e_update_pport_counters(priv);
free_out:
kvfree(out);
}
static void mlx5e_update_stats_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
update_stats_work);
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
mlx5e_update_stats(priv);
schedule_delayed_work(dwork,
msecs_to_jiffies(
MLX5E_UPDATE_STATS_INTERVAL));
}
mutex_unlock(&priv->state_lock);
}
static void __mlx5e_async_event(struct mlx5e_priv *priv,
enum mlx5_dev_event event)
{
switch (event) {
case MLX5_DEV_EVENT_PORT_UP:
case MLX5_DEV_EVENT_PORT_DOWN:
schedule_work(&priv->update_carrier_work);
break;
default:
break;
}
}
static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
enum mlx5_dev_event event, unsigned long param)
{
struct mlx5e_priv *priv = vpriv;
spin_lock(&priv->async_events_spinlock);
if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
__mlx5e_async_event(priv, event);
spin_unlock(&priv->async_events_spinlock);
}
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
}
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
spin_lock_irq(&priv->async_events_spinlock);
clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
spin_unlock_irq(&priv->async_events_spinlock);
}
#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
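/* Example: with ETH_HLEN (14) + VLAN_HLEN (4) + ETH_FCS_LEN (4) the
 * overhead is 22 bytes, so a software MTU of 1500 maps to a hardware
 * MTU of 1522, and vice versa.
 */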
static int mlx5e_create_rq(struct mlx5e_channel *c,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
{
struct mlx5e_priv *priv = c->priv;
struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
int wq_sz;
int err;
int i;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
&rq->wq_ctrl);
if (err)
return err;
rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
cpu_to_node(c->cpu));
if (!rq->skb) {
err = -ENOMEM;
goto err_rq_wq_destroy;
}
rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
MLX5E_SW2HW_MTU(priv->netdev->mtu);
rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
for (i = 0; i < wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
wqe->data.lkey = c->mkey_be;
wqe->data.byte_count =
cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
}
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->channel = c;
rq->ix = c->ix;
rq->priv = c->priv;
return 0;
err_rq_wq_destroy:
mlx5_wq_destroy(&rq->wq_ctrl);
return err;
}
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
kfree(rq->skb);
mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
struct mlx5e_priv *priv = rq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
void *in;
void *rqc;
void *wq;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
sizeof(u64) * rq->wq_ctrl.buf.npages;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
memcpy(rqc, param->rqc, sizeof(param->rqc));
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
mlx5_fill_page_array(&rq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(wq, wq, pas));
err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
kvfree(in);
return err;
}
static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
{
struct mlx5e_channel *c = rq->channel;
struct mlx5e_priv *priv = c->priv;
struct mlx5_core_dev *mdev = priv->mdev;
void *in;
void *rqc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(modify_rq_in, in, rq_state, curr_state);
MLX5_SET(rqc, rqc, state, next_state);
err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
kvfree(in);
return err;
}
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
struct mlx5e_channel *c = rq->channel;
struct mlx5e_priv *priv = c->priv;
struct mlx5_wq_ll *wq = &rq->wq;
int i;
for (i = 0; i < 1000; i++) {
if (wq->cur_sz >= priv->params.min_rx_wqes)
return 0;
msleep(20);
}
return -ETIMEDOUT;
}
static int mlx5e_open_rq(struct mlx5e_channel *c,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
{
int err;
err = mlx5e_create_rq(c, param, rq);
if (err)
return err;
err = mlx5e_enable_rq(rq, param);
if (err)
goto err_destroy_rq;
err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
if (err)
goto err_disable_rq;
set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */
return 0;
err_disable_rq:
mlx5e_disable_rq(rq);
err_destroy_rq:
mlx5e_destroy_rq(rq);
return err;
}
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
while (!mlx5_wq_ll_is_empty(&rq->wq))
msleep(20);
/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
napi_synchronize(&rq->channel->napi);
mlx5e_disable_rq(rq);
mlx5e_destroy_rq(rq);
}
static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
kfree(sq->dma_fifo);
kfree(sq->skb);
}
static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
numa);
if (!sq->skb || !sq->dma_fifo) {
mlx5e_free_sq_db(sq);
return -ENOMEM;
}
sq->dma_fifo_mask = df_sz - 1;
return 0;
}
static int mlx5e_create_sq(struct mlx5e_channel *c,
int tc,
struct mlx5e_sq_param *param,
struct mlx5e_sq *sq)
{
struct mlx5e_priv *priv = c->priv;
struct mlx5_core_dev *mdev = priv->mdev;
void *sqc = param->sqc;
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
int txq_ix;
int err;
err = mlx5_alloc_map_uar(mdev, &sq->uar);
if (err)
return err;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
&sq->wq_ctrl);
if (err)
goto err_unmap_free_uar;
sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
sq->uar_map = sq->uar.map;
sq->uar_bf_map = sq->uar.bf_map;
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
sq->max_inline = param->max_inline;
err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
txq_ix = c->ix + tc * priv->params.num_channels;
sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
sq->pdev = c->pdev;
sq->mkey_be = c->mkey_be;
sq->channel = c;
sq->tc = tc;
sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
sq->bf_budget = MLX5E_SQ_BF_BUDGET;
priv->txq_to_sq_map[txq_ix] = sq;
return 0;
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
err_unmap_free_uar:
mlx5_unmap_free_uar(mdev, &sq->uar);
return err;
}
static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
mlx5e_free_sq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}
static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
struct mlx5_core_dev *mdev = priv->mdev;
void *in;
void *sqc;
void *wq;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
sizeof(u64) * sq->wq_ctrl.buf.npages;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
wq = MLX5_ADDR_OF(sqc, sqc, wq);
memcpy(sqc, param->sqc, sizeof(param->sqc));
MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, tis_lst_sz, 1);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, uar_page, sq->uar.index);
MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
mlx5_fill_page_array(&sq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(wq, wq, pas));
err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
kvfree(in);
return err;
}
static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
{
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
struct mlx5_core_dev *mdev = priv->mdev;
void *in;
void *sqc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
MLX5_SET(modify_sq_in, in, sq_state, curr_state);
MLX5_SET(sqc, sqc, state, next_state);
err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
kvfree(in);
return err;
}
static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
struct mlx5_core_dev *mdev = priv->mdev;
mlx5_core_destroy_sq(mdev, sq->sqn);
}
static int mlx5e_open_sq(struct mlx5e_channel *c,
int tc,
struct mlx5e_sq_param *param,
struct mlx5e_sq *sq)
{
int err;
err = mlx5e_create_sq(c, tc, param, sq);
if (err)
return err;
err = mlx5e_enable_sq(sq, param);
if (err)
goto err_destroy_sq;
err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
if (err)
goto err_disable_sq;
set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
return 0;
err_disable_sq:
mlx5e_disable_sq(sq);
err_destroy_sq:
mlx5e_destroy_sq(sq);
return err;
}
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
__netif_tx_lock_bh(txq);
netif_tx_stop_queue(txq);
__netif_tx_unlock_bh(txq);
}
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
netif_tx_disable_queue(sq->txq);
/* ensure hw is notified of all pending wqes */
if (mlx5e_sq_has_room_for(sq, 1))
mlx5e_send_nop(sq, true);
mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
while (sq->cc != sq->pc) /* wait till sq is empty */
msleep(20);
/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
napi_synchronize(&sq->channel->napi);
mlx5e_disable_sq(sq);
mlx5e_destroy_sq(sq);
}
static int mlx5e_create_cq(struct mlx5e_channel *c,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq)
{
struct mlx5e_priv *priv = c->priv;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int eqn_not_used;
int irqn;
int err;
u32 i;
param->wq.buf_numa_node = cpu_to_node(c->cpu);
param->wq.db_numa_node = cpu_to_node(c->cpu);
param->eq_ix = c->ix;
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
&cq->wq_ctrl);
if (err)
return err;
mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
cq->napi = &c->napi;
mcq->cqe_sz = 64;
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
*mcq->set_ci_db = 0;
*mcq->arm_db = 0;
mcq->vector = param->eq_ix;
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
mcq->irqn = irqn;
mcq->uar = &priv->cq_uar;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
cqe->op_own = 0xf1;
}
cq->channel = c;
cq->priv = priv;
return 0;
}
static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
mlx5_wq_destroy(&cq->wq_ctrl);
}
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
struct mlx5e_priv *priv = cq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
void *in;
void *cqc;
int inlen;
int irqn_not_used;
int eqn;
int err;
inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
sizeof(u64) * cq->wq_ctrl.buf.npages;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
memcpy(cqc, param->cqc, sizeof(param->cqc));
mlx5_fill_page_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
err = mlx5_core_create_cq(mdev, mcq, in, inlen);
kvfree(in);
if (err)
return err;
mlx5e_cq_arm(cq);
return 0;
}
static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
struct mlx5e_priv *priv = cq->priv;
struct mlx5_core_dev *mdev = priv->mdev;
mlx5_core_destroy_cq(mdev, &cq->mcq);
}
static int mlx5e_open_cq(struct mlx5e_channel *c,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq,
u16 moderation_usecs,
u16 moderation_frames)
{
int err;
struct mlx5e_priv *priv = c->priv;
struct mlx5_core_dev *mdev = priv->mdev;
err = mlx5e_create_cq(c, param, cq);
if (err)
return err;
err = mlx5e_enable_cq(cq, param);
if (err)
goto err_destroy_cq;
err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
moderation_usecs,
moderation_frames);
if (err)
goto err_destroy_cq;
return 0;
err_destroy_cq:
mlx5e_destroy_cq(cq);
return err;
}
static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
mlx5e_disable_cq(cq);
mlx5e_destroy_cq(cq);
}
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_channel_param *cparam)
{
struct mlx5e_priv *priv = c->priv;
int err;
int tc;
for (tc = 0; tc < c->num_tc; tc++) {
err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
priv->params.tx_cq_moderation_usec,
priv->params.tx_cq_moderation_pkts);
if (err)
goto err_close_tx_cqs;
}
return 0;
err_close_tx_cqs:
for (tc--; tc >= 0; tc--)
mlx5e_close_cq(&c->sq[tc].cq);
return err;
}
static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
int tc;
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_close_cq(&c->sq[tc].cq);
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_channel_param *cparam)
{
int err;
int tc;
for (tc = 0; tc < c->num_tc; tc++) {
err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
if (err)
goto err_close_sqs;
}
return 0;
err_close_sqs:
for (tc--; tc >= 0; tc--)
mlx5e_close_sq(&c->sq[tc]);
return err;
}
static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
int tc;
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_close_sq(&c->sq[tc]);
}
static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
int i;
for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
priv->channeltc_to_txq_map[ix][i] =
ix + i * priv->params.num_channels;
}
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
struct net_device *netdev = priv->netdev;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
int err;
c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
return -ENOMEM;
c->priv = priv;
c->ix = ix;
c->cpu = cpu;
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mr.key);
c->num_tc = priv->params.num_tc;
mlx5e_build_channeltc_to_txq_map(priv, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
err = mlx5e_open_tx_cqs(c, cparam);
if (err)
goto err_napi_del;
err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
priv->params.rx_cq_moderation_usec,
priv->params.rx_cq_moderation_pkts);
if (err)
goto err_close_tx_cqs;
napi_enable(&c->napi);
err = mlx5e_open_sqs(c, cparam);
if (err)
goto err_disable_napi;
err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
if (err)
goto err_close_sqs;
netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
*cp = c;
return 0;
err_close_sqs:
mlx5e_close_sqs(c);
err_disable_napi:
napi_disable(&c->napi);
mlx5e_close_cq(&c->rq.cq);
err_close_tx_cqs:
mlx5e_close_tx_cqs(c);
err_napi_del:
netif_napi_del(&c->napi);
kfree(c);
return err;
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
mlx5e_close_rq(&c->rq);
mlx5e_close_sqs(c);
napi_disable(&c->napi);
mlx5e_close_cq(&c->rq.cq);
mlx5e_close_tx_cqs(c);
netif_napi_del(&c->napi);
kfree(c);
}
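/* The mlx5e_build_*_param() helpers below translate driver parameters into
 * the firmware contexts (RQ/SQ/CQ) consumed by the create commands.
 */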
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
MLX5_SET(wq, wq, pd, priv->pdn);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.linear = 1;
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->pdn);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->max_inline = priv->params.tx_max_inline;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
mlx5e_build_common_cq_param(priv, param);
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
mlx5e_build_common_cq_param(priv, param);
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
struct mlx5e_channel_param *cparam)
{
memset(cparam, 0, sizeof(*cparam));
mlx5e_build_rq_param(priv, &cparam->rq);
mlx5e_build_sq_param(priv, &cparam->sq);
mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
}
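/* Open all channels, then block until every RQ has been refilled with at
 * least min_rx_wqes receive WQEs, so the data path is ready on return.
 */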
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
struct mlx5e_channel_param cparam;
int nch = priv->params.num_channels;
int err = -ENOMEM;
int i;
int j;
priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
GFP_KERNEL);
priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
sizeof(struct mlx5e_sq *), GFP_KERNEL);
if (!priv->channel || !priv->txq_to_sq_map)
goto err_free_txq_to_sq_map;
mlx5e_build_channel_param(priv, &cparam);
for (i = 0; i < nch; i++) {
err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
if (err)
goto err_close_channels;
}
for (j = 0; j < nch; j++) {
err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
if (err)
goto err_close_channels;
}
return 0;
err_close_channels:
for (i--; i >= 0; i--)
mlx5e_close_channel(priv->channel[i]);
err_free_txq_to_sq_map:
kfree(priv->txq_to_sq_map);
kfree(priv->channel);
return err;
}
static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
int i;
for (i = 0; i < priv->params.num_channels; i++)
mlx5e_close_channel(priv->channel[i]);
kfree(priv->txq_to_sq_map);
kfree(priv->channel);
}
static int mlx5e_rx_hash_fn(int hfunc)
{
return (hfunc == ETH_RSS_HASH_TOP) ?
MLX5_RX_HASH_FN_TOEPLITZ :
MLX5_RX_HASH_FN_INVERTED_XOR8;
}
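/* Reverse the low 'size' bits of 'a';
 * e.g. mlx5e_bits_invert(0x1, 4) == 0x8.
 */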
static int mlx5e_bits_invert(unsigned long a, int size)
{
int inv = 0;
int i;
for (i = 0; i < size; i++)
inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
return inv;
}
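/* Fill the RSS indirection table: entries point at channel RQs while the
 * netdev is open and at the drop RQ otherwise. For XOR hashing the table
 * index is bit-reversed first.
 */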
static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
{
int i;
for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
int ix = i;
if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
ix = priv->params.indirection_rqt[ix];
ix = ix % priv->params.num_channels;
MLX5_SET(rqtc, rqtc, rq_num[i],
test_bit(MLX5E_STATE_OPENED, &priv->state) ?
priv->channel[ix]->rq.rqn :
priv->drop_rq.rqn);
}
}
static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
enum mlx5e_rqt_ix rqt_ix)
{
switch (rqt_ix) {
case MLX5E_INDIRECTION_RQT:
mlx5e_fill_indir_rqt_rqns(priv, rqtc);
break;
default: /* MLX5E_SINGLE_RQ_RQT */
MLX5_SET(rqtc, rqtc, rq_num[0],
test_bit(MLX5E_STATE_OPENED, &priv->state) ?
priv->channel[0]->rq.rqn :
priv->drop_rq.rqn);
break;
}
}
static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 *in;
void *rqtc;
int inlen;
int sz;
int err;
sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
kvfree(in);
return err;
}
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 *in;
void *rqtc;
int inlen;
int sz;
int err;
sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
kvfree(in);
return err;
}
static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
}
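/* Repoint both RQ tables at the currently valid RQs; called on open to
 * steer traffic into the channels and on close to steer it into the
 * drop RQ (see mlx5e_fill_rqt_rqns()).
 */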
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
}
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
if (!priv->params.lro_en)
return;
#define ROUGH_MAX_L2_L3_HDR_SZ 256
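/* lro_max_ip_payload_size is expressed in units of 256 bytes, hence
 * the >> 8 below.
 */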
MLX5_SET(tirc, tirc, lro_enable_mask,
MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
(priv->params.lro_wqe_sz -
ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
MLX5_CAP_ETH(priv->mdev,
lro_timer_supported_periods[2]));
}
static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
{
struct mlx5_core_dev *mdev = priv->mdev;
void *in;
void *tirc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
mlx5e_build_tir_ctx_lro(tirc, priv);
err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
kvfree(in);
return err;
}
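/* Program the port MTU (the HW value includes L2 overhead, see
 * MLX5E_SW2HW_MTU) and read back the operational MTU the port actually
 * accepted, warning if it differs from the requested value.
 */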
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
int hw_mtu;
int err;
err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
if (err)
return err;
mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
__func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
return 0;
}
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int num_txqs;
int err;
set_bit(MLX5E_STATE_OPENED, &priv->state);
num_txqs = priv->params.num_channels * priv->params.num_tc;
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
err = mlx5e_set_dev_port_mtu(netdev);
if (err)
goto err_clear_state_opened_flag;
err = mlx5e_open_channels(priv);
if (err) {
netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
__func__, err);
goto err_clear_state_opened_flag;
}
mlx5e_update_carrier(priv);
mlx5e_redirect_rqts(priv);
schedule_delayed_work(&priv->update_stats_work, 0);
return 0;
err_clear_state_opened_flag:
clear_bit(MLX5E_STATE_OPENED, &priv->state);
return err;
}
static int mlx5e_open(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
mutex_lock(&priv->state_lock);
err = mlx5e_open_locked(netdev);
mutex_unlock(&priv->state_lock);
return err;
}
int mlx5e_close_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
clear_bit(MLX5E_STATE_OPENED, &priv->state);
mlx5e_redirect_rqts(priv);
netif_carrier_off(priv->netdev);
mlx5e_close_channels(priv);
return 0;
}
static int mlx5e_close(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
mutex_lock(&priv->state_lock);
err = mlx5e_close_locked(netdev);
mutex_unlock(&priv->state_lock);
return err;
}
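/* The drop RQ is a minimal RQ that is never handed receive buffers; the
 * RQ tables point at it while the netdev is closed so incoming packets
 * are dropped in HW rather than hitting a destroyed channel RQ.
 */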
static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *rq,
struct mlx5e_rq_param *param)
{
struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
int err;
param->wq.db_numa_node = param->wq.buf_numa_node;
err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
&rq->wq_ctrl);
if (err)
return err;
rq->priv = priv;
return 0;
}
static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_core_cq *mcq = &cq->mcq;
int eqn_not_used;
int irqn;
int err;
err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
&cq->wq_ctrl);
if (err)
return err;
mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
mcq->cqe_sz = 64;
mcq->set_ci_db = cq->wq_ctrl.db.db;
mcq->arm_db = cq->wq_ctrl.db.db + 1;
*mcq->set_ci_db = 0;
*mcq->arm_db = 0;
mcq->vector = param->eq_ix;
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
mcq->irqn = irqn;
mcq->uar = &priv->cq_uar;
cq->priv = priv;
return 0;
}
static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
struct mlx5e_cq_param cq_param;
struct mlx5e_rq_param rq_param;
struct mlx5e_rq *rq = &priv->drop_rq;
struct mlx5e_cq *cq = &priv->drop_rq.cq;
int err;
memset(&cq_param, 0, sizeof(cq_param));
memset(&rq_param, 0, sizeof(rq_param));
mlx5e_build_rx_cq_param(priv, &cq_param);
mlx5e_build_rq_param(priv, &rq_param);
err = mlx5e_create_drop_cq(priv, cq, &cq_param);
if (err)
return err;
err = mlx5e_enable_cq(cq, &cq_param);
if (err)
goto err_destroy_cq;
err = mlx5e_create_drop_rq(priv, rq, &rq_param);
if (err)
goto err_disable_cq;
err = mlx5e_enable_rq(rq, &rq_param);
if (err)
goto err_destroy_rq;
return 0;
err_destroy_rq:
mlx5e_destroy_rq(&priv->drop_rq);
err_disable_cq:
mlx5e_disable_cq(&priv->drop_rq.cq);
err_destroy_cq:
mlx5e_destroy_cq(&priv->drop_rq.cq);
return err;
}
static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
mlx5e_disable_rq(&priv->drop_rq);
mlx5e_destroy_rq(&priv->drop_rq);
mlx5e_disable_cq(&priv->drop_rq.cq);
mlx5e_destroy_cq(&priv->drop_rq.cq);
}
static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 in[MLX5_ST_SZ_DW(create_tis_in)];
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
memset(in, 0, sizeof(in));
MLX5_SET(tisc, tisc, prio, tc);
MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}
static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
{
mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}
static int mlx5e_create_tises(struct mlx5e_priv *priv)
{
int err;
int tc;
for (tc = 0; tc < priv->params.num_tc; tc++) {
err = mlx5e_create_tis(priv, tc);
if (err)
goto err_close_tises;
}
return 0;
err_close_tises:
for (tc--; tc >= 0; tc--)
mlx5e_destroy_tis(priv, tc);
return err;
}
static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
{
int tc;
for (tc = 0; tc < priv->params.num_tc; tc++)
mlx5e_destroy_tis(priv, tc);
}
static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
{
void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_L4_SPORT |\
MLX5_HASH_FIELD_SEL_L4_DPORT)
#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_IPSEC_SPI)
mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
switch (tt) {
case MLX5E_TT_ANY:
MLX5_SET(tirc, tirc, indirect_table,
priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
break;
default:
MLX5_SET(tirc, tirc, indirect_table,
priv->rqtn[MLX5E_INDIRECTION_RQT]);
MLX5_SET(tirc, tirc, rx_hash_fn,
mlx5e_rx_hash_fn(priv->params.rss_hfunc));
if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
void *rss_key = MLX5_ADDR_OF(tirc, tirc,
rx_hash_toeplitz_key);
size_t len = MLX5_FLD_SZ_BYTES(tirc,
rx_hash_toeplitz_key);
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, priv->params.toeplitz_hash_key, len);
}
break;
}
switch (tt) {
case MLX5E_TT_IPV4_TCP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV6_TCP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV4_UDP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV6_UDP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_L4PORTS);
break;
case MLX5E_TT_IPV4_IPSEC_AH:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV6_IPSEC_AH:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV4_IPSEC_ESP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV6_IPSEC_ESP:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP_IPSEC_SPI);
break;
case MLX5E_TT_IPV4:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP);
break;
case MLX5E_TT_IPV6:
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP);
break;
}
}
static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 *in;
void *tirc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
mlx5e_build_tir_ctx(priv, tirc, tt);
err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
kvfree(in);
return err;
}
static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
{
mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
}
static int mlx5e_create_tirs(struct mlx5e_priv *priv)
{
int err;
int i;
for (i = 0; i < MLX5E_NUM_TT; i++) {
err = mlx5e_create_tir(priv, i);
if (err)
goto err_destroy_tirs;
}
return 0;
err_destroy_tirs:
for (i--; i >= 0; i--)
mlx5e_destroy_tir(priv, i);
return err;
}
static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
{
int i;
for (i = 0; i < MLX5E_NUM_TT; i++)
mlx5e_destroy_tir(priv, i);
}
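/* Report the cached vport counters; they are refreshed asynchronously by
 * update_stats_work, which is scheduled while the netdev is open.
 */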
static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
stats->rx_packets = vstats->rx_packets;
stats->rx_bytes = vstats->rx_bytes;
stats->tx_packets = vstats->tx_packets;
stats->tx_bytes = vstats->tx_bytes;
stats->multicast = vstats->rx_multicast_packets +
vstats->tx_multicast_packets;
stats->tx_errors = vstats->tx_error_packets;
stats->rx_errors = vstats->rx_error_packets;
stats->tx_dropped = vstats->tx_queue_dropped;
stats->rx_crc_errors = 0;
stats->rx_length_errors = 0;
return stats;
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
schedule_work(&priv->set_rx_mode_work);
}
static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct sockaddr *saddr = addr;
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
netif_addr_lock_bh(netdev);
ether_addr_copy(netdev->dev_addr, saddr->sa_data);
netif_addr_unlock_bh(netdev);
schedule_work(&priv->set_rx_mode_work);
return 0;
}
static int mlx5e_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err = 0;
netdev_features_t changes = features ^ netdev->features;
mutex_lock(&priv->state_lock);
if (changes & NETIF_F_LRO) {
bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (was_opened)
mlx5e_close_locked(priv->netdev);
priv->params.lro_en = !!(features & NETIF_F_LRO);
mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
if (was_opened)
err = mlx5e_open_locked(priv->netdev);
}
mutex_unlock(&priv->state_lock);
if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
mlx5e_enable_vlan_filter(priv);
else
mlx5e_disable_vlan_filter(priv);
}
return err;
}
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
bool was_opened;
int max_mtu;
int err = 0;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
if (new_mtu > max_mtu) {
netdev_err(netdev,
"%s: Bad MTU (%d) > (%d) Max\n",
__func__, new_mtu, max_mtu);
return -EINVAL;
}
mutex_lock(&priv->state_lock);
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (was_opened)
mlx5e_close_locked(netdev);
netdev->mtu = new_mtu;
if (was_opened)
err = mlx5e_open_locked(netdev);
mutex_unlock(&priv->state_lock);
return err;
}
static struct net_device_ops mlx5e_netdev_ops = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_get_stats64 = mlx5e_get_stats,
.ndo_set_rx_mode = mlx5e_set_rx_mode,
.ndo_set_mac_address = mlx5e_set_mac,
.ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
.ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return -ENOTSUPP;
if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
!MLX5_CAP_GEN(mdev, nic_flow_table) ||
!MLX5_CAP_ETH(mdev, csum_cap) ||
!MLX5_CAP_ETH(mdev, max_lso_cap) ||
!MLX5_CAP_ETH(mdev, vlan_cap) ||
!MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
MLX5_CAP_FLOWTABLE(mdev,
flow_table_properties_nic_receive.max_ft_level)
< 3) {
mlx5_core_warn(mdev,
"Not creating net device, some required device capabilities are missing\n");
return -ENOTSUPP;
}
return 0;
}
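/* Largest inline header that fits in a BlueFlame send: half the BF
 * register minus the fixed WQE segments, plus the two inline_hdr_start
 * bytes that already live inside struct mlx5e_tx_wqe.
 */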
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
return bf_buf_size -
sizeof(struct mlx5e_tx_wqe) +
2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
int num_channels)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int i;
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
priv->params.log_rq_size =
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
priv->params.rx_cq_moderation_usec =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
priv->params.rx_cq_moderation_pkts =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
priv->params.tx_cq_moderation_usec =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
priv->params.tx_cq_moderation_pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
priv->params.min_rx_wqes =
MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
priv->params.num_tc = 1;
priv->params.default_vlan_prio = 0;
priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
netdev_rss_key_fill(priv->params.toeplitz_hash_key,
sizeof(priv->params.toeplitz_hash_key));
for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
priv->params.indirection_rqt[i] = i % num_channels;
priv->params.lro_wqe_sz =
MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
priv->mdev = mdev;
priv->netdev = netdev;
priv->params.num_channels = num_channels;
priv->default_vlan_prio = priv->params.default_vlan_prio;
spin_lock_init(&priv->async_events_spinlock);
mutex_init(&priv->state_lock);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
mlx5_query_nic_vport_mac_address(priv->mdev, netdev->dev_addr);
}
static void mlx5e_build_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
if (priv->params.num_tc > 1)
mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->watchdog_timeo = 15 * HZ;
netdev->ethtool_ops = &mlx5e_ethtool_ops;
netdev->vlan_features |= NETIF_F_SG;
netdev->vlan_features |= NETIF_F_IP_CSUM;
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_GRO;
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
netdev->vlan_features |= NETIF_F_RXCSUM;
netdev->vlan_features |= NETIF_F_RXHASH;
if (MLX5_CAP_ETH(mdev, lro_cap))
netdev->vlan_features |= NETIF_F_LRO;
netdev->hw_features = netdev->vlan_features;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->features = netdev->hw_features;
if (!priv->params.lro_en)
netdev->features &= ~NETIF_F_LRO;
netdev->features |= NETIF_F_HIGHDMA;
netdev->priv_flags |= IFF_UNICAST_FLT;
mlx5e_set_netdev_dev_addr(netdev);
}
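/* Create a physical-address memory key (MLX5_ACCESS_MODE_PA +
 * MLX5_MKEY_LEN64) covering the whole address space; it is used as the
 * lkey for the channels (see c->mkey_be in mlx5e_open_channel()).
 */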
static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
struct mlx5_core_mr *mr)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_create_mkey_mbox_in *in;
int err;
in = mlx5_vzalloc(sizeof(*in));
if (!in)
return -ENOMEM;
in->seg.flags = MLX5_PERM_LOCAL_WRITE |
MLX5_PERM_LOCAL_READ |
MLX5_ACCESS_MODE_PA;
in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
NULL);
kvfree(in);
return err;
}
static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
struct net_device *netdev;
struct mlx5e_priv *priv;
int nch = min_t(int, mdev->priv.eq_table.num_comp_vectors,
MLX5E_MAX_NUM_CHANNELS);
int err;
if (mlx5e_check_required_hca_cap(mdev))
return NULL;
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
}
mlx5e_build_netdev_priv(mdev, netdev, nch);
mlx5e_build_netdev(netdev);
netif_carrier_off(netdev);
priv = netdev_priv(netdev);
err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
if (err) {
mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
goto err_free_netdev;
}
err = mlx5_core_alloc_pd(mdev, &priv->pdn);
if (err) {
mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
goto err_unmap_free_uar;
}
err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
if (err) {
mlx5_core_err(mdev, "alloc td failed, %d\n", err);
goto err_dealloc_pd;
}
err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
if (err) {
mlx5_core_err(mdev, "create mkey failed, %d\n", err);
goto err_dealloc_transport_domain;
}
err = mlx5e_create_tises(priv);
if (err) {
mlx5_core_warn(mdev, "create tises failed, %d\n", err);
goto err_destroy_mkey;
}
err = mlx5e_open_drop_rq(priv);
if (err) {
mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
goto err_destroy_tises;
}
err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
if (err) {
mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
goto err_close_drop_rq;
}
err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
if (err) {
mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
goto err_destroy_rqt_indir;
}
err = mlx5e_create_tirs(priv);
if (err) {
mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
goto err_destroy_rqt_single;
}
err = mlx5e_create_flow_tables(priv);
if (err) {
mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
goto err_destroy_tirs;
}
mlx5e_init_eth_addr(priv);
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
goto err_destroy_flow_tables;
}
mlx5e_enable_async_events(priv);
schedule_work(&priv->set_rx_mode_work);
return priv;
err_destroy_flow_tables:
mlx5e_destroy_flow_tables(priv);
err_destroy_tirs:
mlx5e_destroy_tirs(priv);
err_destroy_rqt_single:
mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
err_destroy_rqt_indir:
mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
err_close_drop_rq:
mlx5e_close_drop_rq(priv);
err_destroy_tises:
mlx5e_destroy_tises(priv);
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, &priv->mr);
err_dealloc_transport_domain:
mlx5_dealloc_transport_domain(mdev, priv->tdn);
err_dealloc_pd:
mlx5_core_dealloc_pd(mdev, priv->pdn);
err_unmap_free_uar:
mlx5_unmap_free_uar(mdev, &priv->cq_uar);
err_free_netdev:
free_netdev(netdev);
return NULL;
}
static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
{
struct mlx5e_priv *priv = vpriv;
struct net_device *netdev = priv->netdev;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
schedule_work(&priv->set_rx_mode_work);
mlx5e_disable_async_events(priv);
flush_scheduled_work();
unregister_netdev(netdev);
mlx5e_destroy_flow_tables(priv);
mlx5e_destroy_tirs(priv);
mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
mlx5e_close_drop_rq(priv);
mlx5e_destroy_tises(priv);
mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
free_netdev(netdev);
}
static void *mlx5e_get_netdev(void *vpriv)
{
struct mlx5e_priv *priv = vpriv;
return priv->netdev;
}
static struct mlx5_interface mlx5e_interface = {
.add = mlx5e_create_netdev,
.remove = mlx5e_destroy_netdev,
.event = mlx5e_async_event,
.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
.get_dev = mlx5e_get_netdev,
};
void mlx5e_init(void)
{
mlx5_register_interface(&mlx5e_interface);
}
void mlx5e_cleanup(void)
{
mlx5_unregister_interface(&mlx5e_interface);
}
| gpl-2.0 |
MyOpenCommunity/linux-davinci-medium | drivers/scsi/lpfc/lpfc_sli.c | 455 | 369394 | /*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
* Public License as published by the Free Software Foundation. *
* This program is distributed in the hope that it will be useful. *
* ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
* WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
* DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
* TO BE LEGALLY INVALID. See the GNU General Public License for *
* more details, a copy of which can be found in the file COPYING *
* included with this package. *
*******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
LPFC_UNKNOWN_IOCB,
LPFC_UNSOL_IOCB,
LPFC_SOL_IOCB,
LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
uint8_t *, uint32_t *);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
return &iocbq->iocb;
}
/**
* lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
* @q: The Work Queue to operate on.
* @wqe: The work Queue Entry to put on the Work queue.
*
* This routine will copy the contents of @wqe to the next available entry on
* the @q. This function will then ring the Work Queue Doorbell to signal the
* HBA to start processing the Work Queue Entry. This function returns 0 if
* successful. If no entries are available on @q then this function will return
* -ENOMEM.
* The caller is expected to hold the hbalock when calling this routine.
**/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
struct lpfc_register doorbell;
uint32_t host_index;
/* If the host has not yet processed the next entry then we are done */
if (((q->host_index + 1) % q->entry_count) == q->hba_index)
return -ENOMEM;
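/* Wrap-around example: with entry_count = 256, host_index = 255 and
 * hba_index = 0, (255 + 1) % 256 == hba_index, i.e. the ring is full.
 */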
/* set consumption flag every once in a while */
if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
/* Update the host index before invoking device */
host_index = q->host_index;
q->host_index = ((q->host_index + 1) % q->entry_count);
/* Ring Doorbell */
doorbell.word0 = 0;
bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
return 0;
}
/**
* lpfc_sli4_wq_release - Updates internal hba index for WQ
* @q: The Work Queue to operate on.
* @index: The index to advance the hba index to.
*
* This routine will update the HBA index of a queue to reflect consumption of
* Work Queue Entries by the HBA. When the HBA indicates that it has consumed
* an entry the host calls this function to update the queue's internal
* pointers. This routine returns the number of entries that were consumed by
* the HBA.
**/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
uint32_t released = 0;
if (q->hba_index == index)
return 0;
do {
q->hba_index = ((q->hba_index + 1) % q->entry_count);
released++;
} while (q->hba_index != index);
return released;
}
/**
* lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
* @q: The Mailbox Queue to operate on.
* @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
*
* This routine will copy the contents of @mqe to the next available entry on
* the @q. This function will then ring the Mailbox Queue Doorbell to signal the
* HBA to start processing the Mailbox Queue Entry. This function returns 0 if
* successful. If no entries are available on @q then this function will return
* -ENOMEM.
* The caller is expected to hold the hbalock when calling this routine.
**/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
struct lpfc_register doorbell;
uint32_t host_index;
/* If the host has not yet processed the next entry then we are done */
if (((q->host_index + 1) % q->entry_count) == q->hba_index)
return -ENOMEM;
lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
/* Save off the mailbox pointer for completion */
q->phba->mbox = (MAILBOX_t *)temp_mqe;
/* Update the host index before invoking device */
host_index = q->host_index;
q->host_index = ((q->host_index + 1) % q->entry_count);
/* Ring Doorbell */
doorbell.word0 = 0;
bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
return 0;
}
/**
* lpfc_sli4_mq_release - Updates internal hba index for MQ
* @q: The Mailbox Queue to operate on.
*
* This routine will update the HBA index of a queue to reflect consumption of
* a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
* an entry the host calls this function to update the queue's internal
* pointers. This routine returns the number of entries that were consumed by
* the HBA.
**/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
/* Clear the mailbox pointer for completion */
q->phba->mbox = NULL;
q->hba_index = ((q->hba_index + 1) % q->entry_count);
return 1;
}
/**
* lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
* @q: The Event Queue to get the first valid EQE from
*
* This routine will get the first valid Event Queue Entry from @q, update
* the queue's internal hba index, and return the EQE. If no valid EQEs are in
* the Queue (no more work to do), or if the Queue is full of EQEs that have
* been processed but not popped back to the HBA, this routine will return NULL.
**/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
/* If the next EQE is not valid then we are done */
if (!bf_get(lpfc_eqe_valid, eqe))
return NULL;
/* If the host has not yet processed the next entry then we are done */
if (((q->hba_index + 1) % q->entry_count) == q->host_index)
return NULL;
q->hba_index = ((q->hba_index + 1) % q->entry_count);
return eqe;
}
/**
* lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
* @q: The Event Queue that the host has completed processing for.
* @arm: Indicates whether the host wants to arm this EQ.
*
* This routine will mark all Event Queue Entries on @q, from the last
* known completed entry to the last entry that was processed, as completed
* by clearing the valid bit for each completion queue entry. Then it will
* notify the HBA, by ringing the doorbell, that the EQEs have been processed.
* The internal host index in the @q will be updated by this routine to indicate
* that the host has finished processing the entries. The @arm parameter
* indicates that the queue should be rearmed when ringing the doorbell.
*
* This function will return the number of EQEs that were popped.
**/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
uint32_t released = 0;
struct lpfc_eqe *temp_eqe;
struct lpfc_register doorbell;
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_eqe = q->qe[q->host_index].eqe;
bf_set(lpfc_eqe_valid, temp_eqe, 0);
released++;
q->host_index = ((q->host_index + 1) % q->entry_count);
}
if (unlikely(released == 0 && !arm))
return 0;
/* ring doorbell for number popped */
doorbell.word0 = 0;
if (arm) {
bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
}
bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
return released;
}
/**
* lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
* @q: The Completion Queue to get the first valid CQE from
*
* This routine will get the first valid Completion Queue Entry from @q, update
* the queue's internal hba index, and return the CQE. If no valid CQEs are in
* the Queue (no more work to do), or if the Queue is full of CQEs that have
* been processed but not popped back to the HBA, this routine will return NULL.
**/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
struct lpfc_cqe *cqe;
/* If the next CQE is not valid then we are done */
if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
return NULL;
/* If the host has not yet processed the next entry then we are done */
if (((q->hba_index + 1) % q->entry_count) == q->host_index)
return NULL;
cqe = q->qe[q->hba_index].cqe;
q->hba_index = ((q->hba_index + 1) % q->entry_count);
return cqe;
}
/**
* lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
* @q: The Completion Queue that the host has completed processing for.
* @arm: Indicates whether the host wants to arm this CQ.
*
* This routine will mark all Completion queue entries on @q, from the last
* known completed entry to the last entry that was processed, as completed
* by clearing the valid bit for each completion queue entry. Then it will
* notify the HBA, by ringing the doorbell, that the CQEs have been processed.
* The internal host index in the @q will be updated by this routine to indicate
* that the host has finished processing the entries. The @arm parameter
* indicates that the queue should be rearmed when ringing the doorbell.
*
* This function will return the number of CQEs that were released.
**/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
uint32_t released = 0;
struct lpfc_cqe *temp_qe;
struct lpfc_register doorbell;
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_qe = q->qe[q->host_index].cqe;
bf_set(lpfc_cqe_valid, temp_qe, 0);
released++;
q->host_index = ((q->host_index + 1) % q->entry_count);
}
if (unlikely(released == 0 && !arm))
return 0;
/* ring doorbell for number popped */
doorbell.word0 = 0;
if (arm)
bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
return released;
}
/**
* lpfc_sli4_rq_put - Put Receive Buffer Queue Entries on a Receive Queue
* @hq: The Header Receive Queue to operate on.
* @dq: The Data Receive Queue to operate on.
* @hrqe: The header Receive Queue Entry to put on the header queue.
* @drqe: The data Receive Queue Entry to put on the data queue.
*
* This routine will copy the contents of @hrqe and @drqe to the next available
* entries on @hq and @dq. This function will then ring the Receive Queue
* Doorbell to signal the HBA to start processing the Receive Queue Entries.
* This function returns the index that the hrqe was copied to if successful.
* If no entries are available on @hq then this function will return -ENOMEM.
* The caller is expected to hold the hbalock when calling this routine.
**/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
struct lpfc_register doorbell;
int put_index = hq->host_index;
if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
return -EINVAL;
if (hq->host_index != dq->host_index)
return -EINVAL;
/* If the host has not yet processed the next entry then we are done */
if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
return -EBUSY;
lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
/* Update the host index to point to the next slot */
hq->host_index = ((hq->host_index + 1) % hq->entry_count);
dq->host_index = ((dq->host_index + 1) % dq->entry_count);
/* Ring The Header Receive Queue Doorbell */
if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
doorbell.word0 = 0;
bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
LPFC_RQ_POST_BATCH);
bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
}
return put_index;
}
/**
* lpfc_sli4_rq_release - Updates internal hba index for RQ
* @hq: The Header Receive Queue to operate on.
* @dq: The Data Receive Queue to operate on.
*
* This routine will update the HBA index of a queue to reflect consumption of
* one Receive Queue Entry by the HBA. When the HBA indicates that it has
* consumed an entry the host calls this function to update the queue's
* internal pointers. This routine returns the number of entries that were
* consumed by the HBA.
**/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
return 0;
hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
return 1;
}
/**
* lpfc_cmd_iocb - Get next command iocb entry in the ring
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
*
* This function returns pointer to next command iocb entry
* in the command ring. The caller must hold hbalock to prevent
* other threads from consuming the next command iocb.
* SLI-2/SLI-3 provide different sized iocbs.
**/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
return (IOCB_t *) (((char *) pring->cmdringaddr) +
pring->cmdidx * phba->iocb_cmd_size);
}
/**
* lpfc_resp_iocb - Get next response iocb entry in the ring
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
*
* This function returns pointer to next response iocb entry
* in the response ring. The caller must hold hbalock to make sure
* that no other thread consumes the next response iocb.
* SLI-2/SLI-3 provide different sized iocbs.
**/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
return (IOCB_t *) (((char *) pring->rspringaddr) +
pring->rspidx * phba->iocb_rsp_size);
}
/**
* __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
* @phba: Pointer to HBA context object.
*
* This function is called with hbalock held. This function
* allocates a new driver iocb object from the iocb pool. If the
* allocation is successful, it returns pointer to the newly
* allocated iocb object else it returns NULL.
**/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
struct lpfc_iocbq * iocbq = NULL;
list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
return iocbq;
}
/**
* __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
* @phba: Pointer to HBA context object.
* @xritag: XRI value.
*
* This function clears the sglq pointer from the array of active
* sglq's. The xritag that is passed in is used to index into the
* array. Before the xritag can be used it needs to be adjusted
* by subtracting the xribase.
*
* Returns sglq pointer = success, NULL = Failure.
**/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
uint16_t adj_xri;
struct lpfc_sglq *sglq;
adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
return NULL;
sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
return sglq;
}
/**
* __lpfc_get_active_sglq - Get the active sglq for this XRI.
* @phba: Pointer to HBA context object.
* @xritag: XRI value.
*
* This function returns the sglq pointer from the array of active
* sglq's. The xritag that is passed in is used to index into the
* array. Before the xritag can be used it needs to be adjusted
* by subtracting the xribase.
*
* Returns sglq pointer = success, NULL = Failure.
**/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
uint16_t adj_xri;
struct lpfc_sglq *sglq;
adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
return NULL;
sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
return sglq;
}
/**
* __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
* @phba: Pointer to HBA context object.
*
* This function is called with hbalock held. It gets a new driver
* sglq object from the sglq list. If the list is not empty, it
* removes the first entry, marks it active, and returns a pointer
* to it; otherwise it returns NULL.
**/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba)
{
struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
struct lpfc_sglq *sglq = NULL;
uint16_t adj_xri;
list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
return sglq;
}
/**
* lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
* @phba: Pointer to HBA context object.
*
* This function is called with no lock held. This function
* allocates a new driver iocb object from the iocb pool. If the
* allocation is successful, it returns pointer to the newly
* allocated iocb object else it returns NULL.
**/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
struct lpfc_iocbq * iocbq = NULL;
unsigned long iflags;
spin_lock_irqsave(&phba->hbalock, iflags);
iocbq = __lpfc_sli_get_iocbq(phba);
spin_unlock_irqrestore(&phba->hbalock, iflags);
return iocbq;
}
/**
* __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
* This function is called with hbalock held to release driver
* iocb object to the iocb pool. The iotag in the iocb object
* does not change for each use of the iocb object. This function
* clears all other fields of the iocb object when it is freed.
* The sglq structure that holds the xritag and phys and virtual
* mappings for the scatter gather list is retrieved from the
* active array of sglq. The get of the sglq pointer also clears
* the entry in the array. If the status of the IO indicates that
* this IO was aborted then the sglq entry is put on the
* lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
* IO has good status or fails for any other reason then the sglq
* entry is added to the free list (lpfc_sgl_list).
**/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
struct lpfc_sglq *sglq;
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
unsigned long iflag;
if (iocbq->sli4_xritag == NO_XRI)
sglq = NULL;
else
sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
if (sglq) {
if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
|| ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
&& (iocbq->iocb.un.ulpWord[4]
== IOERR_SLI_ABORTED))) {
spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
iflag);
list_add(&sglq->list,
&phba->sli4_hba.lpfc_abts_els_sgl_list);
spin_unlock_irqrestore(
&phba->sli4_hba.abts_sgl_list_lock, iflag);
} else
list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
}
/*
* Clean all volatile data fields, preserve iotag and node struct.
*/
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
iocbq->sli4_xritag = NO_XRI;
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
* __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
* This function is called with hbalock held to release driver
* iocb object to the iocb pool. The iotag in the iocb object
* does not change for each use of the iocb object. This function
* clears all other fields of the iocb object when it is freed.
**/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
/*
* Clean all volatile data fields, preserve iotag and node struct.
*/
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
iocbq->sli4_xritag = NO_XRI;
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
* __lpfc_sli_release_iocbq - Release iocb to the iocb pool
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
* This function is called with hbalock held to release driver
* iocb object to the iocb pool. The iotag in the iocb object
* does not change for each use of the iocb object. This function
* clears all other fields of the iocb object when it is freed.
**/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
phba->__lpfc_sli_release_iocbq(phba, iocbq);
}
/**
* lpfc_sli_release_iocbq - Release iocb to the iocb pool
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
* This function is called with no lock held to release the iocb to
* the iocb pool.
**/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
unsigned long iflags;
/*
* Clean all volatile data fields, preserve iotag and node struct.
*/
spin_lock_irqsave(&phba->hbalock, iflags);
__lpfc_sli_release_iocbq(phba, iocbq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
* lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
* @phba: Pointer to HBA context object.
* @iocblist: List of IOCBs.
* @ulpstatus: ULP status in IOCB command field.
* @ulpWord4: ULP word-4 in IOCB command field.
*
* This function is called with a list of IOCBs to cancel. It cancels the IOCB
* on the list by invoking the complete callback function associated with the
* IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB command
* fields.
**/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
uint32_t ulpstatus, uint32_t ulpWord4)
{
struct lpfc_iocbq *piocb;
while (!list_empty(iocblist)) {
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
if (!piocb->iocb_cmpl)
lpfc_sli_release_iocbq(phba, piocb);
else {
piocb->iocb.ulpStatus = ulpstatus;
piocb->iocb.un.ulpWord[4] = ulpWord4;
(piocb->iocb_cmpl) (phba, piocb, piocb);
}
}
return;
}
/**
* lpfc_sli_iocb_cmd_type - Get the iocb type
* @iocb_cmnd: iocb command code.
*
* This function is called by ring event handler function to get the iocb type.
* This function translates the iocb command to an iocb command type used to
* decide the final disposition of each completed IOCB.
* The function returns
* LPFC_UNKNOWN_IOCB if it is an unsupported iocb
* LPFC_SOL_IOCB if it is a solicited iocb completion
* LPFC_ABORT_IOCB if it is an abort iocb
* LPFC_UNSOL_IOCB if it is an unsolicited iocb
*
* The caller is not required to hold any lock.
**/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
if (iocb_cmnd > CMD_MAX_IOCB_CMD)
return LPFC_UNKNOWN_IOCB;
switch (iocb_cmnd) {
case CMD_XMIT_SEQUENCE_CR:
case CMD_XMIT_SEQUENCE_CX:
case CMD_XMIT_BCAST_CN:
case CMD_XMIT_BCAST_CX:
case CMD_ELS_REQUEST_CR:
case CMD_ELS_REQUEST_CX:
case CMD_CREATE_XRI_CR:
case CMD_CREATE_XRI_CX:
case CMD_GET_RPI_CN:
case CMD_XMIT_ELS_RSP_CX:
case CMD_GET_RPI_CR:
case CMD_FCP_IWRITE_CR:
case CMD_FCP_IWRITE_CX:
case CMD_FCP_IREAD_CR:
case CMD_FCP_IREAD_CX:
case CMD_FCP_ICMND_CR:
case CMD_FCP_ICMND_CX:
case CMD_FCP_TSEND_CX:
case CMD_FCP_TRSP_CX:
case CMD_FCP_TRECEIVE_CX:
case CMD_FCP_AUTO_TRSP_CX:
case CMD_ADAPTER_MSG:
case CMD_ADAPTER_DUMP:
case CMD_XMIT_SEQUENCE64_CR:
case CMD_XMIT_SEQUENCE64_CX:
case CMD_XMIT_BCAST64_CN:
case CMD_XMIT_BCAST64_CX:
case CMD_ELS_REQUEST64_CR:
case CMD_ELS_REQUEST64_CX:
case CMD_FCP_IWRITE64_CR:
case CMD_FCP_IWRITE64_CX:
case CMD_FCP_IREAD64_CR:
case CMD_FCP_IREAD64_CX:
case CMD_FCP_ICMND64_CR:
case CMD_FCP_ICMND64_CX:
case CMD_FCP_TSEND64_CX:
case CMD_FCP_TRSP64_CX:
case CMD_FCP_TRECEIVE64_CX:
case CMD_GEN_REQUEST64_CR:
case CMD_GEN_REQUEST64_CX:
case CMD_XMIT_ELS_RSP64_CX:
case DSSCMD_IWRITE64_CR:
case DSSCMD_IWRITE64_CX:
case DSSCMD_IREAD64_CR:
case DSSCMD_IREAD64_CX:
case DSSCMD_INVALIDATE_DEK:
case DSSCMD_SET_KEK:
case DSSCMD_GET_KEK_ID:
case DSSCMD_GEN_XFER:
type = LPFC_SOL_IOCB;
break;
case CMD_ABORT_XRI_CN:
case CMD_ABORT_XRI_CX:
case CMD_CLOSE_XRI_CN:
case CMD_CLOSE_XRI_CX:
case CMD_XRI_ABORTED_CX:
case CMD_ABORT_MXRI64_CN:
type = LPFC_ABORT_IOCB;
break;
case CMD_RCV_SEQUENCE_CX:
case CMD_RCV_ELS_REQ_CX:
case CMD_RCV_SEQUENCE64_CX:
case CMD_RCV_ELS_REQ64_CX:
case CMD_ASYNC_STATUS:
case CMD_IOCB_RCV_SEQ64_CX:
case CMD_IOCB_RCV_ELS64_CX:
case CMD_IOCB_RCV_CONT64_CX:
case CMD_IOCB_RET_XRI64_CX:
type = LPFC_UNSOL_IOCB;
break;
case CMD_IOCB_XMIT_MSEQ64_CR:
case CMD_IOCB_XMIT_MSEQ64_CX:
case CMD_IOCB_RCV_SEQ_LIST64_CX:
case CMD_IOCB_RCV_ELS_LIST64_CX:
case CMD_IOCB_CLOSE_EXTENDED_CN:
case CMD_IOCB_ABORT_EXTENDED_CN:
case CMD_IOCB_RET_HBQE64_CN:
case CMD_IOCB_FCP_IBIDIR64_CR:
case CMD_IOCB_FCP_IBIDIR64_CX:
case CMD_IOCB_FCP_ITASKMGT64_CX:
case CMD_IOCB_LOGENTRY_CN:
case CMD_IOCB_LOGENTRY_ASYNC_CN:
printk("%s - Unhandled SLI-3 Command x%x\n",
__func__, iocb_cmnd);
type = LPFC_UNKNOWN_IOCB;
break;
default:
type = LPFC_UNKNOWN_IOCB;
break;
}
return type;
}
/**
* lpfc_sli_ring_map - Issue config_ring mbox for all rings
* @phba: Pointer to HBA context object.
*
* This function is called from SLI initialization code
* to configure every ring of the HBA's SLI interface. The
* caller is not required to hold any lock. This function issues
* a config_ring mailbox command for each ring.
* This function returns zero if successful else returns a negative
* error code.
**/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
LPFC_MBOXQ_t *pmb;
MAILBOX_t *pmbox;
int i, rc, ret = 0;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb)
return -ENOMEM;
pmbox = &pmb->u.mb;
phba->link_state = LPFC_INIT_MBX_CMDS;
for (i = 0; i < psli->num_rings; i++) {
lpfc_config_ring(phba, i, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0446 Adapter failed to init (%d), "
"mbxCmd x%x CFG_RING, mbxStatus x%x, "
"ring %d\n",
rc, pmbox->mbxCommand,
pmbox->mbxStatus, i);
phba->link_state = LPFC_HBA_ERROR;
ret = -ENXIO;
break;
}
}
mempool_free(pmb, phba->mbox_mem_pool);
return ret;
}
/**
* lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @piocb: Pointer to the driver iocb object.
*
* This function is called with hbalock held. The function adds the
* new iocb to txcmplq of the given ring. This function always returns
* 0. If this function is called for the ELS ring, this function checks if
* there is a vport associated with the ELS command. This function also
* starts els_tmofunc timer if this is an ELS command.
**/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
list_add_tail(&piocb->list, &pring->txcmplq);
pring->txcmplq_cnt++;
if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
if (!piocb->vport)
BUG();
else
mod_timer(&piocb->vport->els_tmofunc,
jiffies + HZ * (phba->fc_ratov << 1));
}
return 0;
}
/**
* lpfc_sli_ringtx_get - Get first element of the txq
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
*
* This function is called with hbalock held to get next
* iocb in txq of the given ring. If there is any iocb in
* the txq, the function returns first iocb in the list after
* removing the iocb from the list, else it returns NULL.
**/
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
struct lpfc_iocbq *cmd_iocb;
list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
if (cmd_iocb != NULL)
pring->txq_cnt--;
return cmd_iocb;
}
/**
* lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
*
* This function is called with hbalock held and the caller must post the
* iocb without releasing the lock. If the caller releases the lock,
* the iocb slot returned by the function is not guaranteed to be available.
* The function returns pointer to the next available iocb slot if there
* is available slot in the ring, else it returns NULL.
* If the get index of the ring is ahead of the put index, the function
* will post an error attention event to the worker thread to take the
* HBA to offline state.
**/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
uint32_t max_cmd_idx = pring->numCiocb;
if ((pring->next_cmdidx == pring->cmdidx) &&
(++pring->next_cmdidx >= max_cmd_idx))
pring->next_cmdidx = 0;
if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
if (unlikely(pring->local_getidx >= max_cmd_idx)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0315 Ring %d issue: portCmdGet %d "
"is bigger than cmd ring %d\n",
pring->ringno,
pring->local_getidx, max_cmd_idx);
phba->link_state = LPFC_HBA_ERROR;
/*
* All error attention handlers are posted to
* worker thread
*/
phba->work_ha |= HA_ERATT;
phba->work_hs = HS_FFER3;
lpfc_worker_wake_up(phba);
return NULL;
}
if (pring->local_getidx == pring->next_cmdidx)
return NULL;
}
return lpfc_cmd_iocb(phba, pring);
}
/**
* lpfc_sli_next_iotag - Get an iotag for the iocb
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to driver iocb object.
*
* This function gets an iotag for the iocb. If there is no unused iotag
* and iocbq_lookup_len < 0xffff, this function allocates a bigger
* iocbq_lookup array and assigns a new iotag from it.
* The function returns the allocated iotag if successful, else returns
* zero. Zero is not a valid iotag.
* The caller is not required to hold any lock.
**/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
struct lpfc_iocbq **new_arr;
struct lpfc_iocbq **old_arr;
size_t new_len;
struct lpfc_sli *psli = &phba->sli;
uint16_t iotag;
spin_lock_irq(&phba->hbalock);
iotag = psli->last_iotag;
if (++iotag < psli->iocbq_lookup_len) {
psli->last_iotag = iotag;
psli->iocbq_lookup[iotag] = iocbq;
spin_unlock_irq(&phba->hbalock);
iocbq->iotag = iotag;
return iotag;
} else if (psli->iocbq_lookup_len < (0xffff
- LPFC_IOCBQ_LOOKUP_INCREMENT)) {
new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
spin_unlock_irq(&phba->hbalock);
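/*
 * Drop the lock across the allocation: kzalloc(GFP_KERNEL) may
 * sleep. Because another thread may have grown the table in the
 * meantime, the lookup length is re-checked after the lock is
 * re-taken below and the new array is discarded if it is stale.
 */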
new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
GFP_KERNEL);
if (new_arr) {
spin_lock_irq(&phba->hbalock);
old_arr = psli->iocbq_lookup;
if (new_len <= psli->iocbq_lookup_len) {
/* highly improbable case */
kfree(new_arr);
iotag = psli->last_iotag;
if (++iotag < psli->iocbq_lookup_len) {
psli->last_iotag = iotag;
psli->iocbq_lookup[iotag] = iocbq;
spin_unlock_irq(&phba->hbalock);
iocbq->iotag = iotag;
return iotag;
}
spin_unlock_irq(&phba->hbalock);
return 0;
}
if (psli->iocbq_lookup)
memcpy(new_arr, old_arr,
((psli->last_iotag + 1) *
sizeof (struct lpfc_iocbq *)));
psli->iocbq_lookup = new_arr;
psli->iocbq_lookup_len = new_len;
psli->last_iotag = iotag;
psli->iocbq_lookup[iotag] = iocbq;
spin_unlock_irq(&phba->hbalock);
iocbq->iotag = iotag;
kfree(old_arr);
return iotag;
}
} else
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
psli->last_iotag);
return 0;
}
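/*
 * Illustrative usage sketch (not part of the driver; everything here
 * other than lpfc_sli_next_iotag itself is assumed for illustration):
 * a caller that owns a freshly allocated iocbq reserves an iotag for
 * it and treats a zero return as failure, since zero is never a valid
 * iotag.
 *
 *	struct lpfc_iocbq *iocbq = kzalloc(sizeof(*iocbq), GFP_KERNEL);
 *	if (iocbq && lpfc_sli_next_iotag(phba, iocbq) == 0) {
 *		kfree(iocbq);		// lookup table exhausted
 *		iocbq = NULL;
 *	}
 */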
/**
* lpfc_sli_submit_iocb - Submit an iocb to the firmware
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @iocb: Pointer to iocb slot in the ring.
* @nextiocb: Pointer to driver iocb object which needs to be
* posted to firmware.
*
* This function is called with hbalock held to post a new iocb to
* the firmware. It copies the new iocb into the ring iocb slot and
* updates the ring pointers. The new iocb is added to the txcmplq if
* there is a completion callback for it; otherwise the iocb object is
* freed.
**/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
/*
* Set up an iotag
*/
nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_debugfs_slow_ring_trc(phba,
"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
*(((uint32_t *) &nextiocb->iocb) + 4),
*(((uint32_t *) &nextiocb->iocb) + 6),
*(((uint32_t *) &nextiocb->iocb) + 7));
}
/*
* Issue iocb command to adapter
*/
lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
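/*
 * The write barrier below orders the IOCB copy into the ring ahead of
 * the cmdPutInx doorbell write at the end of this function, so the
 * HBA never sees the put index advance before the entry contents are
 * visible.
 */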
wmb();
pring->stats.iocb_cmd++;
/*
* If there is no completion routine to call, we can release the
* IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
* that have no rsp ring completion, iocb_cmpl MUST be NULL.
*/
if (nextiocb->iocb_cmpl)
lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
else
__lpfc_sli_release_iocbq(phba, nextiocb);
/*
* Let the HBA know what IOCB slot will be the next one the
* driver will put a command into.
*/
pring->cmdidx = pring->next_cmdidx;
writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
/**
* lpfc_sli_update_full_ring - Update the chip attention register
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
*
* The caller is not required to hold any lock for calling this function.
* This function updates the chip attention bits for the ring to inform
* the firmware that there is pending work for this ring, and requests an
* interrupt when space becomes available in the ring. It is called when
* the driver is unable to post more iocbs to the ring because the ring
* is full.
**/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
int ringno = pring->ringno;
pring->flag |= LPFC_CALL_RING_AVAILABLE;
wmb();
/*
* Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
* The HBA will tell us when an IOCB entry is available.
*/
writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
readl(phba->CAregaddr); /* flush */
pring->stats.iocb_cmd_full++;
}
/**
* lpfc_sli_update_ring - Update chip attention register
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
*
* This function updates the chip attention register bit for the
* given ring to inform HBA that there is more work to be done
* in this ring. The caller is not required to hold any lock.
**/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
int ringno = pring->ringno;
/*
* Tell the HBA that there is work to do in this ring.
*/
if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
wmb();
writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
readl(phba->CAregaddr); /* flush */
}
}
/**
* lpfc_sli_resume_iocb - Process iocbs in the txq
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
*
* This function is called with hbalock held to post pending iocbs
* in the txq to the firmware. This function is called when driver
* detects space available in the ring.
**/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
IOCB_t *iocb;
struct lpfc_iocbq *nextiocb;
/*
* Check to see if:
* (a) there is anything on the txq to send
* (b) link is up
* (c) link attention events can be processed (fcp ring only)
* (d) IOCB processing is not blocked by the outstanding mbox command.
*/
if (pring->txq_cnt &&
lpfc_is_link_up(phba) &&
(pring->ringno != phba->sli.fcp_ring ||
phba->sli.sli_flag & LPFC_PROCESS_LA)) {
while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
(nextiocb = lpfc_sli_ringtx_get(phba, pring)))
lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
if (iocb)
lpfc_sli_update_ring(phba, pring);
else
lpfc_sli_update_full_ring(phba, pring);
}
return;
}
/**
* lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
* @phba: Pointer to HBA context object.
* @hbqno: HBQ number.
*
* This function is called with hbalock held to get the next
* available slot for the given HBQ. If a free slot is available
* for the HBQ, it returns a pointer to the next HBQ entry;
* otherwise it returns NULL.
**/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
struct hbq_s *hbqp = &phba->hbqs[hbqno];
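/*
 * Same circular-buffer scheme as the command ring above:
 * next_hbqPutIdx is the slot to fill next, and local_hbqGetIdx caches
 * the adapter's get index, refreshed from hbq_get[] only when the
 * ring looks full.
 */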
if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
++hbqp->next_hbqPutIdx >= hbqp->entry_count)
hbqp->next_hbqPutIdx = 0;
if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
uint32_t raw_index = phba->hbq_get[hbqno];
uint32_t getidx = le32_to_cpu(raw_index);
hbqp->local_hbqGetIdx = getidx;
if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
lpfc_printf_log(phba, KERN_ERR,
LOG_SLI | LOG_VPORT,
"1802 HBQ %d: local_hbqGetIdx "
"%u is > than hbqp->entry_count %u\n",
hbqno, hbqp->local_hbqGetIdx,
hbqp->entry_count);
phba->link_state = LPFC_HBA_ERROR;
return NULL;
}
if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
return NULL;
}
return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
hbqp->hbqPutIdx;
}
/**
* lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
* @phba: Pointer to HBA context object.
*
* This function is called with no lock held to free all the
* hbq buffers while uninitializing the SLI interface. It also
* frees the HBQ buffers returned by the firmware but not yet
* processed by the upper layers.
**/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
struct lpfc_dmabuf *dmabuf, *next_dmabuf;
struct hbq_dmabuf *hbq_buf;
unsigned long flags;
int i, hbq_count;
uint32_t hbqno;
hbq_count = lpfc_sli_hbq_count();
/* Return all memory used by all HBQs */
spin_lock_irqsave(&phba->hbalock, flags);
for (i = 0; i < hbq_count; ++i) {
list_for_each_entry_safe(dmabuf, next_dmabuf,
&phba->hbqs[i].hbq_buffer_list, list) {
hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
list_del(&hbq_buf->dbuf.list);
(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
}
phba->hbqs[i].buffer_count = 0;
}
/* Return all HBQ buffers that are in flight */
list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
list) {
hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
list_del(&hbq_buf->dbuf.list);
if (hbq_buf->tag == -1) {
(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
(phba, hbq_buf);
} else {
hbqno = hbq_buf->tag >> 16;
if (hbqno >= LPFC_MAX_HBQS)
(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
(phba, hbq_buf);
else
(phba->hbqs[hbqno].hbq_free_buffer)(phba,
hbq_buf);
}
}
/* Mark the HBQs not in use */
phba->hbq_in_use = 0;
spin_unlock_irqrestore(&phba->hbalock, flags);
}
/**
* lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
* @phba: Pointer to HBA context object.
* @hbqno: HBQ number.
* @hbq_buf: Pointer to HBQ buffer.
*
* This function is called with the hbalock held to post an
* hbq buffer to the firmware. It dispatches through the SLI-rev
* specific handler (lpfc_sli_hbq_to_firmware_s3/_s4 below) and
* returns zero if the buffer is successfully posted; otherwise it
* returns a negative error code.
**/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
struct hbq_dmabuf *hbq_buf)
{
return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
/**
* lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
* @phba: Pointer to HBA context object.
* @hbqno: HBQ number.
* @hbq_buf: Pointer to HBQ buffer.
*
* This function is called with the hbalock held to post an hbq buffer to the
* firmware. If the function finds an empty slot in the HBQ, it posts the
* buffer and places it on the hbq_buffer_list. The function returns zero if
* it successfully posts the buffer; otherwise it returns -ENOMEM.
**/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
struct hbq_dmabuf *hbq_buf)
{
struct lpfc_hbq_entry *hbqe;
dma_addr_t physaddr = hbq_buf->dbuf.phys;
/* Get next HBQ entry slot to use */
hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
if (hbqe) {
struct hbq_s *hbqp = &phba->hbqs[hbqno];
hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
hbqe->bde.tus.f.bdeSize = hbq_buf->size;
hbqe->bde.tus.f.bdeFlags = 0;
hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
/* Sync SLIM */
hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
/* flush */
readl(phba->hbq_put + hbqno);
list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
return 0;
} else
return -ENOMEM;
}
/**
* lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
* @phba: Pointer to HBA context object.
* @hbqno: HBQ number.
* @hbq_buf: Pointer to HBQ buffer.
*
* This function is called with the hbalock held to post an RQE to the SLI4
* firmware. If able to post the RQE to the RQ it will queue the hbq entry to
* the hbq_buffer_list and return zero, otherwise it will return an error.
**/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
struct hbq_dmabuf *hbq_buf)
{
int rc;
struct lpfc_rqe hrqe;
struct lpfc_rqe drqe;
hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
&hrqe, &drqe);
if (rc < 0)
return rc;
hbq_buf->tag = rc;
list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
return 0;
}
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
.rn = 1,
.entry_count = 200,
.mask_count = 0,
.profile = 0,
.ring_mask = (1 << LPFC_ELS_RING),
.buffer_count = 0,
.init_count = 40,
.add_count = 40,
};
/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
.rn = 1,
.entry_count = 200,
.mask_count = 0,
.profile = 0,
.ring_mask = (1 << LPFC_EXTRA_RING),
.buffer_count = 0,
.init_count = 0,
.add_count = 5,
};
/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
&lpfc_els_hbq,
&lpfc_extra_hbq,
};
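/*
 * Note (inferred from the fields above and lpfc_sli_hbqbuf_fill_hbqs
 * below): init_count buffers are posted when an HBQ is set up,
 * add_count buffers are posted per refill request, and the in-use
 * buffer_count is clamped so it never exceeds entry_count.
 */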
/**
* lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
* @phba: Pointer to HBA context object.
* @hbqno: HBQ number.
* @count: Number of HBQ buffers to be posted.
*
* This function is called with no lock held to post more hbq buffers to the
* given HBQ. The function returns the number of HBQ buffers successfully
* posted.
**/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
uint32_t i, posted = 0;
unsigned long flags;
struct hbq_dmabuf *hbq_buffer;
LIST_HEAD(hbq_buf_list);
if (!phba->hbqs[hbqno].hbq_alloc_buffer)
return 0;
if ((phba->hbqs[hbqno].buffer_count + count) >
lpfc_hbq_defs[hbqno]->entry_count)
count = lpfc_hbq_defs[hbqno]->entry_count -
phba->hbqs[hbqno].buffer_count;
if (!count)
return 0;
/* Allocate HBQ entries */
for (i = 0; i < count; i++) {
hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
if (!hbq_buffer)
break;
list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
}
/* Check whether HBQ is still in use */
spin_lock_irqsave(&phba->hbalock, flags);
if (!phba->hbq_in_use)
goto err;
while (!list_empty(&hbq_buf_list)) {
list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
dbuf.list);
hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
(hbqno << 16));
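/*
 * Buffer tag layout: the HBQ number lives in the upper 16 bits and
 * the buffer index in the lower 16 bits; lpfc_sli_hbqbuf_find()
 * recovers the HBQ with tag >> 16.
 */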
if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
phba->hbqs[hbqno].buffer_count++;
posted++;
} else
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
}
spin_unlock_irqrestore(&phba->hbalock, flags);
return posted;
err:
spin_unlock_irqrestore(&phba->hbalock, flags);
while (!list_empty(&hbq_buf_list)) {
list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
dbuf.list);
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
}
return 0;
}
/**
* lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
* @phba: Pointer to HBA context object.
* @qno: HBQ number.
*
* This function posts more buffers to the HBQ. This function
* is called with no lock held. The function returns the number of HBQ entries
* successfully allocated.
**/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
lpfc_hbq_defs[qno]->add_count));
}
/**
* lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
* @phba: Pointer to HBA context object.
* @qno: HBQ queue number.
*
* This function is called from SLI initialization code path with
* no lock held to post initial HBQ buffers to firmware. The
* function returns the number of HBQ entries successfully allocated.
**/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
lpfc_hbq_defs[qno]->init_count));
}
/**
* lpfc_sli_hbqbuf_get - Remove the first hbq buffer off an hbq list
* @rb_list: Pointer to the HBQ buffer list head.
*
* This function removes the first hbq buffer on an hbq list and returns a
* pointer to that buffer. If it finds no buffers on the list it returns NULL.
**/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
struct lpfc_dmabuf *d_buf;
list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
if (!d_buf)
return NULL;
return container_of(d_buf, struct hbq_dmabuf, dbuf);
}
/**
* lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
* @phba: Pointer to HBA context object.
* @tag: Tag of the hbq buffer.
*
* This function takes the hbalock internally while it searches
* for the hbq buffer associated with the given tag in the hbq buffer
* list. If it finds the hbq buffer, it returns the hbq_buffer;
* otherwise it returns NULL.
**/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
struct lpfc_dmabuf *d_buf;
struct hbq_dmabuf *hbq_buf;
uint32_t hbqno;
hbqno = tag >> 16;
if (hbqno >= LPFC_MAX_HBQS)
return NULL;
spin_lock_irq(&phba->hbalock);
list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
if (hbq_buf->tag == tag) {
spin_unlock_irq(&phba->hbalock);
return hbq_buf;
}
}
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
"1803 Bad hbq tag. Data: x%x x%x\n",
tag, phba->hbqs[tag >> 16].buffer_count);
return NULL;
}
/**
* lpfc_sli_free_hbq - Give back the hbq buffer to firmware
* @phba: Pointer to HBA context object.
* @hbq_buffer: Pointer to HBQ buffer.
*
* This function is called with the hbalock held. It gives back
* the hbq buffer to the firmware. If the HBQ does not have space to
* post the buffer, it frees the buffer instead.
**/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
uint32_t hbqno;
if (hbq_buffer) {
hbqno = hbq_buffer->tag >> 16;
if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
}
}
/**
* lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
* @mbxCommand: mailbox command code.
*
* This function is called by the mailbox event handler function to verify
* that the completed mailbox command is a legitimate mailbox command. If the
* completed mailbox is not known to the function, it will return MBX_SHUTDOWN
* and the mailbox event handler will take the HBA offline.
**/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
uint8_t ret;
switch (mbxCommand) {
case MBX_LOAD_SM:
case MBX_READ_NV:
case MBX_WRITE_NV:
case MBX_WRITE_VPARMS:
case MBX_RUN_BIU_DIAG:
case MBX_INIT_LINK:
case MBX_DOWN_LINK:
case MBX_CONFIG_LINK:
case MBX_CONFIG_RING:
case MBX_RESET_RING:
case MBX_READ_CONFIG:
case MBX_READ_RCONFIG:
case MBX_READ_SPARM:
case MBX_READ_STATUS:
case MBX_READ_RPI:
case MBX_READ_XRI:
case MBX_READ_REV:
case MBX_READ_LNK_STAT:
case MBX_REG_LOGIN:
case MBX_UNREG_LOGIN:
case MBX_READ_LA:
case MBX_CLEAR_LA:
case MBX_DUMP_MEMORY:
case MBX_DUMP_CONTEXT:
case MBX_RUN_DIAGS:
case MBX_RESTART:
case MBX_UPDATE_CFG:
case MBX_DOWN_LOAD:
case MBX_DEL_LD_ENTRY:
case MBX_RUN_PROGRAM:
case MBX_SET_MASK:
case MBX_SET_VARIABLE:
case MBX_UNREG_D_ID:
case MBX_KILL_BOARD:
case MBX_CONFIG_FARP:
case MBX_BEACON:
case MBX_LOAD_AREA:
case MBX_RUN_BIU_DIAG64:
case MBX_CONFIG_PORT:
case MBX_READ_SPARM64:
case MBX_READ_RPI64:
case MBX_REG_LOGIN64:
case MBX_READ_LA64:
case MBX_WRITE_WWN:
case MBX_SET_DEBUG:
case MBX_LOAD_EXP_ROM:
case MBX_ASYNCEVT_ENABLE:
case MBX_REG_VPI:
case MBX_UNREG_VPI:
case MBX_HEARTBEAT:
case MBX_PORT_CAPABILITIES:
case MBX_PORT_IOV_CONTROL:
case MBX_SLI4_CONFIG:
case MBX_SLI4_REQ_FTRS:
case MBX_REG_FCFI:
case MBX_UNREG_FCFI:
case MBX_REG_VFI:
case MBX_UNREG_VFI:
case MBX_INIT_VPI:
case MBX_INIT_VFI:
case MBX_RESUME_RPI:
ret = mbxCommand;
break;
default:
ret = MBX_SHUTDOWN;
break;
}
return ret;
}
/**
* lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to mailbox command.
*
* This is completion handler function for mailbox commands issued from
* lpfc_sli_issue_mbox_wait function. This function is called by the
* mailbox event handler function with no lock held. This function
* will wake up the thread waiting on the wait queue pointed to by context1
* of the mailbox.
**/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
wait_queue_head_t *pdone_q;
unsigned long drvr_flag;
/*
* If pdone_q is empty, the driver thread gave up waiting and
* continued running.
*/
pmboxq->mbox_flag |= LPFC_MBX_WAKE;
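/*
 * Mark the mailbox as completed before looking at context1 under the
 * lock; a waiter that gave up waiting (pdone_q already cleared) can
 * still see LPFC_MBX_WAKE and know the command finished.
 */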
spin_lock_irqsave(&phba->hbalock, drvr_flag);
pdone_q = (wait_queue_head_t *) pmboxq->context1;
if (pdone_q)
wake_up_interruptible(pdone_q);
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
return;
}
/**
* lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
* @phba: Pointer to HBA context object.
* @pmb: Pointer to mailbox object.
*
* This function is the default mailbox completion handler. It
* frees the memory resources associated with the completed mailbox
* command. If the completed command is a REG_LOGIN mailbox command,
* this function will issue an UNREG_LOGIN to re-claim the RPI.
**/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
struct lpfc_dmabuf *mp;
uint16_t rpi, vpi;
int rc;
mp = (struct lpfc_dmabuf *) (pmb->context1);
if (mp) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
}
if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
(phba->sli_rev == LPFC_SLI_REV4))
lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
/*
* If a REG_LOGIN succeeded after the node was destroyed or the node
* is in re-discovery, the driver needs to clean up the RPI.
*/
if (!(phba->pport->load_flag & FC_UNLOADING) &&
pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
!pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_NOT_FINISHED)
return;
}
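/*
 * SLI4_CONFIG mailboxes may carry extra resources that only the
 * SLI4-specific helper knows how to release; all other mailboxes go
 * straight back to the mailbox mempool.
 */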
if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
lpfc_sli4_mbox_cmd_free(phba, pmb);
else
mempool_free(pmb, phba->mbox_mem_pool);
}
/**
* lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
* @phba: Pointer to HBA context object.
*
* This function is called with no lock held. It processes all
* the completed mailbox commands and gives them to the upper layers. The
* interrupt service routine processes the mailbox completion interrupt,
* adds completed mailbox commands to the mboxq_cmpl queue and signals
* the worker thread. The worker thread calls lpfc_sli_handle_mb_event,
* which returns the completed mailbox commands in the mboxq_cmpl queue
* to the upper layers by calling the completion handler function of
* each mailbox.
**/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
MAILBOX_t *pmbox;
LPFC_MBOXQ_t *pmb;
int rc;
LIST_HEAD(cmplq);
phba->sli.slistat.mbox_event++;
/* Get all completed mailbox buffers into the cmplq */
spin_lock_irq(&phba->hbalock);
list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
spin_unlock_irq(&phba->hbalock);
/* Get a Mailbox buffer to setup mailbox commands for callback */
do {
list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
if (pmb == NULL)
break;
pmbox = &pmb->u.mb;
if (pmbox->mbxCommand != MBX_HEARTBEAT) {
if (pmb->vport) {
lpfc_debugfs_disc_trc(pmb->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
(uint32_t)pmbox->mbxCommand,
pmbox->un.varWords[0],
pmbox->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX cmpl: cmd:x%x mb:x%x x%x",
(uint32_t)pmbox->mbxCommand,
pmbox->un.varWords[0],
pmbox->un.varWords[1]);
}
}
/*
* It is a fatal error if an unknown mailbox command completes.
*/
if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
MBX_SHUTDOWN) {
/* Unknown mailbox command completion */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):0323 Unknown Mailbox command "
"x%x (x%x) Cmpl\n",
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
lpfc_sli4_mbox_opcode_get(phba, pmb));
phba->link_state = LPFC_HBA_ERROR;
phba->work_hs = HS_FFER3;
lpfc_handle_eratt(phba);
continue;
}
if (pmbox->mbxStatus) {
phba->sli.slistat.mbox_stat_err++;
if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
/* Mbox cmd cmpl error - RETRYing */
lpfc_printf_log(phba, KERN_INFO,
LOG_MBOX | LOG_SLI,
"(%d):0305 Mbox cmd cmpl "
"error - RETRYing Data: x%x "
"(x%x) x%x x%x x%x\n",
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
lpfc_sli4_mbox_opcode_get(phba,
pmb),
pmbox->mbxStatus,
pmbox->un.varWords[0],
pmb->vport ? pmb->vport->port_state : 0);
pmbox->mbxStatus = 0;
pmbox->mbxOwner = OWN_HOST;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_NOT_FINISHED)
continue;
}
}
/* Mailbox cmd <cmd> Cmpl <cmpl> */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
lpfc_sli4_mbox_opcode_get(phba, pmb),
pmb->mbox_cmpl,
*((uint32_t *) pmbox),
pmbox->un.varWords[0],
pmbox->un.varWords[1],
pmbox->un.varWords[2],
pmbox->un.varWords[3],
pmbox->un.varWords[4],
pmbox->un.varWords[5],
pmbox->un.varWords[6],
pmbox->un.varWords[7]);
if (pmb->mbox_cmpl)
pmb->mbox_cmpl(phba, pmb);
} while (1);
return 0;
}
/**
* lpfc_sli_get_buff - Get the buffer associated with the buffer tag
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @tag: buffer tag.
*
* This function is called with no lock held. When the QUE_BUFTAG_BIT
* is set in the tag, the buffer was posted for a particular exchange and
* the function returns the buffer without replacing it.
* If the buffer is for unsolicited ELS or CT traffic, this function
* returns the buffer and also posts another buffer to the firmware.
**/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring,
uint32_t tag)
{
struct hbq_dmabuf *hbq_entry;
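/*
 * Two tag namespaces are in use: tags with QUE_BUFTAG_BIT set refer
 * to buffers posted for a specific exchange on the ring, while all
 * other tags are HBQ tags (HBQ number in the upper 16 bits).
 */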
if (tag & QUE_BUFTAG_BIT)
return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
if (!hbq_entry)
return NULL;
return &hbq_entry->dbuf;
}
/**
* lpfc_complete_unsol_iocb - Complete an unsolicited sequence
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @saveq: Pointer to the iocbq struct representing the sequence starting frame.
* @fch_r_ctl: the r_ctl for the first frame of the sequence.
* @fch_type: the type for the first frame of the sequence.
*
* This function is called with no lock held. This function uses the r_ctl and
* type of the received sequence to find the correct callback function to call
* to process the sequence.
**/
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
uint32_t fch_type)
{
int i;
/* Unsolicited responses */
if (pring->prt[0].profile) {
if (pring->prt[0].lpfc_sli_rcv_unsol_event)
(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
saveq);
return 1;
}
/* We must search, based on rctl / type, for the right routine */
for (i = 0; i < pring->num_mask; i++) {
if ((pring->prt[i].rctl == fch_r_ctl) &&
(pring->prt[i].type == fch_type)) {
if (pring->prt[i].lpfc_sli_rcv_unsol_event)
(pring->prt[i].lpfc_sli_rcv_unsol_event)
(phba, pring, saveq);
return 1;
}
}
return 0;
}
/**
* lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @saveq: Pointer to the unsolicited iocb.
*
* This function is called with no lock held by the ring event handler
* when there is an unsolicited iocb posted to the response ring by the
* firmware. This function gets the buffer associated with the iocbs
* and calls the event handler for the ring. This function handles both
* qring buffers and hbq buffers.
* When the function returns 1 the caller can free the iocb object;
* otherwise upper layer functions will free the iocb objects.
**/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
{
IOCB_t * irsp;
WORD5 * w5p;
uint32_t Rctl, Type;
uint32_t match;
struct lpfc_iocbq *iocbq;
struct lpfc_dmabuf *dmzbuf;
match = 0;
irsp = &(saveq->iocb);
if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
if (pring->lpfc_sli_rcv_async_status)
pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
else
lpfc_printf_log(phba,
KERN_WARNING,
LOG_SLI,
"0316 Ring %d handler: unexpected "
"ASYNC_STATUS iocb received evt_code "
"0x%x\n",
pring->ringno,
irsp->un.asyncstat.evt_code);
return 1;
}
if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
if (irsp->ulpBdeCount > 0) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
irsp->un.ulpWord[3]);
lpfc_in_buf_free(phba, dmzbuf);
}
if (irsp->ulpBdeCount > 1) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
irsp->unsli3.sli3Words[3]);
lpfc_in_buf_free(phba, dmzbuf);
}
if (irsp->ulpBdeCount > 2) {
dmzbuf = lpfc_sli_get_buff(phba, pring,
irsp->unsli3.sli3Words[7]);
lpfc_in_buf_free(phba, dmzbuf);
}
return 1;
}
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
if (irsp->ulpBdeCount != 0) {
saveq->context2 = lpfc_sli_get_buff(phba, pring,
irsp->un.ulpWord[3]);
if (!saveq->context2)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
"0341 Ring %d Cannot find buffer for "
"an unsolicited iocb. tag 0x%x\n",
pring->ringno,
irsp->un.ulpWord[3]);
}
if (irsp->ulpBdeCount == 2) {
saveq->context3 = lpfc_sli_get_buff(phba, pring,
irsp->unsli3.sli3Words[7]);
if (!saveq->context3)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
"0342 Ring %d Cannot find buffer for an"
" unsolicited iocb. tag 0x%x\n",
pring->ringno,
irsp->unsli3.sli3Words[7]);
}
list_for_each_entry(iocbq, &saveq->list, list) {
irsp = &(iocbq->iocb);
if (irsp->ulpBdeCount != 0) {
iocbq->context2 = lpfc_sli_get_buff(phba, pring,
irsp->un.ulpWord[3]);
if (!iocbq->context2)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
"0343 Ring %d Cannot find "
"buffer for an unsolicited iocb"
". tag 0x%x\n", pring->ringno,
irsp->un.ulpWord[3]);
}
if (irsp->ulpBdeCount == 2) {
iocbq->context3 = lpfc_sli_get_buff(phba, pring,
irsp->unsli3.sli3Words[7]);
if (!iocbq->context3)
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
"0344 Ring %d Cannot find "
"buffer for an unsolicited "
"iocb. tag 0x%x\n",
pring->ringno,
irsp->unsli3.sli3Words[7]);
}
}
}
if (irsp->ulpBdeCount != 0 &&
(irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
int found = 0;
/* search continue save q for same XRI */
list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
list_add_tail(&saveq->list, &iocbq->list);
found = 1;
break;
}
}
if (!found)
list_add_tail(&saveq->clist,
&pring->iocb_continue_saveq);
if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
list_del_init(&iocbq->clist);
saveq = iocbq;
irsp = &(saveq->iocb);
} else
return 0;
}
if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
(irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
(irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
Rctl = FC_ELS_REQ;
Type = FC_ELS_DATA;
} else {
w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
Rctl = w5p->hcsw.Rctl;
Type = w5p->hcsw.Type;
/* Firmware Workaround */
if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
Rctl = FC_ELS_REQ;
Type = FC_ELS_DATA;
w5p->hcsw.Rctl = Rctl;
w5p->hcsw.Type = Type;
}
}
if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0313 Ring %d handler: unexpected Rctl x%x "
"Type x%x received\n",
pring->ringno, Rctl, Type);
return 1;
}
/**
* lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @prspiocb: Pointer to response iocb object.
*
* This function looks up the iocb_lookup table to get the command iocb
* corresponding to the given response iocb using the iotag of the
* response iocb. This function is called with the hbalock held.
* This function returns the command iocb object if it finds the command
* iocb else returns NULL.
**/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring,
struct lpfc_iocbq *prspiocb)
{
struct lpfc_iocbq *cmd_iocb = NULL;
uint16_t iotag;
iotag = prspiocb->iocb.ulpIoTag;
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
list_del_init(&cmd_iocb->list);
pring->txcmplq_cnt--;
return cmd_iocb;
}
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0317 iotag x%x is out off "
"range: max iotag x%x wd0 x%x\n",
iotag, phba->sli.last_iotag,
*(((uint32_t *) &prspiocb->iocb) + 7));
return NULL;
}
/**
* lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @iotag: IOCB tag.
*
* This function looks up the iocb_lookup table to get the command iocb
* corresponding to the given iotag. This function is called with the
* hbalock held.
* This function returns the command iocb object if it finds the command
* iocb else returns NULL.
**/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint16_t iotag)
{
struct lpfc_iocbq *cmd_iocb;
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
cmd_iocb = phba->sli.iocbq_lookup[iotag];
list_del_init(&cmd_iocb->list);
pring->txcmplq_cnt--;
return cmd_iocb;
}
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0372 iotag x%x is out off range: max iotag (x%x)\n",
iotag, phba->sli.last_iotag);
return NULL;
}
/**
* lpfc_sli_process_sol_iocb - process solicited iocb completion
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @saveq: Pointer to the response iocb to be processed.
*
* This function is called by the ring event handler for non-fcp
* rings when there is a new response iocb in the response ring.
* The caller is not required to hold any locks. This function
* gets the command iocb associated with the response iocb and
* calls the completion handler for the command iocb. If there
* is no completion handler, the function will free the resources
* associated with command iocb. If the response iocb is for
* an already aborted command iocb, the status of the completion
* is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
* This function always returns 1.
**/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *saveq)
{
struct lpfc_iocbq *cmdiocbp;
int rc = 1;
unsigned long iflag;
/* Based on the iotag field, get the cmd IOCB from the txcmplq */
spin_lock_irqsave(&phba->hbalock, iflag);
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (cmdiocbp) {
if (cmdiocbp->iocb_cmpl) {
/*
* If an ELS command failed send an event to mgmt
* application.
*/
if (saveq->iocb.ulpStatus &&
(pring->ringno == LPFC_ELS_RING) &&
(cmdiocbp->iocb.ulpCommand ==
CMD_ELS_REQUEST64_CR))
lpfc_send_els_failure_event(phba,
cmdiocbp, saveq);
/*
* Post all ELS completions to the worker thread.
* All other are passed to the completion callback.
*/
if (pring->ringno == LPFC_ELS_RING) {
if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
cmdiocbp->iocb_flag &=
~LPFC_DRIVER_ABORTED;
saveq->iocb.ulpStatus =
IOSTAT_LOCAL_REJECT;
saveq->iocb.un.ulpWord[4] =
IOERR_SLI_ABORTED;
/* Firmware could still be in progress
* of DMAing payload, so don't free data
* buffer till after a hbeat.
*/
saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
}
}
(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
} else
lpfc_sli_release_iocbq(phba, cmdiocbp);
} else {
/*
* Unknown initiating command based on the response iotag.
* This could be the case on the ELS ring because of
* lpfc_els_abort().
*/
if (pring->ringno != LPFC_ELS_RING) {
/*
* Ring <ringno> handler: unexpected completion IoTag
* <IoTag>
*/
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0322 Ring %d handler: "
"unexpected completion IoTag x%x "
"Data: x%x x%x x%x x%x\n",
pring->ringno,
saveq->iocb.ulpIoTag,
saveq->iocb.ulpStatus,
saveq->iocb.un.ulpWord[4],
saveq->iocb.ulpCommand,
saveq->iocb.ulpContext);
}
}
return rc;
}
/**
* lpfc_sli_rsp_pointers_error - Response ring pointer error handler
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
*
* This function is called from the iocb ring event handlers when the
* put pointer is ahead of the get pointer for a ring. This function
* signals an error attention condition to the worker thread, and the
* worker thread will transition the HBA to the offline state.
**/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
/*
* Ring <ringno> handler: portRspPut <portRspPut> is bigger than
* rsp ring <portRspMax>
*/
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0312 Ring %d handler: portRspPut %d "
"is bigger than rsp ring %d\n",
pring->ringno, le32_to_cpu(pgp->rspPutInx),
pring->numRiocb);
phba->link_state = LPFC_HBA_ERROR;
/*
* All error attention handlers are posted to
* worker thread
*/
phba->work_ha |= HA_ERATT;
phba->work_hs = HS_FFER3;
lpfc_worker_wake_up(phba);
return;
}
/**
* lpfc_poll_eratt - Error attention polling timer timeout handler
* @ptr: Pointer to address of HBA context object.
*
* This function is invoked by the Error Attention polling timer when the
* timer times out. It will check the SLI Error Attention register for
* possible attention events. If so, it will post an Error Attention event
* and wake up worker thread to process it. Otherwise, it will set up the
* Error Attention polling timer for the next poll.
**/
void lpfc_poll_eratt(unsigned long ptr)
{
struct lpfc_hba *phba;
uint32_t eratt = 0;
phba = (struct lpfc_hba *)ptr;
/* Check chip HA register for error event */
eratt = lpfc_sli_check_eratt(phba);
if (eratt)
/* Tell the worker thread there is work to do */
lpfc_worker_wake_up(phba);
else
/* Restart the timer for next eratt poll */
mod_timer(&phba->eratt_poll, jiffies +
HZ * LPFC_ERATT_POLL_INTERVAL);
return;
}
/**
* lpfc_sli_poll_fcp_ring - Handle FCP ring completion in polling mode
* @phba: Pointer to HBA context object.
*
* This function is called from lpfc_queuecommand, lpfc_poll_timeout,
* lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING
* is enabled.
*
* The caller does not hold any lock.
* The function processes each response iocb in the response ring until it
* finds an iocb with the LE bit set and chains all the iocbs up to the iocb with
* LE bit set. The function will call the completion handler of the command iocb
* if the response iocb indicates a completion for a command iocb or it is
* an abort completion.
**/
void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
IOCB_t *irsp = NULL;
IOCB_t *entry = NULL;
struct lpfc_iocbq *cmdiocbq = NULL;
struct lpfc_iocbq rspiocbq;
struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
uint32_t status;
uint32_t portRspPut, portRspMax;
int type;
uint32_t rsp_cmpl = 0;
uint32_t ha_copy;
unsigned long iflags;
pring->stats.iocb_event++;
/*
* The next available response entry should never exceed the maximum
* entries. If it does, treat it as an adapter hardware error.
*/
portRspMax = pring->numRiocb;
portRspPut = le32_to_cpu(pgp->rspPutInx);
if (unlikely(portRspPut >= portRspMax)) {
lpfc_sli_rsp_pointers_error(phba, pring);
return;
}
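/*
 * Read barrier: make sure the rspPutInx snapshot above is read before
 * any of the response entries it covers.
 */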
rmb();
while (pring->rspidx != portRspPut) {
entry = lpfc_resp_iocb(phba, pring);
if (++pring->rspidx >= portRspMax)
pring->rspidx = 0;
lpfc_sli_pcimem_bcopy((uint32_t *) entry,
(uint32_t *) &rspiocbq.iocb,
phba->iocb_rsp_size);
irsp = &rspiocbq.iocb;
type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
pring->stats.iocb_rsp++;
rsp_cmpl++;
if (unlikely(irsp->ulpStatus)) {
/* Rsp ring <ringno> error: IOCB */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0326 Rsp Ring %d error: IOCB Data: "
"x%x x%x x%x x%x x%x x%x x%x x%x\n",
pring->ringno,
irsp->un.ulpWord[0],
irsp->un.ulpWord[1],
irsp->un.ulpWord[2],
irsp->un.ulpWord[3],
irsp->un.ulpWord[4],
irsp->un.ulpWord[5],
*(uint32_t *)&irsp->un1,
*((uint32_t *)&irsp->un1 + 1));
}
switch (type) {
case LPFC_ABORT_IOCB:
case LPFC_SOL_IOCB:
/*
* Idle exchange closed via ABTS from port. No iocb
* resources need to be recovered.
*/
if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0314 IOCB cmd 0x%x "
"processed. Skipping "
"completion",
irsp->ulpCommand);
break;
}
spin_lock_irqsave(&phba->hbalock, iflags);
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
&rspiocbq);
}
break;
default:
if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
char adaptermsg[LPFC_MAX_ADPTMSG];
memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
memcpy(&adaptermsg[0], (uint8_t *) irsp,
MAX_MSG_DATA);
dev_warn(&((phba->pcidev)->dev),
"lpfc%d: %s\n",
phba->brd_no, adaptermsg);
} else {
/* Unknown IOCB command */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0321 Unknown IOCB command "
"Data: x%x, x%x x%x x%x x%x\n",
type, irsp->ulpCommand,
irsp->ulpStatus,
irsp->ulpIoTag,
irsp->ulpContext);
}
break;
}
/*
* The response IOCB has been processed. Update the ring
* pointer in SLIM. If the port response put pointer has not
* been updated, sync the pgp->rspPutInx and fetch the new port
* response put pointer.
*/
writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
if (pring->rspidx == portRspPut)
portRspPut = le32_to_cpu(pgp->rspPutInx);
}
ha_copy = readl(phba->HAregaddr);
ha_copy >>= (LPFC_FCP_RING * 4);
if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
spin_lock_irqsave(&phba->hbalock, iflags);
pring->stats.iocb_rsp_full++;
status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
writel(status, phba->CAregaddr);
readl(phba->CAregaddr);
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
if ((ha_copy & HA_R0CE_RSP) &&
(pring->flag & LPFC_CALL_RING_AVAILABLE)) {
spin_lock_irqsave(&phba->hbalock, iflags);
pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
pring->stats.iocb_cmd_empty++;
/* Force update of the local copy of cmdGetInx */
pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
lpfc_sli_resume_iocb(phba, pring);
if ((pring->lpfc_sli_cmd_available))
(pring->lpfc_sli_cmd_available) (phba, pring);
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
return;
}
/**
* lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @mask: Host attention register mask for this ring.
*
* This function is called from the interrupt context when there is a ring
* event for the fcp ring. The caller does not hold any lock.
* The function processes each response iocb in the response ring until it
* finds an iocb with the LE bit set and chains all the iocbs up to the iocb with
* LE bit set. The function will call the completion handler of the command iocb
* if the response iocb indicates a completion for a command iocb or it is
* an abort completion. The function will call lpfc_sli_process_unsol_iocb
* function if this is an unsolicited iocb.
* This routine presumes LPFC_FCP_RING handling and doesn't bother
* to check it explicitly. This function always returns 1.
**/
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint32_t mask)
{
struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
IOCB_t *irsp = NULL;
IOCB_t *entry = NULL;
struct lpfc_iocbq *cmdiocbq = NULL;
struct lpfc_iocbq rspiocbq;
uint32_t status;
uint32_t portRspPut, portRspMax;
int rc = 1;
lpfc_iocb_type type;
unsigned long iflag;
uint32_t rsp_cmpl = 0;
spin_lock_irqsave(&phba->hbalock, iflag);
pring->stats.iocb_event++;
/*
* The next available response entry should never exceed the maximum
* entries. If it does, treat it as an adapter hardware error.
*/
portRspMax = pring->numRiocb;
portRspPut = le32_to_cpu(pgp->rspPutInx);
if (unlikely(portRspPut >= portRspMax)) {
lpfc_sli_rsp_pointers_error(phba, pring);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return 1;
}
rmb();
while (pring->rspidx != portRspPut) {
/*
* Fetch an entry off the ring and copy it into a local data
* structure. The copy involves a byte-swap since the
* network byte order and pci byte orders are different.
*/
entry = lpfc_resp_iocb(phba, pring);
phba->last_completion_time = jiffies;
if (++pring->rspidx >= portRspMax)
pring->rspidx = 0;
lpfc_sli_pcimem_bcopy((uint32_t *) entry,
(uint32_t *) &rspiocbq.iocb,
phba->iocb_rsp_size);
INIT_LIST_HEAD(&(rspiocbq.list));
irsp = &rspiocbq.iocb;
type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
pring->stats.iocb_rsp++;
rsp_cmpl++;
if (unlikely(irsp->ulpStatus)) {
/*
* If resource errors reported from HBA, reduce
* queuedepths of the SCSI device.
*/
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
phba->lpfc_rampdown_queue_depth(phba);
spin_lock_irqsave(&phba->hbalock, iflag);
}
/* Rsp ring <ringno> error: IOCB */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0336 Rsp Ring %d error: IOCB Data: "
"x%x x%x x%x x%x x%x x%x x%x x%x\n",
pring->ringno,
irsp->un.ulpWord[0],
irsp->un.ulpWord[1],
irsp->un.ulpWord[2],
irsp->un.ulpWord[3],
irsp->un.ulpWord[4],
irsp->un.ulpWord[5],
*(uint32_t *)&irsp->un1,
*((uint32_t *)&irsp->un1 + 1));
}
switch (type) {
case LPFC_ABORT_IOCB:
case LPFC_SOL_IOCB:
/*
* Idle exchange closed via ABTS from port. No iocb
* resources need to be recovered.
*/
if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0333 IOCB cmd 0x%x"
" processed. Skipping"
" completion\n",
irsp->ulpCommand);
break;
}
cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
&rspiocbq);
if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
&rspiocbq);
} else {
spin_unlock_irqrestore(&phba->hbalock,
iflag);
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
&rspiocbq);
spin_lock_irqsave(&phba->hbalock,
iflag);
}
}
break;
case LPFC_UNSOL_IOCB:
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
spin_lock_irqsave(&phba->hbalock, iflag);
break;
default:
if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
char adaptermsg[LPFC_MAX_ADPTMSG];
memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
memcpy(&adaptermsg[0], (uint8_t *) irsp,
MAX_MSG_DATA);
dev_warn(&((phba->pcidev)->dev),
"lpfc%d: %s\n",
phba->brd_no, adaptermsg);
} else {
/* Unknown IOCB command */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0334 Unknown IOCB command "
"Data: x%x, x%x x%x x%x x%x\n",
type, irsp->ulpCommand,
irsp->ulpStatus,
irsp->ulpIoTag,
irsp->ulpContext);
}
break;
}
/*
* The response IOCB has been processed. Update the ring
* pointer in SLIM. If the port response put pointer has not
* been updated, sync the pgp->rspPutInx and fetch the new port
* response put pointer.
*/
writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
if (pring->rspidx == portRspPut)
portRspPut = le32_to_cpu(pgp->rspPutInx);
}
if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
pring->stats.iocb_rsp_full++;
status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
writel(status, phba->CAregaddr);
readl(phba->CAregaddr);
}
if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
pring->stats.iocb_cmd_empty++;
/* Force update of the local copy of cmdGetInx */
pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
lpfc_sli_resume_iocb(phba, pring);
if ((pring->lpfc_sli_cmd_available))
(pring->lpfc_sli_cmd_available) (phba, pring);
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
return rc;
}
/**
* lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @rspiocbp: Pointer to driver response IOCB object.
*
* This function is called from the worker thread when there is a slow-path
* response IOCB to process. This function chains all the response iocbs until
* seeing the iocb with the LE bit set. The function will call
* lpfc_sli_process_sol_iocb function if the response iocb indicates a
* completion of a command iocb. The function will call the
* lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
* The function frees the resources or calls the completion handler if this
* iocb is an abort completion. The function returns NULL when the response
* iocb has the LE bit set and all the chained iocbs are processed, otherwise
* this function shall chain the iocb on to the iocb_continueq and return the
* response iocb passed in.
**/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *rspiocbp)
{
struct lpfc_iocbq *saveq;
struct lpfc_iocbq *cmdiocbp;
struct lpfc_iocbq *next_iocb;
IOCB_t *irsp = NULL;
uint32_t free_saveq;
uint8_t iocb_cmd_type;
lpfc_iocb_type type;
unsigned long iflag;
int rc;
spin_lock_irqsave(&phba->hbalock, iflag);
/* First add the response iocb to the continueq list */
list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
pring->iocb_continueq_cnt++;
/* Now, determine whether the list is complete for processing */
irsp = &rspiocbp->iocb;
if (irsp->ulpLe) {
/*
* By default, the driver expects to free all resources
* associated with this iocb completion.
*/
free_saveq = 1;
saveq = list_get_first(&pring->iocb_continueq,
struct lpfc_iocbq, list);
irsp = &(saveq->iocb);
list_del_init(&pring->iocb_continueq);
pring->iocb_continueq_cnt = 0;
pring->stats.iocb_rsp++;
/*
* If resource errors reported from HBA, reduce
* queuedepths of the SCSI device.
*/
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
phba->lpfc_rampdown_queue_depth(phba);
spin_lock_irqsave(&phba->hbalock, iflag);
}
if (irsp->ulpStatus) {
/* Rsp ring <ringno> error: IOCB */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0328 Rsp Ring %d error: "
"IOCB Data: "
"x%x x%x x%x x%x "
"x%x x%x x%x x%x "
"x%x x%x x%x x%x "
"x%x x%x x%x x%x\n",
pring->ringno,
irsp->un.ulpWord[0],
irsp->un.ulpWord[1],
irsp->un.ulpWord[2],
irsp->un.ulpWord[3],
irsp->un.ulpWord[4],
irsp->un.ulpWord[5],
*(((uint32_t *) irsp) + 6),
*(((uint32_t *) irsp) + 7),
*(((uint32_t *) irsp) + 8),
*(((uint32_t *) irsp) + 9),
*(((uint32_t *) irsp) + 10),
*(((uint32_t *) irsp) + 11),
*(((uint32_t *) irsp) + 12),
*(((uint32_t *) irsp) + 13),
*(((uint32_t *) irsp) + 14),
*(((uint32_t *) irsp) + 15));
}
/*
* Fetch the IOCB command type and call the correct completion
* routine. Solicited and Unsolicited IOCBs on the ELS ring
* get freed back to the lpfc_iocb_list by the discovery
* kernel thread.
*/
iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
switch (type) {
case LPFC_SOL_IOCB:
spin_unlock_irqrestore(&phba->hbalock, iflag);
rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
spin_lock_irqsave(&phba->hbalock, iflag);
break;
case LPFC_UNSOL_IOCB:
spin_unlock_irqrestore(&phba->hbalock, iflag);
rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
spin_lock_irqsave(&phba->hbalock, iflag);
if (!rc)
free_saveq = 0;
break;
case LPFC_ABORT_IOCB:
cmdiocbp = NULL;
if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
saveq);
if (cmdiocbp) {
/* Call the specified completion routine */
if (cmdiocbp->iocb_cmpl) {
spin_unlock_irqrestore(&phba->hbalock,
iflag);
(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
saveq);
spin_lock_irqsave(&phba->hbalock,
iflag);
} else
__lpfc_sli_release_iocbq(phba,
cmdiocbp);
}
break;
case LPFC_UNKNOWN_IOCB:
if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
char adaptermsg[LPFC_MAX_ADPTMSG];
memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
memcpy(&adaptermsg[0], (uint8_t *)irsp,
MAX_MSG_DATA);
dev_warn(&((phba->pcidev)->dev),
"lpfc%d: %s\n",
phba->brd_no, adaptermsg);
} else {
/* Unknown IOCB command */
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0335 Unknown IOCB "
"command Data: x%x "
"x%x x%x x%x\n",
irsp->ulpCommand,
irsp->ulpStatus,
irsp->ulpIoTag,
irsp->ulpContext);
}
break;
}
if (free_saveq) {
list_for_each_entry_safe(rspiocbp, next_iocb,
&saveq->list, list) {
list_del(&rspiocbp->list);
__lpfc_sli_release_iocbq(phba, rspiocbp);
}
__lpfc_sli_release_iocbq(phba, saveq);
}
rspiocbp = NULL;
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
return rspiocbp;
}
/**
* lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @mask: Host attention register mask for this ring.
*
* This routine wraps the actual slow_ring event process routine from the
* API jump table function pointer from the lpfc_hba struct.
**/
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint32_t mask)
{
phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
/**
* lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @mask: Host attention register mask for this ring.
*
* This function is called from the worker thread when there is a ring event
* for non-fcp rings. The caller does not hold any lock. The function
* removes each response iocb in the response ring and calls the handle
* response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
**/
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint32_t mask)
{
struct lpfc_pgp *pgp;
IOCB_t *entry;
IOCB_t *irsp = NULL;
struct lpfc_iocbq *rspiocbp = NULL;
uint32_t portRspPut, portRspMax;
unsigned long iflag;
uint32_t status;
pgp = &phba->port_gp[pring->ringno];
spin_lock_irqsave(&phba->hbalock, iflag);
pring->stats.iocb_event++;
/*
* The next available response entry should never exceed the maximum
* entries. If it does, treat it as an adapter hardware error.
*/
portRspMax = pring->numRiocb;
portRspPut = le32_to_cpu(pgp->rspPutInx);
if (portRspPut >= portRspMax) {
/*
* Ring <ringno> handler: portRspPut <portRspPut> is bigger than
* rsp ring <portRspMax>
*/
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0303 Ring %d handler: portRspPut %d "
"is bigger than rsp ring %d\n",
pring->ringno, portRspPut, portRspMax);
phba->link_state = LPFC_HBA_ERROR;
spin_unlock_irqrestore(&phba->hbalock, iflag);
phba->work_hs = HS_FFER3;
lpfc_handle_eratt(phba);
return;
}
rmb();
while (pring->rspidx != portRspPut) {
/*
* Build a completion list and call the appropriate handler.
* The process is to get the next available response iocb, get
* a free iocb from the list, copy the response data into the
* free iocb, insert to the continuation list, and update the
* next response index to slim. This process makes response
* iocbs in the ring available to DMA as fast as possible but
* pays a penalty for a copy operation. Since the iocb is
* only 32 bytes, this penalty is considered small relative to
* the PCI reads for register values and a slim write. When
* the ulpLe field is set, the entire Command has been
* received.
*/
entry = lpfc_resp_iocb(phba, pring);
phba->last_completion_time = jiffies;
rspiocbp = __lpfc_sli_get_iocbq(phba);
if (rspiocbp == NULL) {
printk(KERN_ERR "%s: out of buffers! Failing "
"completion.\n", __func__);
break;
}
lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
phba->iocb_rsp_size);
irsp = &rspiocbp->iocb;
if (++pring->rspidx >= portRspMax)
pring->rspidx = 0;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_debugfs_slow_ring_trc(phba,
"IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
*(((uint32_t *) irsp) + 4),
*(((uint32_t *) irsp) + 6),
*(((uint32_t *) irsp) + 7));
}
writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
spin_unlock_irqrestore(&phba->hbalock, iflag);
/* Handle the response IOCB */
rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
spin_lock_irqsave(&phba->hbalock, iflag);
/*
* If the port response put pointer has not been updated, sync
* the pgp->rspPutInx in the MAILBOX_t and fetch the new port
* response put pointer.
*/
if (pring->rspidx == portRspPut) {
portRspPut = le32_to_cpu(pgp->rspPutInx);
}
} /* while (pring->rspidx != portRspPut) */
if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
/* At least one response entry has been freed */
pring->stats.iocb_rsp_full++;
/* SET RxRE_RSP in Chip Att register */
status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
writel(status, phba->CAregaddr);
readl(phba->CAregaddr); /* flush */
}
if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
pring->stats.iocb_cmd_empty++;
/* Force update of the local copy of cmdGetInx */
pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
lpfc_sli_resume_iocb(phba, pring);
if ((pring->lpfc_sli_cmd_available))
(pring->lpfc_sli_cmd_available) (phba, pring);
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
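/*
 * Illustrative sketch (not part of the driver): the slow-ring handler
 * above is an instance of the single-consumer circular-ring pattern,
 * where the host advances a get index (pring->rspidx) toward a
 * port-owned put index and wraps at the ring size. A minimal,
 * self-contained model of that pattern, with hypothetical names, might
 * look like this:
 *
 *	#include <stdint.h>
 *
 *	#define RING_ENTRIES 64
 *
 *	struct rsp_ring {
 *		uint32_t entries[RING_ENTRIES];
 *		uint32_t getidx;	// consumer index, like pring->rspidx
 *	};
 *
 *	// Consume every entry up to the producer's put index, wrapping
 *	// the get index at the ring size just as rspidx wraps at
 *	// portRspMax. The real handler also re-reads the put index after
 *	// catching up, since the port may have produced more entries.
 *	static void consume_ring(struct rsp_ring *r, uint32_t putidx,
 *				 void (*handle)(uint32_t entry))
 *	{
 *		while (r->getidx != putidx) {
 *			handle(r->entries[r->getidx]);
 *			if (++r->getidx >= RING_ENTRIES)
 *				r->getidx = 0;
 *		}
 *	}
 */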
/**
* lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @mask: Host attention register mask for this ring.
*
* This function is called from the worker thread when there is a pending
* ELS response iocb on the driver internal slow-path response iocb worker
* queue. The caller does not hold any lock. The function removes each
* response iocb from the response worker queue and calls the handle
* response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
**/
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint32_t mask)
{
struct lpfc_iocbq *irspiocbq;
unsigned long iflag;
while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
/* Get the response iocb from the head of work queue */
spin_lock_irqsave(&phba->hbalock, iflag);
list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
irspiocbq, struct lpfc_iocbq, list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
/* Process the response iocb */
lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
}
}
/**
* lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
*
* This function aborts all iocbs in the given ring and frees all the iocb
* objects in txq. This function issues an abort iocb for all the iocb commands
* in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
* the return of this function. The caller is not required to hold any locks.
**/
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
LIST_HEAD(completions);
struct lpfc_iocbq *iocb, *next_iocb;
if (pring->ringno == LPFC_ELS_RING) {
lpfc_fabric_abort_hba(phba);
}
/* Error everything on txq and txcmplq
* First do the txq.
*/
spin_lock_irq(&phba->hbalock);
list_splice_init(&pring->txq, &completions);
pring->txq_cnt = 0;
/* Next issue ABTS for everything on the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
spin_unlock_irq(&phba->hbalock);
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
}
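/*
 * Illustrative sketch (not part of the driver): lpfc_sli_abort_iocb_ring
 * uses the common "splice under lock, complete outside the lock" idiom so
 * completion handling never runs with hbalock held. A hedged, generic
 * model of the idiom using the kernel list API (struct item and cancel()
 * are hypothetical):
 *
 *	#include <linux/list.h>
 *	#include <linux/spinlock.h>
 *
 *	struct item { struct list_head list; };
 *
 *	static void drain_queue(spinlock_t *lock, struct list_head *txq,
 *				void (*cancel)(struct item *))
 *	{
 *		LIST_HEAD(completions);
 *		struct item *it, *next;
 *
 *		spin_lock_irq(lock);
 *		list_splice_init(txq, &completions);	// steal the queue
 *		spin_unlock_irq(lock);
 *
 *		// Safe iteration: cancel() may free the item.
 *		list_for_each_entry_safe(it, next, &completions, list) {
 *			list_del_init(&it->list);
 *			cancel(it);
 *		}
 *	}
 */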
/**
* lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
* @phba: Pointer to HBA context object.
*
* This function flushes all iocbs in the fcp ring and frees all the iocb
* objects in txq and txcmplq. This function does not issue abort iocbs
* for the iocb commands in txcmplq; they are just returned with
* IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
* slot has been permanently disabled.
**/
void
lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
{
LIST_HEAD(txq);
LIST_HEAD(txcmplq);
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
/* Currently, only one fcp ring */
pring = &psli->ring[psli->fcp_ring];
spin_lock_irq(&phba->hbalock);
/* Retrieve everything on txq */
list_splice_init(&pring->txq, &txq);
pring->txq_cnt = 0;
/* Retrieve everything on the txcmplq */
list_splice_init(&pring->txcmplq, &txcmplq);
pring->txcmplq_cnt = 0;
spin_unlock_irq(&phba->hbalock);
/* Flush the txq */
lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
IOERR_SLI_DOWN);
/* Flush the txcmpq */
lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
IOERR_SLI_DOWN);
}
/**
* lpfc_sli_brdready_s3 - Check for sli3 host ready status
* @phba: Pointer to HBA context object.
* @mask: Bit mask to be checked.
*
* This function reads the host status register and compares
* with the provided bit mask to check if HBA completed
* the restart. This function will wait in a loop for the
* HBA to complete restart. If the HBA does not restart within
* 15 iterations, the function will reset the HBA again. The
* function returns 1 when the HBA fails to restart, otherwise
* returns zero.
**/
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
uint32_t status;
int i = 0;
int retval = 0;
/* Read the HBA Host Status Register */
status = readl(phba->HSregaddr);
/*
* Check the status register every 10ms for 5 retries, then every
* 500ms for 5, then every 2.5 sec for 5, then reset the board and
* check every 2.5 sec thereafter.
* Break out of the loop if errors occurred during init.
*/
while (((status & mask) != mask) &&
!(status & HS_FFERM) &&
i++ < 20) {
if (i <= 5)
msleep(10);
else if (i <= 10)
msleep(500);
else
msleep(2500);
if (i == 15) {
/* Do post */
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
}
/* Read the HBA Host Status Register */
status = readl(phba->HSregaddr);
}
/* Check to see if any errors occurred during init */
if ((status & HS_FFERM) || (i >= 20)) {
phba->link_state = LPFC_HBA_ERROR;
retval = 1;
}
return retval;
}
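/*
 * Illustrative sketch (not part of the driver): the readiness loop above
 * is a stepped-backoff poll. A compact model of the same schedule (5
 * short polls, 5 medium, then long polls with a one-time recovery
 * action); ready() and recover() are hypothetical callbacks and the
 * kernel msleep()/-ETIMEDOUT helpers are assumed:
 *
 *	static int poll_ready(int (*ready)(void), void (*recover)(void))
 *	{
 *		int i = 0;
 *
 *		while (!ready() && i++ < 20) {
 *			if (i <= 5)
 *				msleep(10);	// fast polls first
 *			else if (i <= 10)
 *				msleep(500);	// back off
 *			else
 *				msleep(2500);	// slow polls
 *			if (i == 15)
 *				recover();	// one-time reset attempt
 *		}
 *		return ready() ? 0 : -ETIMEDOUT;
 *	}
 */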
/**
* lpfc_sli_brdready_s4 - Check for sli4 host ready status
* @phba: Pointer to HBA context object.
* @mask: Bit mask to be checked.
*
* This function checks the host status register to see if the HBA is
* ready. This function will wait in a loop for the HBA to be ready.
* If the HBA is not ready, the function will reset the HBA PCI
* function again. The function returns 1 when the HBA fails to be
* ready, otherwise returns zero.
**/
static int
lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
{
uint32_t status;
int retval = 0;
/* Read the HBA Host Status Register */
status = lpfc_sli4_post_status_check(phba);
if (status) {
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
status = lpfc_sli4_post_status_check(phba);
}
/* Check to see if any errors occurred during init */
if (status) {
phba->link_state = LPFC_HBA_ERROR;
retval = 1;
} else
phba->sli4_hba.intr_enable = 0;
return retval;
}
/**
* lpfc_sli_brdready - Wrapper func for checking the hba readiness
* @phba: Pointer to HBA context object.
* @mask: Bit mask to be checked.
*
* This routine wraps the actual SLI3 or SLI4 hba readiness check routine
* using the API jump table function pointer from the lpfc_hba struct.
**/
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
return phba->lpfc_sli_brdready(phba, mask);
}
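/*
 * Illustrative sketch (not part of the driver): the wrapper above relies
 * on a per-interface function pointer installed at probe time, so common
 * code never branches on the SLI revision. A generic model of the jump
 * table pattern (hba and hba_ops are hypothetical; lpfc stores the
 * pointers directly in struct lpfc_hba rather than in an ops struct):
 *
 *	#include <linux/types.h>
 *
 *	struct hba;
 *
 *	struct hba_ops {
 *		int (*brdready)(struct hba *h, u32 mask);
 *		int (*brdrestart)(struct hba *h);
 *	};
 *
 *	struct hba {
 *		const struct hba_ops *ops;	// chosen per SLI rev
 *	};
 *
 *	static int hba_brdready(struct hba *h, u32 mask)
 *	{
 *		return h->ops->brdready(h, mask);	// dispatch
 *	}
 */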
#define BARRIER_TEST_PATTERN (0xdeadbeef)
/**
* lpfc_reset_barrier - Make HBA ready for HBA reset
* @phba: Pointer to HBA context object.
*
* This function is called before resetting an HBA. It
* requests the HBA to quiesce DMA activity before the reset.
**/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
uint32_t __iomem *resp_buf;
uint32_t __iomem *mbox_buf;
volatile uint32_t mbox;
uint32_t hc_copy;
int i;
uint8_t hdrtype;
pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
if (hdrtype != 0x80 ||
(FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
return;
/*
* Tell the other part of the chip to temporarily suspend all
* its DMA activity.
*/
resp_buf = phba->MBslimaddr;
/* Disable the error attention */
hc_copy = readl(phba->HCregaddr);
writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
phba->link_flag |= LS_IGNORE_ERATT;
if (readl(phba->HAregaddr) & HA_ERATT) {
/* Clear Chip error bit */
writel(HA_ERATT, phba->HAregaddr);
phba->pport->stopped = 1;
}
mbox = 0;
((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
mbox_buf = phba->MBslimaddr;
writel(mbox, mbox_buf);
for (i = 0;
readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
mdelay(1);
if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
phba->pport->stopped)
goto restore_hc;
else
goto clear_errat;
}
((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
for (i = 0; readl(resp_buf) != mbox && i < 500; i++)
mdelay(1);
clear_errat:
while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
mdelay(1);
if (readl(phba->HAregaddr) & HA_ERATT) {
writel(HA_ERATT, phba->HAregaddr);
phba->pport->stopped = 1;
}
restore_hc:
phba->link_flag &= ~LS_IGNORE_ERATT;
writel(hc_copy, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
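/*
 * Illustrative sketch (not part of the driver): lpfc_reset_barrier uses a
 * write-pattern/poll-for-complement handshake - the host writes
 * BARRIER_TEST_PATTERN into a SLIM word and the firmware acknowledges the
 * kill request by writing back the bitwise complement. A generic model of
 * that handshake against a hypothetical scratch register:
 *
 *	#include <linux/types.h>
 *	#include <linux/io.h>
 *	#include <linux/delay.h>
 *
 *	#define TEST_PATTERN 0xdeadbeef
 *
 *	static bool pattern_handshake(void __iomem *scratch, int timeout_ms)
 *	{
 *		int i;
 *
 *		writel(TEST_PATTERN, scratch);
 *		for (i = 0; i < timeout_ms; i++) {
 *			if (readl(scratch) == ~TEST_PATTERN)
 *				return true;	// firmware responded
 *			mdelay(1);
 *		}
 *		return false;		// no acknowledgement in time
 *	}
 */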
/**
* lpfc_sli_brdkill - Issue a kill_board mailbox command
* @phba: Pointer to HBA context object.
*
* This function issues a kill_board mailbox command and waits for
* the error attention interrupt. This function is called for stopping
* the firmware processing. The caller is not required to hold any
* locks. This function calls lpfc_hba_down_post function to free
* any pending commands after the kill. The function will return 1 when it
* fails to kill the board, else it will return 0.
**/
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
LPFC_MBOXQ_t *pmb;
uint32_t status;
uint32_t ha_copy;
int retval;
int i = 0;
psli = &phba->sli;
/* Kill HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0329 Kill HBA Data: x%x x%x\n",
phba->pport->port_state, psli->sli_flag);
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb)
return 1;
/* Disable the error attention */
spin_lock_irq(&phba->hbalock);
status = readl(phba->HCregaddr);
status &= ~HC_ERINT_ENA;
writel(status, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
phba->link_flag |= LS_IGNORE_ERATT;
spin_unlock_irq(&phba->hbalock);
lpfc_kill_board(phba, pmb);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (retval != MBX_SUCCESS) {
if (retval != MBX_BUSY)
mempool_free(pmb, phba->mbox_mem_pool);
spin_lock_irq(&phba->hbalock);
phba->link_flag &= ~LS_IGNORE_ERATT;
spin_unlock_irq(&phba->hbalock);
return 1;
}
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
mempool_free(pmb, phba->mbox_mem_pool);
/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
* attention every 100ms for 3 seconds. If we don't get ERATT after
* 3 seconds we still set HBA_ERROR state because the status of the
* board is now undefined.
*/
ha_copy = readl(phba->HAregaddr);
while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
mdelay(100);
ha_copy = readl(phba->HAregaddr);
}
del_timer_sync(&psli->mbox_tmo);
if (ha_copy & HA_ERATT) {
writel(HA_ERATT, phba->HAregaddr);
phba->pport->stopped = 1;
}
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
psli->mbox_active = NULL;
phba->link_flag &= ~LS_IGNORE_ERATT;
spin_unlock_irq(&phba->hbalock);
lpfc_hba_down_post(phba);
phba->link_state = LPFC_HBA_ERROR;
return ha_copy & HA_ERATT ? 0 : 1;
}
/**
* lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
* @phba: Pointer to HBA context object.
*
* This function resets the HBA by writing HC_INITFF to the control
* register. After the HBA resets, this function resets all the iocb ring
* indices. This function disables PCI layer parity checking during
* the reset.
* This function returns 0 always.
* The caller is not required to hold any locks.
**/
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
uint16_t cfg_value;
int i;
psli = &phba->sli;
/* Reset HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0325 Reset HBA Data: x%x x%x\n",
phba->pport->port_state, psli->sli_flag);
/* perform board reset */
phba->fc_eventTag = 0;
phba->pport->fc_myDID = 0;
phba->pport->fc_prevDID = 0;
/* Turn off parity checking and serr during the physical reset */
pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
pci_write_config_word(phba->pcidev, PCI_COMMAND,
(cfg_value &
~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
/* Now toggle INITFF bit in the Host Control Register */
writel(HC_INITFF, phba->HCregaddr);
mdelay(1);
readl(phba->HCregaddr); /* flush */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
/* Restore PCI cmd register */
pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
/* Initialize relevant SLI info */
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
pring->flag = 0;
pring->rspidx = 0;
pring->next_cmdidx = 0;
pring->local_getidx = 0;
pring->cmdidx = 0;
pring->missbufcnt = 0;
}
phba->link_state = LPFC_WARM_START;
return 0;
}
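/*
 * Illustrative sketch (not part of the driver): both board-reset paths
 * bracket the hardware reset with a PCI command register
 * save/mask/restore so parity and SERR reporting cannot fire on the bus
 * glitch. The idiom in isolation (do_reset() is a hypothetical callback):
 *
 *	#include <linux/pci.h>
 *
 *	static void reset_quietly(struct pci_dev *pdev,
 *				  void (*do_reset)(void))
 *	{
 *		u16 cfg;
 *
 *		pci_read_config_word(pdev, PCI_COMMAND, &cfg);
 *		pci_write_config_word(pdev, PCI_COMMAND,
 *				      cfg & ~(PCI_COMMAND_PARITY |
 *					      PCI_COMMAND_SERR));
 *		do_reset();			// the glitch happens here
 *		pci_write_config_word(pdev, PCI_COMMAND, cfg);	// restore
 *	}
 */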
/**
* lpfc_sli4_brdreset - Reset a sli-4 HBA
* @phba: Pointer to HBA context object.
*
* This function resets a SLI4 HBA. It disables PCI layer parity
* checking while it resets the device. The caller is not required to hold
* any locks.
*
* This function returns 0 always.
**/
int
lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
uint16_t cfg_value;
uint8_t qindx;
/* Reset HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0295 Reset HBA Data: x%x x%x\n",
phba->pport->port_state, psli->sli_flag);
/* perform board reset */
phba->fc_eventTag = 0;
phba->pport->fc_myDID = 0;
phba->pport->fc_prevDID = 0;
/* Turn off parity checking and serr during the physical reset */
pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
pci_write_config_word(phba->pcidev, PCI_COMMAND,
(cfg_value &
~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~(LPFC_PROCESS_LA);
phba->fcf.fcf_flag = 0;
/* Clean up the child queue list for the CQs */
list_del_init(&phba->sli4_hba.mbx_wq->list);
list_del_init(&phba->sli4_hba.els_wq->list);
list_del_init(&phba->sli4_hba.hdr_rq->list);
list_del_init(&phba->sli4_hba.dat_rq->list);
list_del_init(&phba->sli4_hba.mbx_cq->list);
list_del_init(&phba->sli4_hba.els_cq->list);
list_del_init(&phba->sli4_hba.rxq_cq->list);
for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
spin_unlock_irq(&phba->hbalock);
/* Now physically reset the device */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0389 Performing PCI function reset!\n");
/* Perform FCoE PCI function reset */
lpfc_pci_function_reset(phba);
return 0;
}
/**
* lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
* @phba: Pointer to HBA context object.
*
* This function is called in the SLI initialization code path to
* restart the HBA. The caller is not required to hold any lock.
* This function writes MBX_RESTART mailbox command to the SLIM and
* resets the HBA. At the end of the function, it calls lpfc_hba_down_post
* function to free any pending commands. The function enables
* POST only during the first initialization. The function returns zero.
* The function does not guarantee completion of the MBX_RESTART mailbox
* command before it returns.
**/
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
MAILBOX_t *mb;
struct lpfc_sli *psli;
volatile uint32_t word0;
void __iomem *to_slim;
spin_lock_irq(&phba->hbalock);
psli = &phba->sli;
/* Restart HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0337 Restart HBA Data: x%x x%x\n",
phba->pport->port_state, psli->sli_flag);
word0 = 0;
mb = (MAILBOX_t *) &word0;
mb->mbxCommand = MBX_RESTART;
mb->mbxHc = 1;
lpfc_reset_barrier(phba);
to_slim = phba->MBslimaddr;
writel(*(uint32_t *) mb, to_slim);
readl(to_slim); /* flush */
/* Only skip post after fc_ffinit is completed */
if (phba->pport->port_state)
word0 = 1; /* This is really setting up word1 */
else
word0 = 0; /* This is really setting up word1 */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
writel(*(uint32_t *) mb, to_slim);
readl(to_slim); /* flush */
lpfc_sli_brdreset(phba);
phba->pport->stopped = 0;
phba->link_state = LPFC_INIT_START;
phba->hba_flag = 0;
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
psli->stats_start = get_seconds();
/* Give the INITFF and Post time to settle. */
mdelay(100);
lpfc_hba_down_post(phba);
return 0;
}
/**
* lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
* @phba: Pointer to HBA context object.
*
* This function is called in the SLI initialization code path to restart
* a SLI4 HBA. The caller is not required to hold any lock.
* At the end of the function, it calls lpfc_hba_down_post function to
* free any pending commands.
**/
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
/* Restart HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0296 Restart HBA Data: x%x x%x\n",
phba->pport->port_state, psli->sli_flag);
lpfc_sli4_brdreset(phba);
spin_lock_irq(&phba->hbalock);
phba->pport->stopped = 0;
phba->link_state = LPFC_INIT_START;
phba->hba_flag = 0;
spin_unlock_irq(&phba->hbalock);
memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
psli->stats_start = get_seconds();
lpfc_hba_down_post(phba);
return 0;
}
/**
* lpfc_sli_brdrestart - Wrapper func for restarting hba
* @phba: Pointer to HBA context object.
*
* This routine wraps the actual SLI3 or SLI4 hba restart routine from the
* API jump table function pointer from the lpfc_hba struct.
**/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
return phba->lpfc_sli_brdrestart(phba);
}
/**
* lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
* @phba: Pointer to HBA context object.
*
* This function is called after a HBA restart to wait for successful
* restart of the HBA. Successful restart of the HBA is indicated by
* HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
* iterations, the function will restart the HBA again. The function returns
* zero if the HBA successfully restarted, else returns a negative error code.
**/
static int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
uint32_t status, i = 0;
/* Read the HBA Host Status Register */
status = readl(phba->HSregaddr);
/* Check status register to see what current state is */
i = 0;
while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
/* Check every 10ms for 5 retries, then every 500ms for 5, then
* every 2.5 sec for 5, then reset the board and check every
* 2.5 sec thereafter.
*/
if (i++ >= 20) {
/* Adapter failed to init, timeout, status reg
<status> */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0436 Adapter failed to init, "
"timeout, status reg x%x, "
"FW Data: A8 x%x AC x%x\n", status,
readl(phba->MBslimaddr + 0xa8),
readl(phba->MBslimaddr + 0xac));
phba->link_state = LPFC_HBA_ERROR;
return -ETIMEDOUT;
}
/* Check to see if any errors occurred during init */
if (status & HS_FFERM) {
/* ERROR: During chipset initialization */
/* Adapter failed to init, chipset, status reg
<status> */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0437 Adapter failed to init, "
"chipset, status reg x%x, "
"FW Data: A8 x%x AC x%x\n", status,
readl(phba->MBslimaddr + 0xa8),
readl(phba->MBslimaddr + 0xac));
phba->link_state = LPFC_HBA_ERROR;
return -EIO;
}
if (i <= 5) {
msleep(10);
} else if (i <= 10) {
msleep(500);
} else {
msleep(2500);
}
if (i == 15) {
/* Do post */
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
}
/* Read the HBA Host Status Register */
status = readl(phba->HSregaddr);
}
/* Check to see if any errors occurred during init */
if (status & HS_FFERM) {
/* ERROR: During chipset initialization */
/* Adapter failed to init, chipset, status reg <status> */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0438 Adapter failed to init, chipset, "
"status reg x%x, "
"FW Data: A8 x%x AC x%x\n", status,
readl(phba->MBslimaddr + 0xa8),
readl(phba->MBslimaddr + 0xac));
phba->link_state = LPFC_HBA_ERROR;
return -EIO;
}
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
/* setup host attn register */
writel(0xffffffff, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
return 0;
}
/**
* lpfc_sli_hbq_count - Get the number of HBQs to be configured
*
* This function calculates and returns the number of HBQs required to be
* configured.
**/
int
lpfc_sli_hbq_count(void)
{
return ARRAY_SIZE(lpfc_hbq_defs);
}
/**
* lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
*
* This function adds the number of hbq entries in every HBQ to get
* the total number of hbq entries required for the HBA and returns
* the total count.
**/
static int
lpfc_sli_hbq_entry_count(void)
{
int hbq_count = lpfc_sli_hbq_count();
int count = 0;
int i;
for (i = 0; i < hbq_count; ++i)
count += lpfc_hbq_defs[i]->entry_count;
return count;
}
/**
* lpfc_sli_hbq_size - Calculate memory required for all hbq entries
*
* This function calculates amount of memory required for all hbq entries
* to be configured and returns the total memory required.
**/
int
lpfc_sli_hbq_size(void)
{
return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
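/*
 * Worked example (hypothetical numbers, not taken from lpfc_hbq_defs): if
 * the table described two HBQs with 256 and 16 entries,
 * lpfc_sli_hbq_entry_count() would return 256 + 16 = 272, and with a
 * hypothetical 8-byte struct lpfc_hbq_entry, lpfc_sli_hbq_size() would
 * report 272 * 8 = 2176 bytes required for all HBQ entries.
 */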
/**
* lpfc_sli_hbq_setup - configure and initialize HBQs
* @phba: Pointer to HBA context object.
*
* This function is called during the SLI initialization to configure
* all the HBQs and post buffers to the HBQ. The caller is not
* required to hold any locks. This function will return zero if successful
* else it will return negative error code.
**/
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
int hbq_count = lpfc_sli_hbq_count();
LPFC_MBOXQ_t *pmb;
MAILBOX_t *pmbox;
uint32_t hbqno;
uint32_t hbq_entry_index;
/* Get a Mailbox buffer to setup mailbox
* commands for HBA initialization
*/
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb)
return -ENOMEM;
pmbox = &pmb->u.mb;
/* Initialize the struct lpfc_sli_hbq structure for each hbq */
phba->link_state = LPFC_INIT_MBX_CMDS;
phba->hbq_in_use = 1;
hbq_entry_index = 0;
for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
phba->hbqs[hbqno].next_hbqPutIdx = 0;
phba->hbqs[hbqno].hbqPutIdx = 0;
phba->hbqs[hbqno].local_hbqGetIdx = 0;
phba->hbqs[hbqno].entry_count =
lpfc_hbq_defs[hbqno]->entry_count;
lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
hbq_entry_index, pmb);
hbq_entry_index += phba->hbqs[hbqno].entry_count;
if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
mbxStatus <status>, ring <num> */
lpfc_printf_log(phba, KERN_ERR,
LOG_SLI | LOG_VPORT,
"1805 Adapter failed to init. "
"Data: x%x x%x x%x\n",
pmbox->mbxCommand,
pmbox->mbxStatus, hbqno);
phba->link_state = LPFC_HBA_ERROR;
mempool_free(pmb, phba->mbox_mem_pool);
return -ENXIO;
}
}
phba->hbq_count = hbq_count;
mempool_free(pmb, phba->mbox_mem_pool);
/* Initially populate or replenish the HBQs */
for (hbqno = 0; hbqno < hbq_count; ++hbqno)
lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
return 0;
}
/**
* lpfc_sli4_rb_setup - Initialize and post RBs to HBA
* @phba: Pointer to HBA context object.
*
* This function is called during SLI4 initialization to set up the receive
* buffer queue and post receive buffers to the HBA. The caller is not
* required to hold any locks. This function always returns zero.
**/
static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
phba->hbq_in_use = 1;
phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
phba->hbq_count = 1;
/* Initially populate or replenish the HBQs */
lpfc_sli_hbqbuf_init_hbqs(phba, 0);
return 0;
}
/**
* lpfc_sli_config_port - Issue config port mailbox command
* @phba: Pointer to HBA context object.
* @sli_mode: sli mode - 2/3
*
* This function is called by the sli initialization code path
* to issue config_port mailbox command. This function restarts the
* HBA firmware and issues a config_port mailbox command to configure
* the SLI interface in the sli mode specified by sli_mode
* variable. The caller is not required to hold any locks.
* The function returns 0 if successful, else returns negative error
* code.
**/
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
LPFC_MBOXQ_t *pmb;
uint32_t resetcount = 0, rc = 0, done = 0;
pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
phba->link_state = LPFC_HBA_ERROR;
return -ENOMEM;
}
phba->sli_rev = sli_mode;
while (resetcount < 2 && !done) {
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irq(&phba->hbalock);
phba->pport->port_state = LPFC_VPORT_UNKNOWN;
lpfc_sli_brdrestart(phba);
rc = lpfc_sli_chipset_init(phba);
if (rc)
break;
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irq(&phba->hbalock);
resetcount++;
/* Call pre CONFIG_PORT mailbox command initialization. A
* value of 0 means the call was successful. Any other
* nonzero value is a failure, but if ERESTART is returned,
* the driver may reset the HBA and try again.
*/
rc = lpfc_config_port_prep(phba);
if (rc == -ERESTART) {
phba->link_state = LPFC_LINK_UNKNOWN;
continue;
} else if (rc)
break;
phba->link_state = LPFC_INIT_MBX_CMDS;
lpfc_config_port(phba, pmb);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
LPFC_SLI3_HBQ_ENABLED |
LPFC_SLI3_CRP_ENABLED |
LPFC_SLI3_INB_ENABLED |
LPFC_SLI3_BG_ENABLED);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0442 Adapter failed to init, mbxCmd x%x "
"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
rc = -ENXIO;
} else {
/* Allow asynchronous mailbox command to go through */
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
done = 1;
}
}
if (!done) {
rc = -EINVAL;
goto do_prep_failed;
}
if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
if (!pmb->u.mb.un.varCfgPort.cMA) {
rc = -ENXIO;
goto do_prep_failed;
}
if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
phba->max_vports = (phba->max_vpi > phba->max_vports) ?
phba->max_vpi : phba->max_vports;
} else
phba->max_vpi = 0;
if (pmb->u.mb.un.varCfgPort.gdss)
phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
if (pmb->u.mb.un.varCfgPort.gerbm)
phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
if (pmb->u.mb.un.varCfgPort.gcrp)
phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
if (pmb->u.mb.un.varCfgPort.ginb) {
phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
phba->inb_last_counter =
phba->mbox->us.s3_inb_pgp.counter;
} else {
phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
phba->port_gp = phba->mbox->us.s3_pgp.port;
phba->inb_ha_copy = NULL;
phba->inb_counter = NULL;
}
if (phba->cfg_enable_bg) {
if (pmb->u.mb.un.varCfgPort.gbg)
phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0443 Adapter did not grant "
"BlockGuard\n");
}
} else {
phba->hbq_get = NULL;
phba->port_gp = phba->mbox->us.s2.port;
phba->inb_ha_copy = NULL;
phba->inb_counter = NULL;
phba->max_vpi = 0;
}
do_prep_failed:
mempool_free(pmb, phba->mbox_mem_pool);
return rc;
}
/**
* lpfc_sli_hba_setup - SLI initialization function
* @phba: Pointer to HBA context object.
*
* This function is the main SLI initialization function. This function
* is called by the HBA initialization code, HBA reset code and HBA
* error attention handler code. Caller is not required to hold any
* locks. This function issues config_port mailbox command to configure
* the SLI, setup iocb rings and HBQ rings. In the end the function
* calls the config_port_post function to issue init_link mailbox
* command and to start the discovery. The function will return zero
* if successful, else it will return negative error code.
**/
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
uint32_t rc;
int mode = 3;
switch (lpfc_sli_mode) {
case 2:
if (phba->cfg_enable_npiv) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"1824 NPIV enabled: Override lpfc_sli_mode "
"parameter (%d) to auto (0).\n",
lpfc_sli_mode);
break;
}
mode = 2;
break;
case 0:
case 3:
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"1819 Unrecognized lpfc_sli_mode "
"parameter: %d.\n", lpfc_sli_mode);
break;
}
rc = lpfc_sli_config_port(phba, mode);
if (rc && lpfc_sli_mode == 3)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
"1820 Unable to select SLI-3. "
"Not supported by adapter.\n");
if (rc && mode != 2)
rc = lpfc_sli_config_port(phba, 2);
if (rc)
goto lpfc_sli_hba_setup_error;
if (phba->sli_rev == 3) {
phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
} else {
phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
phba->sli3_options = 0;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0444 Firmware in SLI %x mode. Max_vpi %d\n",
phba->sli_rev, phba->max_vpi);
rc = lpfc_sli_ring_map(phba);
if (rc)
goto lpfc_sli_hba_setup_error;
/* Init HBQs */
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
rc = lpfc_sli_hbq_setup(phba);
if (rc)
goto lpfc_sli_hba_setup_error;
}
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag |= LPFC_PROCESS_LA;
spin_unlock_irq(&phba->hbalock);
rc = lpfc_config_port_post(phba);
if (rc)
goto lpfc_sli_hba_setup_error;
return rc;
lpfc_sli_hba_setup_error:
phba->link_state = LPFC_HBA_ERROR;
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0445 Firmware initialization failed\n");
return rc;
}
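/*
 * Illustrative sketch (not part of the driver): lpfc_sli_hba_setup
 * negotiates the SLI mode with a "try preferred, then fall back" sequence.
 * Reduced to its skeleton with a hypothetical config_port() helper:
 *
 *	static int negotiate_mode(int preferred)
 *	{
 *		int rc = config_port(preferred);	// e.g. SLI-3
 *
 *		if (rc && preferred != 2)
 *			rc = config_port(2);		// fall back to SLI-2
 *		return rc;				// 0 on success
 *	}
 */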
/**
* lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
* @phba: Pointer to HBA context object.
* @mboxq: mailbox pointer.
* This function issues a dump mailbox command to read config region
* 23, parses the records in the region and populates the driver
* data structure.
**/
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
LPFC_MBOXQ_t *mboxq)
{
struct lpfc_dmabuf *mp;
struct lpfc_mqe *mqe;
uint32_t data_length;
int rc;
/* Program the default value of vlan_id and fc_map */
phba->valid_vlan = 0;
phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
mqe = &mboxq->u.mqe;
if (lpfc_dump_fcoe_param(phba, mboxq))
return -ENOMEM;
mp = (struct lpfc_dmabuf *) mboxq->context1;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):2571 Mailbox cmd x%x Status x%x "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
"CQ: x%x x%x x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
bf_get(lpfc_mqe_command, mqe),
bf_get(lpfc_mqe_status, mqe),
mqe->un.mb_words[0], mqe->un.mb_words[1],
mqe->un.mb_words[2], mqe->un.mb_words[3],
mqe->un.mb_words[4], mqe->un.mb_words[5],
mqe->un.mb_words[6], mqe->un.mb_words[7],
mqe->un.mb_words[8], mqe->un.mb_words[9],
mqe->un.mb_words[10], mqe->un.mb_words[11],
mqe->un.mb_words[12], mqe->un.mb_words[13],
mqe->un.mb_words[14], mqe->un.mb_words[15],
mqe->un.mb_words[16], mqe->un.mb_words[50],
mboxq->mcqe.word0,
mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
mboxq->mcqe.trailer);
if (rc) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
return -EIO;
}
data_length = mqe->un.mb_words[5];
if (data_length > DMP_RGN23_SIZE) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
return -EIO;
}
lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
return 0;
}
/**
* lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
* @phba: pointer to lpfc hba data structure.
* @mboxq: pointer to the LPFC_MBOXQ_t structure.
* @vpd: pointer to the memory to hold resulting port vpd data.
* @vpd_size: On input, the number of bytes allocated to @vpd.
* On output, the number of data bytes in @vpd.
*
* This routine executes a READ_REV SLI4 mailbox command. In
* addition, this routine gets the port vpd data.
*
* Return codes
* 0 - successful
* -ENOMEM - could not allocate memory.
**/
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
uint8_t *vpd, uint32_t *vpd_size)
{
int rc = 0;
uint32_t dma_size;
struct lpfc_dmabuf *dmabuf;
struct lpfc_mqe *mqe;
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf)
return -ENOMEM;
/*
* Get a DMA buffer for the vpd data resulting from the READ_REV
* mailbox command.
*/
dma_size = *vpd_size;
dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
dma_size,
&dmabuf->phys,
GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
return -ENOMEM;
}
memset(dmabuf->virt, 0, dma_size);
/*
* The SLI4 implementation of READ_REV conflicts at word1,
* bits 31:16 and SLI4 adds vpd functionality not present
* in SLI3. This code corrects the conflicts.
*/
lpfc_read_rev(phba, mboxq);
mqe = &mboxq->u.mqe;
mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
mqe->un.read_rev.word1 &= 0x0000FFFF;
bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (rc) {
dma_free_coherent(&phba->pcidev->dev, dma_size,
dmabuf->virt, dmabuf->phys);
return -EIO;
}
/*
* The available vpd length cannot be bigger than the
* DMA buffer passed to the port. Catch the less than
* case and update the caller's size.
*/
if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
*vpd_size = mqe->un.read_rev.avail_vpd_len;
lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
dma_free_coherent(&phba->pcidev->dev, dma_size,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
return 0;
}
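/*
 * Illustrative sketch (not part of the driver): lpfc_sli4_read_rev follows
 * the standard coherent-DMA buffer lifecycle - allocate, hand the bus
 * address to the device, harvest the result, free. The bare idiom
 * (run_command() is a hypothetical stand-in for the mailbox round trip):
 *
 *	#include <linux/dma-mapping.h>
 *	#include <linux/string.h>
 *
 *	extern void run_command(dma_addr_t phys, size_t len);
 *
 *	static int dma_roundtrip(struct device *dev, void *out, size_t len)
 *	{
 *		dma_addr_t phys;
 *		void *virt = dma_alloc_coherent(dev, len, &phys, GFP_KERNEL);
 *
 *		if (!virt)
 *			return -ENOMEM;
 *		run_command(phys, len);		// device DMAs into the buffer
 *		memcpy(out, virt, len);		// harvest the result
 *		dma_free_coherent(dev, len, virt, phys);
 *		return 0;
 *	}
 */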
/**
* lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
* @phba: pointer to lpfc hba data structure.
*
* This routine is called to explicitly arm the SLI4 device's completion and
* event queues
**/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
uint8_t fcp_eqidx;
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
LPFC_QUEUE_REARM);
lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
LPFC_QUEUE_REARM);
}
/**
* lpfc_sli4_hba_setup - SLI4 device initialization PCI function
* @phba: Pointer to HBA context object.
*
* This function is the main SLI4 device initialization PCI function. This
* function is called by the HBA initialization code, HBA reset code and
* HBA error attention handler code. Caller is not required to hold any
* locks.
**/
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
int rc;
LPFC_MBOXQ_t *mboxq;
struct lpfc_mqe *mqe;
uint8_t *vpd;
uint32_t vpd_size;
uint32_t ftr_rsp = 0;
struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
struct lpfc_vport *vport = phba->pport;
struct lpfc_dmabuf *mp;
/* Perform a PCI function reset to start from clean */
rc = lpfc_pci_function_reset(phba);
if (unlikely(rc))
return -ENODEV;
/* Check the HBA Host Status Register for readiness */
rc = lpfc_sli4_post_status_check(phba);
if (unlikely(rc))
return -ENODEV;
else {
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
}
/*
* Allocate a single mailbox container for initializing the
* port.
*/
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq)
return -ENOMEM;
/*
* Continue initialization with default values even if the driver failed
* to read the FCoE param config regions.
*/
if (lpfc_sli4_read_fcoe_params(phba, mboxq))
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
"2570 Failed to read FCoE parameters\n");
/* Issue READ_REV to collect vpd and FW information. */
vpd_size = PAGE_SIZE;
vpd = kzalloc(vpd_size, GFP_KERNEL);
if (!vpd) {
rc = -ENOMEM;
goto out_free_mbox;
}
rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
if (unlikely(rc))
goto out_free_vpd;
mqe = &mboxq->u.mqe;
phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
phba->hba_flag |= HBA_FCOE_SUPPORT;
if (phba->sli_rev != LPFC_SLI_REV4 ||
!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0376 READ_REV Error. SLI Level %d "
"FCoE enabled %d\n",
phba->sli_rev, phba->hba_flag & HBA_FCOE_SUPPORT);
rc = -EIO;
goto out_free_vpd;
}
/*
* Evaluate the read rev and vpd data. Populate the driver
* state with the results. If this routine fails, the failure
* is not fatal as the driver will use generic values.
*/
rc = lpfc_parse_vpd(phba, vpd, vpd_size);
if (unlikely(!rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0377 Error %d parsing vpd. "
"Using defaults.\n", rc);
rc = 0;
}
/* Save information as VPD data */
phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
&mqe->un.read_rev);
phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
&mqe->un.read_rev);
phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
&mqe->un.read_rev);
phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
&mqe->un.read_rev);
phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0380 READ_REV Status x%x "
"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
bf_get(lpfc_mqe_status, mqe),
phba->vpd.rev.opFwName,
phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
/*
* Discover the port's supported feature set and match it against the
* host's requests.
*/
lpfc_request_features(phba, mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
if (unlikely(rc)) {
rc = -EIO;
goto out_free_vpd;
}
/*
* The port must support FCP initiator mode as this is the
* only mode running in the host.
*/
if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"0378 No support for fcpi mode.\n");
ftr_rsp++;
}
/*
* If the port cannot support the host's requested features
* then turn off the global config parameters to disable the
* feature in the driver. This is not a fatal error.
*/
if ((phba->cfg_enable_bg) &&
!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
ftr_rsp++;
if (phba->max_vpi && phba->cfg_enable_npiv &&
!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
ftr_rsp++;
if (ftr_rsp) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"0379 Feature Mismatch Data: x%08x %08x "
"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
phba->cfg_enable_npiv, phba->max_vpi);
if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
phba->cfg_enable_bg = 0;
if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
phba->cfg_enable_npiv = 0;
}
/* These SLI3 features are assumed in SLI4 */
spin_lock_irq(&phba->hbalock);
phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
spin_unlock_irq(&phba->hbalock);
/* Read the port's service parameters. */
lpfc_read_sparam(phba, mboxq, vport->vpi);
mboxq->vport = vport;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
mp = (struct lpfc_dmabuf *) mboxq->context1;
if (rc == MBX_SUCCESS) {
memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
rc = 0;
}
/*
* This memory was allocated by the lpfc_read_sparam routine. Release
* it to the mbuf pool.
*/
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
mboxq->context1 = NULL;
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0382 READ_SPARAM command failed "
"status %d, mbxStatus x%x\n",
rc, bf_get(lpfc_mqe_status, mqe));
phba->link_state = LPFC_HBA_ERROR;
rc = -EIO;
goto out_free_vpd;
}
if (phba->cfg_soft_wwnn)
u64_to_wwn(phba->cfg_soft_wwnn,
vport->fc_sparam.nodeName.u.wwn);
if (phba->cfg_soft_wwpn)
u64_to_wwn(phba->cfg_soft_wwpn,
vport->fc_sparam.portName.u.wwn);
memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
sizeof(struct lpfc_name));
memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
sizeof(struct lpfc_name));
/* Update the fc_host data structures with new wwn. */
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
/* Register SGL pool to the device using non-embedded mailbox command */
rc = lpfc_sli4_post_sgl_list(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0582 Error %d during sgl post operation", rc);
rc = -ENODEV;
goto out_free_vpd;
}
/* Register SCSI SGL pool to the device */
rc = lpfc_sli4_repost_scsi_sgl_list(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"0383 Error %d during scsi sgl post opeation",
rc);
/* Some Scsi buffers were moved to the abort scsi list */
/* A pci function reset will repost them */
rc = -ENODEV;
goto out_free_vpd;
}
/* Post the rpi header region to the device. */
rc = lpfc_sli4_post_all_rpi_hdrs(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0393 Error %d during rpi post operation\n",
rc);
rc = -ENODEV;
goto out_free_vpd;
}
if (phba->cfg_enable_fip)
bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 1);
else
bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
/* Set up all the queues to the device */
rc = lpfc_sli4_queue_setup(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0381 Error %d during queue setup.\n ", rc);
goto out_stop_timers;
}
/* Arm the CQs and then EQs on device */
lpfc_sli4_arm_cqeq_intr(phba);
/* Indicate device interrupt mode */
phba->sli4_hba.intr_enable = 1;
/* Allow asynchronous mailbox command to go through */
spin_lock_irq(&phba->hbalock);
phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
/* Post receive buffers to the device */
lpfc_sli4_rb_setup(phba);
/* Start the ELS watchdog timer */
mod_timer(&vport->els_tmofunc,
jiffies + HZ * (phba->fc_ratov * 2));
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
phba->hb_outstanding = 0;
phba->last_completion_time = jiffies;
/* Start error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
/*
* The port is ready, set the host's link state to LINK_DOWN
* in preparation for link interrupts.
*/
lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
lpfc_set_loopback_flag(phba);
/* Change driver state to LPFC_LINK_DOWN right before init link */
spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_LINK_DOWN;
spin_unlock_irq(&phba->hbalock);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (unlikely(rc != MBX_NOT_FINISHED)) {
kfree(vpd);
return 0;
} else
rc = -EIO;
/* Unset all the queues set up in this routine when error out */
if (rc)
lpfc_sli4_queue_unset(phba);
out_stop_timers:
if (rc)
lpfc_stop_hba_timers(phba);
out_free_vpd:
kfree(vpd);
out_free_mbox:
mempool_free(mboxq, phba->mbox_mem_pool);
return rc;
}
/**
* lpfc_mbox_timeout - Timeout call back function for mbox timer
* @ptr: context object - pointer to hba structure.
*
* This is the callback function for the mailbox timer. The mailbox
* timer is armed when a new mailbox command is issued and the timer
* is deleted when the mailbox completes. The function is called by
* the kernel timer code when a mailbox does not complete within
* expected time. This function wakes up the worker thread to
* process the mailbox timeout and returns. All the processing is
* done by the worker thread function lpfc_mbox_timeout_handler.
**/
void
lpfc_mbox_timeout(unsigned long ptr)
{
struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
unsigned long iflag;
uint32_t tmo_posted;
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
if (!tmo_posted)
phba->pport->work_port_events |= WORKER_MBOX_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
if (!tmo_posted)
lpfc_worker_wake_up(phba);
return;
}
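/*
 * Illustrative sketch (not part of the driver): the timer callback above
 * only posts an event flag and wakes the worker; the heavy recovery runs
 * later in process context. The general shape of that deferral (struct
 * ctx, EVT_TMO and wake_worker() are hypothetical):
 *
 *	static void timeout_cb(unsigned long ptr)
 *	{
 *		struct ctx *c = (struct ctx *)ptr;
 *		unsigned long flags;
 *		bool already_posted;
 *
 *		spin_lock_irqsave(&c->lock, flags);
 *		already_posted = c->events & EVT_TMO;	// test-and-set
 *		c->events |= EVT_TMO;
 *		spin_unlock_irqrestore(&c->lock, flags);
 *
 *		if (!already_posted)
 *			wake_worker(c);		// first poster wakes worker
 *	}
 */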
/**
* lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
* @phba: Pointer to HBA context object.
*
* This function is called from worker thread when a mailbox command times out.
* The caller is not required to hold any locks. This function will reset the
* HBA and recover all the pending commands.
**/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
MAILBOX_t *mb = &pmbox->u.mb;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
/* Check the pmbox pointer first. There is a race condition
* between the mbox timeout handler getting executed in the
* worklist and the mailbox actually completing. When this
* race condition occurs, the mbox_active will be NULL.
*/
spin_lock_irq(&phba->hbalock);
if (pmbox == NULL) {
lpfc_printf_log(phba, KERN_WARNING,
LOG_MBOX | LOG_SLI,
"0353 Active Mailbox cleared - mailbox timeout "
"exiting\n");
spin_unlock_irq(&phba->hbalock);
return;
}
/* Mbox cmd <mbxCommand> timeout */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
mb->mbxCommand,
phba->pport->port_state,
phba->sli.sli_flag,
phba->sli.mbox_active);
spin_unlock_irq(&phba->hbalock);
/* Setting state unknown so lpfc_sli_abort_iocb_ring
* would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
* it to fail all outstanding SCSI IO.
*/
spin_lock_irq(&phba->pport->work_port_lock);
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
spin_unlock_irq(&phba->pport->work_port_lock);
spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_LINK_UNKNOWN;
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
pring = &psli->ring[psli->fcp_ring];
lpfc_sli_abort_iocb_ring(phba, pring);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0345 Resetting board due to mailbox timeout\n");
/* Reset the HBA device */
lpfc_reset_hba(phba);
}
/**
* lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
* @phba: Pointer to HBA context object.
* @pmbox: Pointer to mailbox object.
* @flag: Flag indicating how the mailbox needs to be processed.
*
* This function is called by discovery code and HBA management code
* to submit a mailbox command to firmware with SLI-3 interface spec. This
* function gets the hbalock to protect the data structures.
* The mailbox command can be submitted in polling mode, in which case
* this function will wait in a polling loop for the completion of the
* mailbox.
* If the mailbox is submitted in no_wait mode (not polling), the
* function will submit the command and return immediately without waiting
* for the mailbox completion. The no_wait mode is supported only when the
* HBA is in SLI2/SLI3 mode - interrupts are enabled.
* The SLI interface allows only one mailbox pending at a time. If a
* mailbox is issued in polling mode and there is already a mailbox
* pending, then the function will return an error. If a mailbox is issued
* in NO_WAIT mode and there is a mailbox pending already, the function
* will return MBX_BUSY after queuing the mailbox into the mailbox queue.
* The sli layer owns the mailbox object until the completion of the
* mailbox command if this function returns MBX_BUSY or MBX_SUCCESS. For
* all other return codes, the caller owns the mailbox command after the
* function returns.
**/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
uint32_t flag)
{
MAILBOX_t *mb;
struct lpfc_sli *psli = &phba->sli;
uint32_t status, evtctr;
uint32_t ha_copy;
int i;
unsigned long timeout;
unsigned long drvr_flag = 0;
uint32_t word0, ldata;
void __iomem *to_slim;
int processing_queue = 0;
spin_lock_irqsave(&phba->hbalock, drvr_flag);
if (!pmbox) {
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* processing mbox queue from intr_handler */
if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
return MBX_SUCCESS;
}
processing_queue = 1;
pmbox = lpfc_mbox_get(phba);
if (!pmbox) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
return MBX_SUCCESS;
}
}
if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
if(!pmbox->vport) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
lpfc_printf_log(phba, KERN_ERR,
LOG_MBOX | LOG_VPORT,
"1806 Mbox x%x failed. No vport\n",
pmbox->u.mb.mbxCommand);
dump_stack();
goto out_not_finished;
}
}
/* If the PCI channel is in offline state, do not post mbox. */
if (unlikely(pci_channel_offline(phba->pcidev))) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
goto out_not_finished;
}
/* If HBA has a deferred error attention, fail the iocb. */
if (unlikely(phba->hba_flag & DEFER_ERATT)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
goto out_not_finished;
}
psli = &phba->sli;
mb = &pmbox->u.mb;
status = MBX_SUCCESS;
if (phba->link_state == LPFC_HBA_ERROR) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):0311 Mailbox command x%x cannot "
"issue Data: x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
goto out_not_finished;
}
if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
!(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2528 Mailbox command x%x cannot "
"issue Data: x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
goto out_not_finished;
}
if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
/* Polling for a mbox command when another one is already active
* is not allowed in SLI. Also, the driver must have established
* SLI2 mode to queue and process multiple mbox commands.
*/
if (flag & MBX_POLL) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2529 Mailbox command x%x "
"cannot issue Data: x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
pmbox->u.mb.mbxCommand,
psli->sli_flag, flag);
goto out_not_finished;
}
if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2530 Mailbox command x%x "
"cannot issue Data: x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
pmbox->u.mb.mbxCommand,
psli->sli_flag, flag);
goto out_not_finished;
}
/* Another mailbox command is still being processed, queue this
* command to be processed later.
*/
lpfc_mbox_put(phba, pmbox);
/* Mbox cmd issue - BUSY */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0308 Mbox cmd issue - BUSY Data: "
"x%x x%x x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0xffffff,
mb->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
psli->slistat.mbox_busy++;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
if (pmbox->vport) {
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Bsy vport: cmd:x%x mb:x%x x%x",
(uint32_t)mb->mbxCommand,
mb->un.varWords[0], mb->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Bsy: cmd:x%x mb:x%x x%x",
(uint32_t)mb->mbxCommand,
mb->un.varWords[0], mb->un.varWords[1]);
}
return MBX_BUSY;
}
psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
/* If we are not polling, we MUST be in SLI2 mode */
if (flag != MBX_POLL) {
if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
(mb->mbxCommand != MBX_KILL_BOARD)) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2531 Mailbox command x%x "
"cannot issue Data: x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
pmbox->u.mb.mbxCommand,
psli->sli_flag, flag);
goto out_not_finished;
}
/* timeout active mbox command */
mod_timer(&psli->mbox_tmo, (jiffies +
(HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
}
/* Mailbox cmd <cmd> issue */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
mb->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
if (mb->mbxCommand != MBX_HEARTBEAT) {
if (pmbox->vport) {
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Send vport: cmd:x%x mb:x%x x%x",
(uint32_t)mb->mbxCommand,
mb->un.varWords[0], mb->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Send: cmd:x%x mb:x%x x%x",
(uint32_t)mb->mbxCommand,
mb->un.varWords[0], mb->un.varWords[1]);
}
}
psli->slistat.mbox_cmd++;
evtctr = psli->slistat.mbox_event;
/* next set own bit for the adapter and copy over command word */
mb->mbxOwner = OWN_CHIP;
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* First copy command data to host SLIM area */
lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
} else {
if (mb->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
}
/* First copy mbox command data to HBA SLIM, skip past first
word */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
MAILBOX_CMD_SIZE - sizeof (uint32_t));
/* Next copy over first word, with mbxOwner set */
ldata = *((uint32_t *)mb);
to_slim = phba->MBslimaddr;
writel(ldata, to_slim);
readl(to_slim); /* flush */
if (mb->mbxCommand == MBX_CONFIG_PORT) {
/* switch over to host mailbox */
psli->sli_flag |= LPFC_SLI_ACTIVE;
}
}
wmb();
switch (flag) {
case MBX_NOWAIT:
/* Set up reference to mailbox command */
psli->mbox_active = pmbox;
/* Interrupt board to do it */
writel(CA_MBATT, phba->CAregaddr);
readl(phba->CAregaddr); /* flush */
/* Don't wait for it to finish, just return */
break;
case MBX_POLL:
/* Set up null reference to mailbox command */
psli->mbox_active = NULL;
/* Interrupt board to do it */
writel(CA_MBATT, phba->CAregaddr);
readl(phba->CAregaddr); /* flush */
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* First read mbox status word */
word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
} else {
/* First read mbox status word */
word0 = readl(phba->MBslimaddr);
}
/* Read the HBA Host Attention Register */
ha_copy = readl(phba->HAregaddr);
timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
mb->mbxCommand) *
1000) + jiffies;
i = 0;
/* Wait for command to complete */
while (((word0 & OWN_CHIP) == OWN_CHIP) ||
(!(ha_copy & HA_MBATT) &&
(phba->link_state > LPFC_WARM_START))) {
if (time_after(jiffies, timeout)) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock,
drvr_flag);
goto out_not_finished;
}
/* Check if we took a mbox interrupt while we were
polling */
if (((word0 & OWN_CHIP) != OWN_CHIP)
&& (evtctr != psli->slistat.mbox_event))
break;
if (i++ > 10) {
spin_unlock_irqrestore(&phba->hbalock,
drvr_flag);
msleep(1);
spin_lock_irqsave(&phba->hbalock, drvr_flag);
}
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* First copy command data */
word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
if (mb->mbxCommand == MBX_CONFIG_PORT) {
MAILBOX_t *slimmb;
uint32_t slimword0;
/* Check real SLIM for any errors */
slimword0 = readl(phba->MBslimaddr);
slimmb = (MAILBOX_t *) & slimword0;
if (((slimword0 & OWN_CHIP) != OWN_CHIP)
&& slimmb->mbxStatus) {
psli->sli_flag &=
~LPFC_SLI_ACTIVE;
word0 = slimword0;
}
}
} else {
/* First copy command data */
word0 = readl(phba->MBslimaddr);
}
/* Read the HBA Host Attention Register */
ha_copy = readl(phba->HAregaddr);
}
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* copy results back to user */
lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
} else {
/* First copy command data */
lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
pmbox->context2) {
lpfc_memcpy_from_slim((void *)pmbox->context2,
phba->MBslimaddr + DMP_RSP_OFFSET,
mb->un.varDmp.word_cnt);
}
}
writel(HA_MBATT, phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
status = mb->mbxStatus;
}
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
return status;
out_not_finished:
if (processing_queue) {
pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
lpfc_mbox_cmpl_put(phba, pmbox);
}
return MBX_NOT_FINISHED;
}
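/*
 * Illustrative sketch (not part of the driver): the MBX_POLL leg above
 * spins on the mailbox ownership bit with a deadline; the chip clears
 * OWN_CHIP in word 0 once it has consumed the command. A stripped-down
 * model of that wait (read_word0() is a hypothetical accessor; jiffies,
 * time_after() and msleep() are the usual kernel helpers):
 *
 *	static int wait_for_host_ownership(u32 (*read_word0)(void),
 *					   unsigned long deadline)
 *	{
 *		while ((read_word0() & OWN_CHIP) == OWN_CHIP) {
 *			if (time_after(jiffies, deadline))
 *				return -ETIMEDOUT;	// chip kept ownership
 *			msleep(1);
 *		}
 *		return 0;	// command consumed, results readable
 *	}
 */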
/**
* lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
* @phba: Pointer to HBA context object.
*
* The function blocks the posting of SLI4 asynchronous mailbox commands from
* the driver internal pending mailbox queue. It then tries to wait out any
* outstanding mailbox command before returning.
*
* Returns:
* 0 - the outstanding mailbox command completed;
* 1 - the wait for the outstanding mailbox command timed out.
**/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
uint8_t actcmd = MBX_HEARTBEAT;
int rc = 0;
unsigned long timeout;
/* Mark the asynchronous mailbox command posting as blocked */
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
if (phba->sli.mbox_active)
actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
spin_unlock_irq(&phba->hbalock);
/* Determine how long we might wait for the active mailbox
* command to be gracefully completed by firmware.
*/
timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
jiffies;
/* Wait for the outstanding mailbox command to complete */
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
msleep(2);
if (time_after(jiffies, timeout)) {
/* Timed out; mark the outstanding cmd as not complete */
rc = 1;
break;
}
}
/* Could not cleanly block async mailbox commands; fail the request */
if (rc) {
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
}
return rc;
}
/**
* lpfc_sli4_async_mbox_unblock - Unblock posting of SLI4 async mailbox commands
* @phba: Pointer to HBA context object.
*
* The function unblocks and resumes the posting of SLI4 asynchronous mailbox
* commands from the driver internal pending mailbox queue. It makes sure
* that there is no outstanding mailbox command before resuming posting
* asynchronous mailbox commands. If, for any reason, there is an outstanding
* mailbox command, it will try to wait it out before resuming asynchronous
* mailbox command posting.
**/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
spin_lock_irq(&phba->hbalock);
if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
/* Asynchronous mailbox posting is not blocked, do nothing */
spin_unlock_irq(&phba->hbalock);
return;
}
/* The outstanding synchronous mailbox command is guaranteed to be done,
* either successfully or by timeout; a timed-out command is always
* removed afterwards, so just unblock posting of async mailbox commands
* and resume.
*/
psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
/* wake up worker thread to post asynchronous mailbox command */
lpfc_worker_wake_up(phba);
}
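/*
 * Example (illustrative sketch, not part of the original source): the
 * intended pairing of the two helpers above. A caller that must issue a
 * synchronous mailbox command while interrupts are enabled first blocks
 * asynchronous posting, posts through the bootstrap mailbox, and then
 * unblocks:
 *
 *	if (lpfc_sli4_async_mbox_block(phba) == 0) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 *
 * This is the sequence used by lpfc_sli_issue_mbox_s4() below for the
 * MBX_POLL case; the sketch assumes mboxq has been fully prepared.
 */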
/**
* lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
* @phba: Pointer to HBA context object.
* @mboxq: Pointer to mailbox object.
*
* The function posts a mailbox to the port. The mailbox is expected
* to be completely filled in and ready for the port to operate on it.
* This routine executes a synchronous completion operation on the
* mailbox by polling for its completion.
*
* The caller must not be holding any locks when calling this routine.
*
* Returns:
* MBX_SUCCESS - mailbox posted successfully
* Any of the MBX error values.
**/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
int rc = MBX_SUCCESS;
unsigned long iflag;
uint32_t db_ready;
uint32_t mcqe_status;
uint32_t mbx_cmnd;
unsigned long timeout;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_mqe *mb = &mboxq->u.mqe;
struct lpfc_bmbx_create *mbox_rgn;
struct dma_address *dma_address;
struct lpfc_register bmbx_reg;
/*
* Only one mailbox can be active to the bootstrap mailbox region
* at a time and there is no queueing provided.
*/
spin_lock_irqsave(&phba->hbalock, iflag);
if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2532 Mailbox command x%x (x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli4_mbox_opcode_get(phba, mboxq),
psli->sli_flag, MBX_POLL);
return MBXERR_ERROR;
}
/* The server grabs the token and owns it until release */
psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
phba->sli.mbox_active = mboxq;
spin_unlock_irqrestore(&phba->hbalock, iflag);
/*
* Initialize the bootstrap memory region to avoid stale data areas
* in the mailbox post. Then copy the caller's mailbox contents to
* the bmbx mailbox region.
*/
mbx_cmnd = bf_get(lpfc_mqe_command, mb);
memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
sizeof(struct lpfc_mqe));
/* Post the high mailbox dma address to the port and wait for ready. */
dma_address = &phba->sli4_hba.bmbx.dma_address;
writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
* 1000) + jiffies;
do {
bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
if (!db_ready)
msleep(2);
if (time_after(jiffies, timeout)) {
rc = MBXERR_ERROR;
goto exit;
}
} while (!db_ready);
/* Post the low mailbox dma address to the port. */
writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
* 1000) + jiffies;
do {
bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
if (!db_ready)
msleep(2);
if (time_after(jiffies, timeout)) {
rc = MBXERR_ERROR;
goto exit;
}
} while (!db_ready);
/*
* Read the CQ to ensure the mailbox has completed.
* If so, update the mailbox status so that the upper layers
* can complete the request normally.
*/
lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
sizeof(struct lpfc_mqe));
mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
sizeof(struct lpfc_mcqe));
mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
/* Prefix the mailbox status with range x4000 to note SLI4 status. */
if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
rc = MBXERR_ERROR;
}
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
" x%x x%x CQ: x%x x%x x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
bf_get(lpfc_mqe_status, mb),
mb->un.mb_words[0], mb->un.mb_words[1],
mb->un.mb_words[2], mb->un.mb_words[3],
mb->un.mb_words[4], mb->un.mb_words[5],
mb->un.mb_words[6], mb->un.mb_words[7],
mb->un.mb_words[8], mb->un.mb_words[9],
mb->un.mb_words[10], mb->un.mb_words[11],
mb->un.mb_words[12], mboxq->mcqe.word0,
mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
mboxq->mcqe.trailer);
exit:
/* We are holding the token; release it and clear the active mailbox */
spin_lock_irqsave(&phba->hbalock, iflag);
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
phba->sli.mbox_active = NULL;
spin_unlock_irqrestore(&phba->hbalock, iflag);
return rc;
}
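/*
 * Note (illustrative, not from the original source): the bootstrap mailbox
 * post above is a two-phase doorbell handshake, high DMA address first,
 * then low. Each phase follows the same generic "ring doorbell, poll a
 * ready bit with a timeout" pattern, sketched here with a hypothetical
 * extract_ready_bit() standing in for bf_get(lpfc_bmbx_rdy, ...):
 *
 *	writel(value, doorbell_reg);
 *	timeout = msecs_to_jiffies(tmo_ms) + jiffies;
 *	do {
 *		word0 = readl(doorbell_reg);
 *		ready = extract_ready_bit(word0);
 *		if (!ready)
 *			msleep(2);
 *		if (time_after(jiffies, timeout))
 *			return -ETIMEDOUT;
 *	} while (!ready);
 *
 * Only after both phases are acknowledged is the MQE copied back and the
 * MCQE status checked.
 */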
/**
* lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
* @phba: Pointer to HBA context object.
* @mboxq: Pointer to mailbox object.
* @flag: Flag indicating how the mailbox needs to be processed.
*
* This function is called by discovery code and HBA management code to submit
* a mailbox command to firmware with SLI-4 interface spec.
*
* Return codes: the caller owns the mailbox command after the return of the
* function.
**/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
uint32_t flag)
{
struct lpfc_sli *psli = &phba->sli;
unsigned long iflags;
int rc;
rc = lpfc_mbox_dev_check(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2544 Mailbox command x%x (x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli4_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
goto out_not_finished;
}
/* Detect polling mode and jump to a handler */
if (!phba->sli4_hba.intr_enable) {
if (flag == MBX_POLL)
rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
else
rc = -EIO;
if (rc != MBX_SUCCESS)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2541 Mailbox command x%x "
"(x%x) cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli4_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
return rc;
} else if (flag == MBX_POLL) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"(%d):2542 Try to issue mailbox command "
"x%x (x%x) synchronously ahead of async"
"mailbox command queue: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli4_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
/* Try to block the asynchronous mailbox posting */
rc = lpfc_sli4_async_mbox_block(phba);
if (!rc) {
/* Successfully blocked, now issue sync mbox cmd */
rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
if (rc != MBX_SUCCESS)
lpfc_printf_log(phba, KERN_ERR,
LOG_MBOX | LOG_SLI,
"(%d):2597 Mailbox command "
"x%x (x%x) cannot issue "
"Data: x%x x%x\n",
mboxq->vport ?
mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli4_mbox_opcode_get(phba,
mboxq),
psli->sli_flag, flag);
/* Unblock the async mailbox posting afterward */
lpfc_sli4_async_mbox_unblock(phba);
}
return rc;
}
/* Now, interrupt mode asynchronous mailbox command */
rc = lpfc_mbox_cmd_check(phba, mboxq);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2543 Mailbox command x%x (x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli4_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
goto out_not_finished;
}
/* Put the mailbox command to the driver internal FIFO */
psli->slistat.mbox_busy++;
spin_lock_irqsave(&phba->hbalock, iflags);
lpfc_mbox_put(phba, mboxq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0354 Mbox cmd issue - Enqueue Data: "
"x%x (x%x) x%x x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0xffffff,
bf_get(lpfc_mqe_command, &mboxq->u.mqe),
lpfc_sli4_mbox_opcode_get(phba, mboxq),
phba->pport->port_state,
psli->sli_flag, MBX_NOWAIT);
/* Wake up worker thread to transport mailbox command from head */
lpfc_worker_wake_up(phba);
return MBX_BUSY;
out_not_finished:
return MBX_NOT_FINISHED;
}
/**
* lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
* @phba: Pointer to HBA context object.
*
* This function is called by worker thread to send a mailbox command to
* SLI4 HBA firmware.
*
**/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
LPFC_MBOXQ_t *mboxq;
int rc = MBX_SUCCESS;
unsigned long iflags;
struct lpfc_mqe *mqe;
uint32_t mbx_cmnd;
/* Check interrupt mode before post async mailbox command */
if (unlikely(!phba->sli4_hba.intr_enable))
return MBX_NOT_FINISHED;
/* Check for mailbox command service token */
spin_lock_irqsave(&phba->hbalock, iflags);
if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
return MBX_NOT_FINISHED;
}
if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
return MBX_NOT_FINISHED;
}
if (unlikely(phba->sli.mbox_active)) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0384 There is pending active mailbox cmd\n");
return MBX_NOT_FINISHED;
}
/* Take the mailbox command service token */
psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
/* Get the next mailbox command from head of queue */
mboxq = lpfc_mbox_get(phba);
/* If no more mailbox command waiting for post, we're done */
if (!mboxq) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock, iflags);
return MBX_SUCCESS;
}
phba->sli.mbox_active = mboxq;
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Check device readiness for posting mailbox command */
rc = lpfc_mbox_dev_check(phba);
if (unlikely(rc))
/* Driver clean routine will clean up pending mailbox */
goto out_not_finished;
/* Prepare the mbox command to be posted */
mqe = &mboxq->u.mqe;
mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
/* Start timer for the mbox_tmo and log some mailbox post messages */
mod_timer(&psli->mbox_tmo, (jiffies +
(HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
"x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
lpfc_sli4_mbox_opcode_get(phba, mboxq),
phba->pport->port_state, psli->sli_flag);
if (mbx_cmnd != MBX_HEARTBEAT) {
if (mboxq->vport) {
lpfc_debugfs_disc_trc(mboxq->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Send vport: cmd:x%x mb:x%x x%x",
mbx_cmnd, mqe->un.mb_words[0],
mqe->un.mb_words[1]);
} else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Send: cmd:x%x mb:x%x x%x",
mbx_cmnd, mqe->un.mb_words[0],
mqe->un.mb_words[1]);
}
}
psli->slistat.mbox_cmd++;
/* Post the mailbox command to the port */
rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"(%d):2533 Mailbox command x%x (x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
lpfc_sli4_mbox_opcode_get(phba, mboxq),
psli->sli_flag, MBX_NOWAIT);
goto out_not_finished;
}
return rc;
out_not_finished:
spin_lock_irqsave(&phba->hbalock, iflags);
mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
__lpfc_mbox_cmpl_put(phba, mboxq);
/* Release the token */
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
phba->sli.mbox_active = NULL;
spin_unlock_irqrestore(&phba->hbalock, iflags);
return MBX_NOT_FINISHED;
}
/**
* lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
* @phba: Pointer to HBA context object.
* @pmbox: Pointer to mailbox object.
* @flag: Flag indicating how the mailbox needs to be processed.
*
* This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
* the API jump table function pointer from the lpfc_hba struct.
*
* Return codes: the caller owns the mailbox command after the return of the
* function.
**/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
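/*
 * Example (hedged sketch, not part of the original source): a common
 * caller pattern elsewhere in the driver is to allocate a mailbox from the
 * driver mempool, prepare it (for instance with a helper such as
 * lpfc_heart_beat()), and issue it through this wrapper:
 *
 *	LPFC_MBOXQ_t *pmb;
 *	int rc;
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_heart_beat(phba, pmb);
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *
 * With MBX_NOWAIT a return of MBX_BUSY or MBX_SUCCESS means the command is
 * queued or active and the completion handler will run later; only on
 * MBX_NOT_FINISHED does the caller free the mailbox itself.
 */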
/**
* lpfc_mbox_api_table_setup - Set up mbox api function jump table
* @phba: The hba struct for which this call is being executed.
* @dev_grp: The HBA PCI-Device group number.
*
* This routine sets up the mbox interface API function jump table in @phba
* struct.
* Returns: 0 - success, -ENODEV - failure.
**/
int
lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
switch (dev_grp) {
case LPFC_PCI_DEV_LP:
phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
phba->lpfc_sli_handle_slow_ring_event =
lpfc_sli_handle_slow_ring_event_s3;
phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
break;
case LPFC_PCI_DEV_OC:
phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
phba->lpfc_sli_handle_slow_ring_event =
lpfc_sli_handle_slow_ring_event_s4;
phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1420 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
}
return 0;
}
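/*
 * Example (illustrative): once the jump table is set up, common code stays
 * SLI-revision agnostic by always dispatching through the function pointer
 * rather than calling an _s3/_s4 variant directly:
 *
 *	lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	...
 *	rc = phba->lpfc_sli_issue_mbox(phba, pmbox, MBX_NOWAIT);
 *
 * which resolves to lpfc_sli_issue_mbox_s4() for LPFC_PCI_DEV_OC.
 */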
/**
* __lpfc_sli_ringtx_put - Add an iocb to the txq
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @piocb: Pointer to address of newly added command iocb.
*
* This function is called with hbalock held to add a command
* iocb to the txq when SLI layer cannot submit the command iocb
* to the ring.
**/
static void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *piocb)
{
/* Insert the caller's iocb in the txq tail for later processing. */
list_add_tail(&piocb->list, &pring->txq);
pring->txq_cnt++;
}
/**
* lpfc_sli_next_iocb - Get the next iocb in the txq
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @piocb: Pointer to address of newly added command iocb.
*
* This function is called with hbalock held before a new
* iocb is submitted to the firmware. This function checks
* txq to flush the iocbs in txq to Firmware before
* submitting new iocbs to the Firmware.
* If there are iocbs in the txq which need to be submitted
* to firmware, lpfc_sli_next_iocb returns the first element
* of the txq after dequeuing it from txq.
* If there is no iocb in the txq then the function will return
* *piocb and *piocb is set to NULL. Caller needs to check
* *piocb to find if there are more commands in the txq.
**/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq **piocb)
{
struct lpfc_iocbq * nextiocb;
nextiocb = lpfc_sli_ringtx_get(phba, pring);
if (!nextiocb) {
nextiocb = *piocb;
*piocb = NULL;
}
return nextiocb;
}
/**
* __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
* @phba: Pointer to HBA context object.
* @ring_number: SLI ring number to issue iocb on.
* @piocb: Pointer to command iocb.
* @flag: Flag indicating if this command can be put into txq.
*
* __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
* an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
* recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
* flag is turned on, the function returns IOCB_ERROR. When the link is down,
* this function allows only iocbs for posting buffers. This function finds
* next available slot in the command ring and posts the command to the
* available slot and writes the port attention register to request HBA start
* processing new iocb. If there is no slot available in the ring and
* flag & SLI_IOCB_RET_IOCB is not set, the new iocb is added to the txq;
* otherwise the function returns IOCB_BUSY.
*
* This function is called with hbalock held. The function will return success
* after it successfully submits the iocb to firmware or after adding it to the
* txq.
**/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_iocbq *nextiocb;
IOCB_t *iocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
if (piocb->iocb_cmpl && (!piocb->vport) &&
(piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
(piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
lpfc_printf_log(phba, KERN_ERR,
LOG_SLI | LOG_VPORT,
"1807 IOCB x%x failed. No vport\n",
piocb->iocb.ulpCommand);
dump_stack();
return IOCB_ERROR;
}
/* If the PCI channel is in offline state, do not post iocbs. */
if (unlikely(pci_channel_offline(phba->pcidev)))
return IOCB_ERROR;
/* If HBA has a deferred error attention, fail the iocb. */
if (unlikely(phba->hba_flag & DEFER_ERATT))
return IOCB_ERROR;
/*
* We should never get an IOCB if we are in a < LINK_DOWN state
*/
if (unlikely(phba->link_state < LPFC_LINK_DOWN))
return IOCB_ERROR;
/*
* Check to see if we are blocking IOCB processing because of an
* outstanding event.
*/
if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
goto iocb_busy;
if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
/*
* Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
* can be issued if the link is not up.
*/
switch (piocb->iocb.ulpCommand) {
case CMD_GEN_REQUEST64_CR:
case CMD_GEN_REQUEST64_CX:
if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
FC_FCP_CMND) ||
(piocb->iocb.un.genreq64.w5.hcsw.Type !=
MENLO_TRANSPORT_TYPE))
goto iocb_busy;
break;
case CMD_QUE_RING_BUF_CN:
case CMD_QUE_RING_BUF64_CN:
/*
* For IOCBs, like QUE_RING_BUF, that have no rsp ring
* completion, iocb_cmpl MUST be 0.
*/
if (piocb->iocb_cmpl)
piocb->iocb_cmpl = NULL;
/*FALLTHROUGH*/
case CMD_CREATE_XRI_CR:
case CMD_CLOSE_XRI_CN:
case CMD_CLOSE_XRI_CX:
break;
default:
goto iocb_busy;
}
/*
* For FCP commands, we must be in a state where we can process link
* attention events.
*/
} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
!(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
goto iocb_busy;
}
while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
(nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
if (iocb)
lpfc_sli_update_ring(phba, pring);
else
lpfc_sli_update_full_ring(phba, pring);
if (!piocb)
return IOCB_SUCCESS;
goto out_busy;
iocb_busy:
pring->stats.iocb_cmd_delay++;
out_busy:
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba, pring, piocb);
return IOCB_SUCCESS;
}
return IOCB_BUSY;
}
/**
* lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
* @phba: Pointer to HBA context object.
* @piocbq: Pointer to command iocb.
* @sglq: Pointer to the scatter gather queue object.
*
* This routine converts the bpl or bde that is in the IOCB
* to a sgl list for the sli4 hardware. The physical address
* of the bpl/bde is converted back to a virtual address.
* If the IOCB contains a BPL then the list of BDE's is
* converted to sli4_sge's. If the IOCB contains a single
* BDE then it is converted to a single sli_sge.
* The IOCB is still in cpu endianness so the contents of
* the bpl can be used without byte swapping.
*
* Returns valid XRI = Success, NO_XRI = Failure.
**/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
struct lpfc_sglq *sglq)
{
uint16_t xritag = NO_XRI;
struct ulp_bde64 *bpl = NULL;
struct ulp_bde64 bde;
struct sli4_sge *sgl = NULL;
IOCB_t *icmd;
int numBdes = 0;
int i = 0;
if (!piocbq || !sglq)
return xritag;
sgl = (struct sli4_sge *)sglq->sgl;
icmd = &piocbq->iocb;
if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
numBdes = icmd->un.genreq64.bdl.bdeSize /
sizeof(struct ulp_bde64);
/* The addrHigh and addrLow fields within the IOCB
* have not been byteswapped yet so there is no
* need to swap them back.
*/
bpl = (struct ulp_bde64 *)
((struct lpfc_dmabuf *)piocbq->context3)->virt;
if (!bpl)
return xritag;
for (i = 0; i < numBdes; i++) {
/* Should already be byte swapped. */
sgl->addr_hi = bpl->addrHigh;
sgl->addr_lo = bpl->addrLow;
/* swap the size field back to the cpu so we
* can assign it to the sgl.
*/
bde.tus.w = le32_to_cpu(bpl->tus.w);
bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
if ((i+1) == numBdes)
bf_set(lpfc_sli4_sge_last, sgl, 1);
else
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->word3 = cpu_to_le32(sgl->word3);
bpl++;
sgl++;
}
} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
/* The addrHigh and addrLow fields of the BDE have not
* been byteswapped yet so they need to be swapped
* before putting them in the sgl.
*/
sgl->addr_hi =
cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
sgl->addr_lo =
cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
bf_set(lpfc_sli4_sge_len, sgl,
icmd->un.genreq64.bdl.bdeSize);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->word3 = cpu_to_le32(sgl->word3);
}
return sglq->sli4_xritag;
}
/**
* lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
* @phba: Pointer to HBA context object.
*
* This routine performs a round robin SCSI command to SLI4 FCP WQ index
* distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
* held.
*
* Return: index into SLI4 fast-path FCP queue index.
**/
static uint32_t
lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
{
++phba->fcp_qidx;
if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
phba->fcp_qidx = 0;
return phba->fcp_qidx;
}
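/*
 * Example (illustrative): with cfg_fcp_wq_count == 4, successive calls
 * return 1, 2, 3, 0, 1, ... spreading SCSI commands evenly across the
 * fast-path work queues. The open-coded wrap is equivalent to the modular
 * form below but avoids the division implied by '%':
 *
 *	phba->fcp_qidx = (phba->fcp_qidx + 1) % phba->cfg_fcp_wq_count;
 *	return phba->fcp_qidx;
 */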
/**
* lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
* @phba: Pointer to HBA context object.
* @iocbq: Pointer to command iocb.
* @wqe: Pointer to the work queue entry.
*
* This routine converts the iocb command to its Work Queue Entry
* equivalent. The wqe pointer should not have any fields set when
* this routine is called because it will memcpy over them.
* This routine does not set the CQ_ID or the WQEC bits in the
* wqe.
*
* Returns: 0 = Success, IOCB_ERROR = Failure.
**/
static int
lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
union lpfc_wqe *wqe)
{
uint32_t payload_len = 0;
uint8_t ct = 0;
uint32_t fip;
uint32_t abort_tag;
uint8_t command_type = ELS_COMMAND_NON_FIP;
uint8_t cmnd;
uint16_t xritag;
struct ulp_bde64 *bpl = NULL;
fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
/* The fcp commands will set command type */
if (iocbq->iocb_flag & LPFC_IO_FCP)
command_type = FCP_COMMAND;
else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS))
command_type = ELS_COMMAND_FIP;
else
command_type = ELS_COMMAND_NON_FIP;
/* Some of the fields are in the right position already */
memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
abort_tag = (uint32_t) iocbq->iotag;
xritag = iocbq->sli4_xritag;
wqe->words[7] = 0; /* The ct field has moved so reset */
/* words0-2 bpl convert bde */
if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
bpl = (struct ulp_bde64 *)
((struct lpfc_dmabuf *)iocbq->context3)->virt;
if (!bpl)
return IOCB_ERROR;
/* Should already be byte swapped. */
wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
/* swap the size field back to the cpu so we
* can assign it to the sgl.
*/
wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
payload_len = wqe->generic.bde.tus.f.bdeSize;
} else
payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
iocbq->iocb.ulpIoTag = iocbq->iotag;
cmnd = iocbq->iocb.ulpCommand;
switch (iocbq->iocb.ulpCommand) {
case CMD_ELS_REQUEST64_CR:
if (!iocbq->iocb.ulpLe) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2007 Only Limited Edition cmd Format"
" supported 0x%x\n",
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
}
wqe->els_req.payload_len = payload_len;
/* Els_request64 has a TMO */
bf_set(wqe_tmo, &wqe->els_req.wqe_com,
iocbq->iocb.ulpTimeout);
/* Need a VF for word 4; set the vf bit */
bf_set(els_req64_vf, &wqe->els_req, 0);
/* And a VFID for word 12 */
bf_set(els_req64_vfid, &wqe->els_req, 0);
/*
* Set ct field to 3, indicates that the context_tag field
* contains the FCFI and remote N_Port_ID is
* in word 5.
*/
ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
bf_set(lpfc_wqe_gen_context, &wqe->generic,
iocbq->iocb.ulpContext);
bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
/* CCP CCPE PV PRI in word10 were set in the memcpy */
break;
case CMD_XMIT_SEQUENCE64_CR:
/* word3 iocb=io_tag32 wqe=payload_offset */
/* payload offset used for multiple outstanding
* sequences on the same exchange
*/
wqe->words[3] = 0;
/* word4 relative_offset memcpy */
/* word5 r_ctl/df_ctl memcpy */
bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
wqe->xmit_sequence.xmit_len = payload_len;
break;
case CMD_XMIT_BCAST64_CN:
/* word3 iocb=iotag32 wqe=payload_len */
wqe->words[3] = 0; /* no definition for this in wqe */
/* word4 iocb=rsvd wqe=rsvd */
/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
bf_set(lpfc_wqe_gen_ct, &wqe->generic,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
break;
case CMD_FCP_IWRITE64_CR:
command_type = FCP_COMMAND_DATA_OUT;
/* The struct for wqe fcp_iwrite has 3 fields that are somewhat
* confusing.
* word3 is payload_len: byte offset to the sgl entry for the
* fcp_command.
* word4 is total xfer len, same as the IOCB->ulpParameter.
* word5 is initial xfer len 0 = wait for xfer-ready
*/
/* Always wait for xfer-ready before sending data */
wqe->fcp_iwrite.initial_xfer_len = 0;
/* word 4 (xfer length) should have been set on the memcpy */
/* allow write to fall through to read */
case CMD_FCP_IREAD64_CR:
/* FCP_CMD is always the 1st sgl entry */
wqe->fcp_iread.payload_len =
payload_len + sizeof(struct fcp_rsp);
/* word 4 (xfer length) should have been set on the memcpy */
bf_set(lpfc_wqe_gen_erp, &wqe->generic,
iocbq->iocb.ulpFCP2Rcvy);
bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
/* The XC bit and the XS bit are similar. The driver never
* tracked whether or not the exchange was previously open.
* XC = Exchange create, 0 is create. 1 is already open.
* XS = link cmd: 1 do not close the exchange after command.
* XS = 0 close exchange when command completes.
* The only time we would not set the XC bit is when the XS bit
* is set and we are sending our 2nd or greater command on
* this exchange.
*/
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
wqe->words[10] &= 0xffff0000; /* zero out ebde count */
bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
break;
case CMD_FCP_ICMND64_CR:
/* Always open the exchange */
bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
wqe->words[4] = 0;
wqe->words[10] &= 0xffff0000; /* zero out ebde count */
bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
break;
case CMD_GEN_REQUEST64_CR:
/* word3 command length is described as byte offset to the
* rsp_data. Would always be 16, sizeof(struct sli4_sge)
* sgl[0] = cmnd
* sgl[1] = rsp.
*
*/
wqe->gen_req.command_len = payload_len;
/* Word4 parameter copied in the memcpy */
/* Word5 [rctl, type, df_ctl, la] copied in memcpy */
/* word6 context tag copied in memcpy */
if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2015 Invalid CT %x command 0x%x\n",
ct, iocbq->iocb.ulpCommand);
return IOCB_ERROR;
}
bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
iocbq->iocb.ulpTimeout);
bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
command_type = OTHER_COMMAND;
break;
case CMD_XMIT_ELS_RSP64_CX:
/* words0-2 BDE memcpy */
/* word3 iocb=iotag32 wqe=rsvd */
wqe->words[3] = 0;
/* word4 iocb=did wqe=rsvd */
wqe->words[4] = 0;
/* word5 iocb=rsvd wqe=did */
bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
iocbq->iocb.un.elsreq64.remoteID);
bf_set(lpfc_wqe_gen_ct, &wqe->generic,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
bf_set(lpfc_wqe_gen_context, &wqe->generic,
iocbq->vport->vpi + phba->vpi_base);
command_type = OTHER_COMMAND;
break;
case CMD_CLOSE_XRI_CN:
case CMD_ABORT_XRI_CN:
case CMD_ABORT_XRI_CX:
/* words 0-2 memcpy should be 0 (reserved) */
/* port will send abts */
if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
/*
* The link is down so the fw does not need to send abts
* on the wire.
*/
bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
else
bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
abort_tag = iocbq->iocb.un.acxri.abortIoTag;
wqe->words[5] = 0;
bf_set(lpfc_wqe_gen_ct, &wqe->generic,
((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
wqe->generic.abort_tag = abort_tag;
/*
* The abort handler will send us CMD_ABORT_XRI_CN or
* CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
*/
bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
cmnd = CMD_ABORT_XRI_CX;
command_type = OTHER_COMMAND;
xritag = 0;
break;
case CMD_XRI_ABORTED_CX:
case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
/* words0-2 are all 0's no bde */
/* word3 and word4 are reserved */
wqe->words[3] = 0;
wqe->words[4] = 0;
/* word5 iocb=rsvd wqe=did */
/* There is no remote port id in the IOCB? */
/* Let this fall through and fail */
case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
case CMD_FCP_TRSP64_CX: /* Target mode rcv */
case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2014 Invalid command 0x%x\n",
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
break;
}
bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
wqe->generic.abort_tag = abort_tag;
bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
return 0;
}
/**
* __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
* @phba: Pointer to HBA context object.
* @ring_number: SLI ring number to issue iocb on.
* @piocb: Pointer to command iocb.
* @flag: Flag indicating if this command can be put into txq.
*
* __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
* an iocb command to an HBA with SLI-4 interface spec.
*
* This function is called with hbalock held. The function will return success
* after it successfully submits the iocb to firmware or after adding it to the
* txq.
**/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
struct lpfc_sglq *sglq;
uint16_t xritag;
union lpfc_wqe wqe;
struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
uint32_t fcp_wqidx;
if (piocb->sli4_xritag == NO_XRI) {
if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
sglq = NULL;
else {
sglq = __lpfc_sli_get_sglq(phba);
if (!sglq)
return IOCB_ERROR;
piocb->sli4_xritag = sglq->sli4_xritag;
}
} else if (piocb->iocb_flag & LPFC_IO_FCP) {
sglq = NULL; /* These IO's already have an XRI and
* a mapped sgl.
*/
} else {
/* This is a continuation of a command (CX), so this
* sglq is on the active list
*/
sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
if (!sglq)
return IOCB_ERROR;
}
if (sglq) {
xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
if (xritag != sglq->sli4_xritag)
return IOCB_ERROR;
}
if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
return IOCB_ERROR;
if (piocb->iocb_flag & LPFC_IO_FCP) {
fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
return IOCB_ERROR;
} else {
if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
return IOCB_ERROR;
}
lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
return 0;
}
/**
* __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
*
* This routine wraps the actual lockless version for issuing an IOCB, using
* the function pointer from the lpfc_hba struct.
*
* Return codes:
* IOCB_ERROR - Error
* IOCB_SUCCESS - Success
* IOCB_BUSY - Busy
**/
static inline int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}
/**
* lpfc_sli_api_table_setup - Set up sli api function jump table
* @phba: The hba struct for which this call is being executed.
* @dev_grp: The HBA PCI-Device group number.
*
* This routine sets up the SLI interface API function jump table in @phba
* struct.
* Returns: 0 - success, -ENODEV - failure.
**/
int
lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
switch (dev_grp) {
case LPFC_PCI_DEV_LP:
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
break;
case LPFC_PCI_DEV_OC:
phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1419 Invalid HBA PCI-device group: 0x%x\n",
dev_grp);
return -ENODEV;
}
phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
return 0;
}
/**
* lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
* @phba: Pointer to HBA context object.
* @ring_number: SLI ring number to issue iocb on.
* @piocb: Pointer to command iocb.
* @flag: Flag indicating if this command can be put into txq.
*
* lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
* function. This function gets the hbalock and calls
* __lpfc_sli_issue_iocb function and will return the error returned
* by __lpfc_sli_issue_iocb function. This wrapper is used by
* functions which do not hold hbalock.
**/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
unsigned long iflags;
int rc;
spin_lock_irqsave(&phba->hbalock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&phba->hbalock, iflags);
return rc;
}
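/*
 * Example (hedged sketch, not part of the original source): issuing an
 * ELS-ring iocb from a context that does not hold hbalock. The wrapper
 * takes the lock itself, so the caller only passes the ring number and a
 * prepared iocb:
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_sli_release_iocbq(phba, piocb);
 *
 * Passing flag = SLI_IOCB_RET_IOCB instead asks for IOCB_BUSY back when the
 * ring is full, rather than having the iocb parked on the txq.
 */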
/**
* lpfc_extra_ring_setup - Extra ring setup function
* @phba: Pointer to HBA context object.
*
* This function is called while driver attaches with the
* HBA to setup the extra ring. The extra ring is used
* only when driver needs to support target mode functionality
* or IP over FC functionalities.
*
* This function is called with no lock held.
**/
static int
lpfc_extra_ring_setup( struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
psli = &phba->sli;
/* Adjust cmd/rsp ring iocb entries more evenly */
/* Take some away from the FCP ring */
pring = &psli->ring[psli->fcp_ring];
pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
/* and give them to the extra ring */
pring = &psli->ring[psli->extra_ring];
pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
/* Setup default profile for this ring */
pring->iotag_max = 4096;
pring->num_mask = 1;
pring->prt[0].profile = 0; /* Mask 0 */
pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
pring->prt[0].type = phba->cfg_multi_ring_type;
pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
return 0;
}
/**
* lpfc_sli_async_event_handler - ASYNC iocb handler function
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @iocbq: Pointer to iocb object.
*
* This function is called by the slow ring event handler
* function when there is an ASYNC event iocb in the ring.
* This function is called with no lock held.
* Currently this function handles only temperature related
* ASYNC events. The function decodes the temperature sensor
* event message and posts events for the management applications.
**/
static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,
struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
{
IOCB_t *icmd;
uint16_t evt_code;
uint16_t temp;
struct temp_event temp_event_data;
struct Scsi_Host *shost;
uint32_t *iocb_w;
icmd = &iocbq->iocb;
evt_code = icmd->un.asyncstat.evt_code;
temp = icmd->ulpContext;
if ((evt_code != ASYNC_TEMP_WARN) &&
(evt_code != ASYNC_TEMP_SAFE)) {
iocb_w = (uint32_t *) icmd;
lpfc_printf_log(phba,
KERN_ERR,
LOG_SLI,
"0346 Ring %d handler: unexpected ASYNC_STATUS"
" evt_code 0x%x\n"
"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
pring->ringno,
icmd->un.asyncstat.evt_code,
iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
return;
}
temp_event_data.data = (uint32_t)temp;
temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
if (evt_code == ASYNC_TEMP_WARN) {
temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
lpfc_printf_log(phba,
KERN_ERR,
LOG_TEMP,
"0347 Adapter is very hot, please take "
"corrective action. temperature : %d Celsius\n",
temp);
}
if (evt_code == ASYNC_TEMP_SAFE) {
temp_event_data.event_code = LPFC_NORMAL_TEMP;
lpfc_printf_log(phba,
KERN_ERR,
LOG_TEMP,
"0340 Adapter temperature is OK now. "
"temperature : %d Celsius\n",
temp);
}
/* Send temperature change event to applications */
shost = lpfc_shost_from_vport(phba->pport);
fc_host_post_vendor_event(shost, fc_get_event_number(),
sizeof(temp_event_data), (char *) &temp_event_data,
LPFC_NL_VENDOR_ID);
}
/**
* lpfc_sli_setup - SLI ring setup function
* @phba: Pointer to HBA context object.
*
* lpfc_sli_setup sets up rings of the SLI interface with
* the number of iocbs per ring and iotags. This function is
* called while the driver attaches to the HBA and before the
* interrupts are enabled. So there is no need for locking.
*
* This function always returns 0.
**/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
int i, totiocbsize = 0;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
psli->num_rings = MAX_CONFIGURED_RINGS;
psli->sli_flag = 0;
psli->fcp_ring = LPFC_FCP_RING;
psli->next_ring = LPFC_FCP_NEXT_RING;
psli->extra_ring = LPFC_EXTRA_RING;
psli->iocbq_lookup = NULL;
psli->iocbq_lookup_len = 0;
psli->last_iotag = 0;
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
switch (i) {
case LPFC_FCP_RING: /* ring 0 - FCP */
/* numCiocb and numRiocb are used in config_port */
pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
pring->sizeCiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
pring->sizeRiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
pring->iotag_ctr = 0;
pring->iotag_max =
(phba->cfg_hba_queue_depth * 2);
pring->fast_iotag = pring->iotag_max;
pring->num_mask = 0;
break;
case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
/* numCiocb and numRiocb are used in config_port */
pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
pring->sizeCiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
pring->sizeRiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
pring->iotag_max = phba->cfg_hba_queue_depth;
pring->num_mask = 0;
break;
case LPFC_ELS_RING: /* ring 2 - ELS / CT */
/* numCiocb and numRiocb are used in config_port */
pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
pring->sizeCiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_CMD_SIZE :
SLI2_IOCB_CMD_SIZE;
pring->sizeRiocb = (phba->sli_rev == 3) ?
SLI3_IOCB_RSP_SIZE :
SLI2_IOCB_RSP_SIZE;
pring->fast_iotag = 0;
pring->iotag_ctr = 0;
pring->iotag_max = 4096;
pring->lpfc_sli_rcv_async_status =
lpfc_sli_async_event_handler;
pring->num_mask = 4;
pring->prt[0].profile = 0; /* Mask 0 */
pring->prt[0].rctl = FC_ELS_REQ;
pring->prt[0].type = FC_ELS_DATA;
pring->prt[0].lpfc_sli_rcv_unsol_event =
lpfc_els_unsol_event;
pring->prt[1].profile = 0; /* Mask 1 */
pring->prt[1].rctl = FC_ELS_RSP;
pring->prt[1].type = FC_ELS_DATA;
pring->prt[1].lpfc_sli_rcv_unsol_event =
lpfc_els_unsol_event;
pring->prt[2].profile = 0; /* Mask 2 */
/* NameServer Inquiry */
pring->prt[2].rctl = FC_UNSOL_CTL;
/* NameServer */
pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
pring->prt[2].lpfc_sli_rcv_unsol_event =
lpfc_ct_unsol_event;
pring->prt[3].profile = 0; /* Mask 3 */
/* NameServer response */
pring->prt[3].rctl = FC_SOL_CTL;
/* NameServer */
pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
pring->prt[3].lpfc_sli_rcv_unsol_event =
lpfc_ct_unsol_event;
break;
}
totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
(pring->numRiocb * pring->sizeRiocb);
}
if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
/* Too many cmd / rsp ring entries in SLI2 SLIM */
printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
"SLI2 SLIM Data: x%x x%lx\n",
phba->brd_no, totiocbsize,
(unsigned long) MAX_SLIM_IOCB_SIZE);
}
if (phba->cfg_multi_ring_support == 2)
lpfc_extra_ring_setup(phba);
return 0;
}
/**
* lpfc_sli_queue_setup - Queue initialization function
* @phba: Pointer to HBA context object.
*
* lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
* ring. This function also initializes ring indices of each ring.
* This function is called during the initialization of the SLI
* interface of an HBA.
* This function is called with no lock held and always returns
* 1.
**/
int
lpfc_sli_queue_setup(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
int i;
psli = &phba->sli;
spin_lock_irq(&phba->hbalock);
INIT_LIST_HEAD(&psli->mboxq);
INIT_LIST_HEAD(&psli->mboxq_cmpl);
/* Initialize list headers for txq and txcmplq as double linked lists */
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
pring->ringno = i;
pring->next_cmdidx = 0;
pring->local_getidx = 0;
pring->cmdidx = 0;
INIT_LIST_HEAD(&pring->txq);
INIT_LIST_HEAD(&pring->txcmplq);
INIT_LIST_HEAD(&pring->iocb_continueq);
INIT_LIST_HEAD(&pring->iocb_continue_saveq);
INIT_LIST_HEAD(&pring->postbufq);
}
spin_unlock_irq(&phba->hbalock);
return 1;
}
/**
* lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
* @phba: Pointer to HBA context object.
*
* This routine flushes the mailbox command subsystem. It will unconditionally
* flush all the mailbox commands in the three possible stages in the mailbox
* command sub-system: pending mailbox command queue; the outstanding mailbox
* command; and completed mailbox command queue. It is the caller's responsibility
* to make sure that the driver is in the proper state to flush the mailbox
* command sub-system. Namely, the posting of mailbox commands into the
* pending mailbox command queue from the various clients must be stopped;
* either the HBA is in a state where it will never work on the outstanding
* mailbox command (such as in EEH or ERATT conditions) or the outstanding
* mailbox command has been completed.
**/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
struct lpfc_sli *psli = &phba->sli;
LPFC_MBOXQ_t *pmb;
unsigned long iflag;
/* Flush all the mailbox commands in the mbox system */
spin_lock_irqsave(&phba->hbalock, iflag);
/* The pending mailbox command queue */
list_splice_init(&phba->sli.mboxq, &completions);
/* The outstanding active mailbox command */
if (psli->mbox_active) {
list_add_tail(&psli->mbox_active->list, &completions);
psli->mbox_active = NULL;
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
}
/* The completed mailbox command queue */
list_splice_init(&phba->sli.mboxq_cmpl, &completions);
spin_unlock_irqrestore(&phba->hbalock, iflag);
/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
while (!list_empty(&completions)) {
list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
if (pmb->mbox_cmpl)
pmb->mbox_cmpl(phba, pmb);
}
}
/**
* lpfc_sli_host_down - Vport cleanup function
* @vport: Pointer to virtual port object.
*
* lpfc_sli_host_down is called to clean up the resources
* associated with a vport before destroying virtual
* port data structures.
* This function does the following operations:
* - Free discovery resources associated with this virtual
* port.
* - Free iocbs associated with this virtual port in
* the txq.
* - Send abort for all iocb commands associated with this
* vport in txcmplq.
*
* This function is called with no lock held and always returns 1.
**/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
LIST_HEAD(completions);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
int i;
unsigned long flags = 0;
uint16_t prev_pring_flag;
lpfc_cleanup_discovery_resources(vport);
spin_lock_irqsave(&phba->hbalock, flags);
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
prev_pring_flag = pring->flag;
/* Only slow rings */
if (pring->ringno == LPFC_ELS_RING) {
pring->flag |= LPFC_DEFERRED_RING_EVENT;
/* Set the lpfc data pending flag */
set_bit(LPFC_DATA_READY, &phba->data_flags);
}
/*
* Error everything on the txq since these iocbs have not been
* given to the FW yet.
*/
list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
if (iocb->vport != vport)
continue;
list_move_tail(&iocb->list, &completions);
pring->txq_cnt--;
}
/* Next issue ABTS for everything on the txcmplq */
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
list) {
if (iocb->vport != vport)
continue;
lpfc_sli_issue_abort_iotag(phba, pring, iocb);
}
pring->flag = prev_pring_flag;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_DOWN);
return 1;
}
/**
* lpfc_sli_hba_down - Resource cleanup function for the HBA
* @phba: Pointer to HBA context object.
*
* This function cleans up all iocb, buffers, mailbox commands
* while shutting down the HBA. This function is called with no
* lock held and always returns 1.
* This function does the following to clean up driver resources:
* - Free discovery resources for each virtual port
* - Cleanup any pending fabric iocbs
* - Iterate through the iocb txq and free each entry
* in the list.
* - Free up any buffer posted to the HBA
* - Free mailbox commands in the mailbox queue.
**/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
struct lpfc_dmabuf *buf_ptr;
unsigned long flags = 0;
int i;
/* Shutdown the mailbox command sub-system */
lpfc_sli_mbox_sys_shutdown(phba);
lpfc_hba_down_prep(phba);
lpfc_fabric_abort_hba(phba);
spin_lock_irqsave(&phba->hbalock, flags);
for (i = 0; i < psli->num_rings; i++) {
pring = &psli->ring[i];
/* Only slow rings */
if (pring->ringno == LPFC_ELS_RING) {
pring->flag |= LPFC_DEFERRED_RING_EVENT;
/* Set the lpfc data pending flag */
set_bit(LPFC_DATA_READY, &phba->data_flags);
}
/*
* Error everything on the txq since these iocbs have not been
* given to the FW yet.
*/
list_splice_init(&pring->txq, &completions);
pring->txq_cnt = 0;
}
spin_unlock_irqrestore(&phba->hbalock, flags);
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_DOWN);
spin_lock_irqsave(&phba->hbalock, flags);
list_splice_init(&phba->elsbuf, &completions);
phba->elsbuf_cnt = 0;
phba->elsbuf_prev_cnt = 0;
spin_unlock_irqrestore(&phba->hbalock, flags);
while (!list_empty(&completions)) {
list_remove_head(&completions, buf_ptr,
struct lpfc_dmabuf, list);
lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
kfree(buf_ptr);
}
/* Return any active mbox cmds */
del_timer_sync(&psli->mbox_tmo);
spin_lock_irqsave(&phba->pport->work_port_lock, flags);
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
return 1;
}
/**
* lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
* @phba: Pointer to HBA context object.
*
* This function cleans up all queues, iocb, buffers, mailbox commands while
* shutting down the SLI4 HBA FCoE function. This function is called with no
* lock held and always returns 1.
*
* This function does the following to clean up driver FCoE function resources:
* - Free discovery resources for each virtual port
* - Cleanup any pending fabric iocbs
* - Iterate through the iocb txq and free each entry in the list.
* - Free up any buffer posted to the HBA.
* - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
* - Free mailbox commands in the mailbox queue.
**/
int
lpfc_sli4_hba_down(struct lpfc_hba *phba)
{
/* Stop the SLI4 device port */
lpfc_stop_port(phba);
/* Tear down the queues in the HBA */
lpfc_sli4_queue_unset(phba);
/* unregister default FCFI from the HBA */
lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
return 1;
}
/**
* lpfc_sli_pcimem_bcopy - SLI memory copy function
* @srcp: Source memory pointer.
* @destp: Destination memory pointer.
* @cnt: Number of bytes to be copied, one 32-bit word at a time.
*
* This function is used for copying data between driver memory
* and the SLI memory. This function also changes the endianness
* of each word if native endianness is different from SLI
* endianness. This function can be called with or without
* lock.
**/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
uint32_t *src = srcp;
uint32_t *dest = destp;
uint32_t ldata;
int i;
for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
ldata = *src;
ldata = le32_to_cpu(ldata);
*dest = ldata;
src++;
dest++;
}
}
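/*
 * Example (illustrative): copying a four-word (16-byte) little-endian SLI
 * region into host memory; note that cnt is a byte count and sli_region is
 * a hypothetical source pointer:
 *
 *	uint32_t host_copy[4];
 *	lpfc_sli_pcimem_bcopy(sli_region, host_copy, sizeof(host_copy));
 *
 * On little-endian CPUs le32_to_cpu() is a no-op and this degenerates to a
 * plain word copy; on big-endian hosts each 32-bit word is byte-swapped.
 */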
/**
* lpfc_sli_bemem_bcopy - SLI memory copy function
* @srcp: Source memory pointer.
* @destp: Destination memory pointer.
* @cnt: Number of bytes to be copied, one 32-bit word at a time.
*
* This function is used for copying data from a data structure
* with big-endian representation to local endianness.
* This function can be called with or without lock.
**/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
uint32_t *src = srcp;
uint32_t *dest = destp;
uint32_t ldata;
int i;
for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
ldata = *src;
ldata = be32_to_cpu(ldata);
*dest = ldata;
src++;
dest++;
}
}
/**
* lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @mp: Pointer to driver buffer object.
*
* This function is called with no lock held.
* It always returns zero after adding the buffer to the postbufq
* buffer list.
**/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_dmabuf *mp)
{
/* Stick the struct lpfc_dmabuf at the end of postbufq so the driver can
* look it up later */
spin_lock_irq(&phba->hbalock);
list_add_tail(&mp->list, &pring->postbufq);
pring->postbufq_cnt++;
spin_unlock_irq(&phba->hbalock);
return 0;
}
/**
* lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
* @phba: Pointer to HBA context object.
*
* When HBQ is enabled, buffers are searched based on tags. This function
* allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
* tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
* does not conflict with tags of buffer posted for unsolicited events.
* The function returns the allocated tag. The function is called with
* no locks held.
**/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
spin_lock_irq(&phba->hbalock);
phba->buffer_tag_count++;
/*
* Always set the QUE_BUFTAG_BIT to distinguish this tag from
* a tag assigned by the HBQ.
*/
phba->buffer_tag_count |= QUE_BUFTAG_BIT;
spin_unlock_irq(&phba->hbalock);
return phba->buffer_tag_count;
}
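/*
 * Example (hedged sketch): pairing the tag allocator with the postbufq
 * helpers. A buffer posted via a CMD_QUE_XRI64_CX iocb is tagged first and
 * later recovered by tag when the matching CMD_IOCB_RET_XRI64_CX response
 * arrives (tag_from_iocb stands in for the tag carried by that response):
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag_from_iocb);
 *
 * lpfc_sli_ring_taggedbuf_get() is defined just below.
 */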
/**
* lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @tag: Buffer tag.
*
* Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
* list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
* iocb is posted to the response ring with the tag of the buffer.
* This function searches the pring->postbufq list using the tag
* to find buffer associated with CMD_IOCB_RET_XRI64_CX
* iocb. If the buffer is found then the lpfc_dmabuf object of the
* buffer is returned to the caller; otherwise NULL is returned.
* This function is called with no lock held.
**/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
uint32_t tag)
{
struct lpfc_dmabuf *mp, *next_mp;
struct list_head *slp = &pring->postbufq;
/* Search postbufq, from the beginning, looking for a match on tag */
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
if (mp->buffer_tag == tag) {
list_del_init(&mp->list);
pring->postbufq_cnt--;
spin_unlock_irq(&phba->hbalock);
return mp;
}
}
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0402 Cannot find virtual addr for buffer tag on "
"ring %d Data x%lx x%p x%p x%x\n",
pring->ringno, (unsigned long) tag,
slp->next, slp->prev, pring->postbufq_cnt);
return NULL;
}
/**
* lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @phys: DMA address of the buffer.
*
* This function searches the buffer list using the dma_address
* of unsolicited event to find the driver's lpfc_dmabuf object
* corresponding to the dma_address. The function returns the
* lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
* This function is called by the ct and els unsolicited event
* handlers to get the buffer associated with the unsolicited
* event.
*
* This function is called with no lock held.
**/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
dma_addr_t phys)
{
struct lpfc_dmabuf *mp, *next_mp;
struct list_head *slp = &pring->postbufq;
/* Search postbufq, from the beginning, looking for a match on phys */
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
if (mp->phys == phys) {
list_del_init(&mp->list);
pring->postbufq_cnt--;
spin_unlock_irq(&phba->hbalock);
return mp;
}
}
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0410 Cannot find virtual addr for mapped buf on "
"ring %d Data x%llx x%p x%p x%x\n",
pring->ringno, (unsigned long long)phys,
slp->next, slp->prev, pring->postbufq_cnt);
return NULL;
}
/**
* lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
* @phba: Pointer to HBA context object.
* @cmdiocb: Pointer to driver command iocb object.
* @rspiocb: Pointer to driver response iocb object.
*
* This function is the completion handler for the abort iocbs for
* ELS commands. This function is called from the ELS ring event
* handler with no lock held. This function frees memory resources
* associated with the abort iocb.
**/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
IOCB_t *irsp = &rspiocb->iocb;
uint16_t abort_iotag, abort_context;
struct lpfc_iocbq *abort_iocb;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
abort_iocb = NULL;
if (irsp->ulpStatus) {
abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
spin_lock_irq(&phba->hbalock);
if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
"0327 Cannot abort els iocb %p "
"with tag %x context %x, abort status %x, "
"abort code %x\n",
abort_iocb, abort_iotag, abort_context,
irsp->ulpStatus, irsp->un.ulpWord[4]);
/*
* If the iocb is not found in the firmware queue, the iocb
* might have completed already. Do not free it again.
*/
if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
spin_unlock_irq(&phba->hbalock);
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
/*
* make sure we have the right iocbq before taking it
* off the txcmplq and trying to call its completion routine.
*/
if (!abort_iocb ||
abort_iocb->iocb.ulpContext != abort_context ||
(abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
spin_unlock_irq(&phba->hbalock);
else {
list_del_init(&abort_iocb->list);
pring->txcmplq_cnt--;
spin_unlock_irq(&phba->hbalock);
/* Firmware could still be in the process of DMAing the
* payload, so don't free the data buffer until after
* a heartbeat.
*/
abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
}
}
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
/**
* lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
* @phba: Pointer to HBA context object.
* @cmdiocb: Pointer to driver command iocb object.
* @rspiocb: Pointer to driver response iocb object.
*
* The function is called from SLI ring event handler with no
* lock held. This function is the completion handler for ELS commands
* which are aborted. The function frees memory resources used for
* the aborted ELS commands.
**/
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
IOCB_t *irsp = &rspiocb->iocb;
/* ELS cmd tag <ulpIoTag> completes */
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"0139 Ignoring ELS cmd tag x%x completion Data: "
"x%x x%x x%x\n",
irsp->ulpIoTag, irsp->ulpStatus,
irsp->un.ulpWord[4], irsp->ulpTimeout);
if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
lpfc_ct_free_iocb(phba, cmdiocb);
else
lpfc_els_free_iocb(phba, cmdiocb);
return;
}
/**
* lpfc_sli_issue_abort_iotag - Abort function for a command iocb
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
* @cmdiocb: Pointer to driver command iocb object.
*
* This function issues an abort iocb for the provided command
* iocb. This function is called with hbalock held.
* The function returns 0 when it fails due to a memory allocation
* failure or when the command iocb is itself an abort request.
**/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *cmdiocb)
{
struct lpfc_vport *vport = cmdiocb->vport;
struct lpfc_iocbq *abtsiocbp;
IOCB_t *icmd = NULL;
IOCB_t *iabt = NULL;
int retval = IOCB_ERROR;
/*
* There are certain command types we don't want to abort. And we
* don't want to abort commands that are already in the process of
* being aborted.
*/
icmd = &cmdiocb->iocb;
if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
(cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
return 0;
/* If we're unloading, don't abort iocb on the ELS ring, but change the
* callback so that nothing happens when it finishes.
*/
if ((vport->load_flag & FC_UNLOADING) &&
(pring->ringno == LPFC_ELS_RING)) {
if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
else
cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
goto abort_iotag_exit;
}
/* issue ABTS for this IOCB based on iotag */
abtsiocbp = __lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
return 0;
/* This signals the response to set the correct status
* before calling the completion handler.
*/
cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
iabt = &abtsiocbp->iocb;
iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
iabt->un.acxri.abortContextTag = icmd->ulpContext;
if (phba->sli_rev == LPFC_SLI_REV4)
iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
else
iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
iabt->ulpLe = 1;
iabt->ulpClass = icmd->ulpClass;
if (phba->link_state >= LPFC_LINK_UP)
iabt->ulpCommand = CMD_ABORT_XRI_CN;
else
iabt->ulpCommand = CMD_CLOSE_XRI_CN;
abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
"0339 Abort xri x%x, original iotag x%x, "
"abort cmd iotag x%x\n",
iabt->un.acxri.abortContextTag,
iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
if (retval)
__lpfc_sli_release_iocbq(phba, abtsiocbp);
abort_iotag_exit:
/*
* Caller to this routine should check for IOCB_ERROR
* and handle it properly. This routine no longer removes
* iocb off txcmplq and call compl in case of IOCB_ERROR.
*/
return retval;
}
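/*
 * Illustrative sketch (not part of the driver): lpfc_sli_issue_abort_iotag()
 * must be called with the hbalock held, and the caller owns the IOCB_ERROR
 * handling. "piocb" and the function name are hypothetical; "piocb" is
 * assumed to be a command iocb still on the txcmplq.
 */
static inline int
lpfc_example_abort_one_iocb(struct lpfc_hba *phba,
        struct lpfc_sli_ring *pring,
        struct lpfc_iocbq *piocb)
{
        int rc;

        /* The hbalock must be held across the call, per the contract above */
        spin_lock_irq(&phba->hbalock);
        rc = lpfc_sli_issue_abort_iotag(phba, pring, piocb);
        spin_unlock_irq(&phba->hbalock);
        /*
         * IOCB_ERROR (or 0) must be handled here; the routine no longer
         * removes the iocb from the txcmplq or calls its completion.
         */
        return rc;
}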
/**
* lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
* @iocbq: Pointer to driver iocb object.
* @vport: Pointer to driver virtual port object.
* @tgt_id: SCSI ID of the target.
* @lun_id: LUN ID of the scsi device.
* @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
*
* This function acts as an iocb filter for functions which abort or count
* all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
* 0 if the filtering criteria are met for the given iocb and will return
* 1 if the filtering criteria are not met.
* If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
* given iocb is for the SCSI device specified by vport, tgt_id and
* lun_id parameter.
* If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
* given iocb is for the SCSI target specified by vport and tgt_id
* parameters.
* If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
* given iocb is for the SCSI host associated with the given vport.
* This function is called with no locks held.
**/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
uint16_t tgt_id, uint64_t lun_id,
lpfc_ctx_cmd ctx_cmd)
{
struct lpfc_scsi_buf *lpfc_cmd;
int rc = 1;
if (!(iocbq->iocb_flag & LPFC_IO_FCP))
return rc;
if (iocbq->vport != vport)
return rc;
lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
if (lpfc_cmd->pCmd == NULL)
return rc;
switch (ctx_cmd) {
case LPFC_CTX_LUN:
if ((lpfc_cmd->rdata->pnode) &&
(lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
(scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
rc = 0;
break;
case LPFC_CTX_TGT:
if ((lpfc_cmd->rdata->pnode) &&
(lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
rc = 0;
break;
case LPFC_CTX_HOST:
rc = 0;
break;
default:
printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
__func__, ctx_cmd);
break;
}
return rc;
}
/**
* lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
* @vport: Pointer to virtual port.
* @tgt_id: SCSI ID of the target.
* @lun_id: LUN ID of the scsi device.
* @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
*
* This function returns number of FCP commands pending for the vport.
* When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
* commands pending on the vport associated with SCSI device specified
* by tgt_id and lun_id parameters.
* When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
* commands pending on the vport associated with SCSI target specified
* by tgt_id parameter.
* When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
* commands pending on the vport.
* This function returns the number of iocbs which satisfy the filter.
* This function is called without any lock held.
**/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
lpfc_ctx_cmd ctx_cmd)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
int sum, i;
for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
ctx_cmd) == 0)
sum++;
}
return sum;
}
/**
* lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
* @phba: Pointer to HBA context object
* @cmdiocb: Pointer to command iocb object.
* @rspiocb: Pointer to response iocb object.
*
* This function is called when an aborted FCP iocb completes. This
* function is called by the ring event handler with no lock held.
* This function frees the iocb.
**/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
/**
* lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
* @vport: Pointer to virtual port.
* @pring: Pointer to driver SLI ring object.
* @tgt_id: SCSI ID of the target.
* @lun_id: LUN ID of the scsi device.
* @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
*
* This function sends an abort command for every SCSI command
* associated with the given virtual port pending on the ring
* filtered by lpfc_sli_validate_fcp_iocb function.
* When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
* FCP iocbs associated with the LUN specified by the tgt_id and lun_id
* parameters.
* When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
* FCP iocbs associated with SCSI target specified by tgt_id parameter.
* When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
* FCP iocbs associated with virtual port.
* This function returns number of iocbs it failed to abort.
* This function is called with no locks held.
**/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *abtsiocb;
IOCB_t *cmd = NULL;
int errcnt = 0, ret_val = 0;
int i;
for (i = 1; i <= phba->sli.last_iotag; i++) {
iocbq = phba->sli.iocbq_lookup[i];
if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
abort_cmd) != 0)
continue;
/* issue ABTS for this IOCB based on iotag */
abtsiocb = lpfc_sli_get_iocbq(phba);
if (abtsiocb == NULL) {
errcnt++;
continue;
}
cmd = &iocbq->iocb;
abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
if (phba->sli_rev == LPFC_SLI_REV4)
abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
else
abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
abtsiocb->iocb.ulpLe = 1;
abtsiocb->iocb.ulpClass = cmd->ulpClass;
abtsiocb->vport = phba->pport;
if (lpfc_is_link_up(phba))
abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
else
abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
/* Setup callback routine and issue the command. */
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
abtsiocb, 0);
if (ret_val == IOCB_ERROR) {
lpfc_sli_release_iocbq(phba, abtsiocb);
errcnt++;
continue;
}
}
return errcnt;
}
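/*
 * Illustrative sketch (not part of the driver): a reset-handler style
 * pattern built on lpfc_sli_abort_iocb() and lpfc_sli_sum_iocb() - issue
 * ABTS for everything outstanding on a target, then poll until the count
 * of matching FCP iocbs drains. The function name, the 10ms poll interval
 * and the retry cap are assumptions for the sketch.
 */
static inline int
lpfc_example_abort_and_drain_tgt(struct lpfc_vport *vport,
        struct lpfc_sli_ring *pring,
        uint16_t tgt_id)
{
        int wait = 0;

        /* Returns the number of iocbs it failed to abort */
        lpfc_sli_abort_iocb(vport, pring, tgt_id, 0, LPFC_CTX_TGT);
        /* Wait for the aborted commands to complete back */
        while (lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT) &&
               wait++ < 100)
                msleep(10);
        /* Anything still pending after the cap is the caller's problem */
        return lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
}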
/**
* lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
* @phba: Pointer to HBA context object.
* @cmdiocbq: Pointer to command iocb.
* @rspiocbq: Pointer to response iocb.
*
* This function is the completion handler for iocbs issued using
* lpfc_sli_issue_iocb_wait function. This function is called by the
* ring event handler function without any lock held. This function
* can be called from both worker thread context and interrupt
* context. This function can also be called from any other thread which
* cleans up the SLI layer objects.
* This function copies the contents of the response iocb to the
* response iocb memory object provided by the caller of
* lpfc_sli_issue_iocb_wait and then wakes up the thread which
* sleeps for the iocb completion.
**/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocbq,
struct lpfc_iocbq *rspiocbq)
{
wait_queue_head_t *pdone_q;
unsigned long iflags;
spin_lock_irqsave(&phba->hbalock, iflags);
cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
if (cmdiocbq->context2 && rspiocbq)
memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
&rspiocbq->iocb, sizeof(IOCB_t));
pdone_q = cmdiocbq->context_un.wait_queue;
if (pdone_q)
wake_up(pdone_q);
spin_unlock_irqrestore(&phba->hbalock, iflags);
return;
}
/**
* lpfc_chk_iocb_flg - Test IOCB flag with lock held.
* @phba: Pointer to HBA context object.
* @piocbq: Pointer to command iocb.
* @flag: Flag to test.
*
* This routine grabs the hbalock and then tests the iocb_flag to
* see if the passed in flag is set.
* Returns:
* 1 if flag is set.
* 0 if flag is not set.
**/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq, uint32_t flag)
{
unsigned long iflags;
int ret;
spin_lock_irqsave(&phba->hbalock, iflags);
ret = piocbq->iocb_flag & flag;
spin_unlock_irqrestore(&phba->hbalock, iflags);
return ret;
}
/**
* lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
* @phba: Pointer to HBA context object.
* @ring_number: SLI hardware ring number to issue the iocb on.
* @piocb: Pointer to command iocb.
* @prspiocbq: Pointer to response iocb.
* @timeout: Timeout in number of seconds.
*
* This function issues the iocb to firmware and waits for the
* iocb to complete. If the iocb command is not
* completed within timeout seconds, it returns IOCB_TIMEDOUT.
* Caller should not free the iocb resources if this function
* returns IOCB_TIMEDOUT.
* The function waits for the iocb completion using a
* non-interruptible wait.
* This function will sleep while waiting for the iocb completion.
* So, this function should not be called from any context which
* does not allow sleeping. For the same reason, this function
* cannot be called with interrupts disabled.
* This function assumes that the iocb completions occur while
* this function sleeps. So, this function cannot be called from
* the thread which processes iocb completions for this ring.
* This function clears the iocb_flag of the iocb object before
* issuing the iocb and the iocb completion handler sets this
* flag and wakes this thread when the iocb completes.
* The contents of the response iocb will be copied to prspiocbq
* by the completion handler when the command completes.
* This function returns IOCB_SUCCESS on success.
* This function is called with no lock held.
**/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
uint32_t ring_number,
struct lpfc_iocbq *piocb,
struct lpfc_iocbq *prspiocbq,
uint32_t timeout)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
long timeleft, timeout_req = 0;
int retval = IOCB_SUCCESS;
uint32_t creg_val;
/*
* If the caller has provided a response iocbq buffer, then context2
* must be NULL or it is an error.
*/
if (prspiocbq) {
if (piocb->context2)
return IOCB_ERROR;
piocb->context2 = prspiocbq;
}
piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
piocb->context_un.wait_queue = &done_q;
piocb->iocb_flag &= ~LPFC_IO_WAKE;
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
creg_val = readl(phba->HCregaddr);
creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
if (retval == IOCB_SUCCESS) {
timeout_req = timeout * HZ;
timeleft = wait_event_timeout(done_q,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
if (piocb->iocb_flag & LPFC_IO_WAKE) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0331 IOCB wake signaled\n");
} else if (timeleft == 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0338 IOCB wait timeout error - no "
"wake response Data x%x\n", timeout);
retval = IOCB_TIMEDOUT;
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0330 IOCB wake NOT set, "
"Data x%x x%lx\n",
timeout, (timeleft / jiffies));
retval = IOCB_TIMEDOUT;
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0332 IOCB wait issue failed, Data x%x\n",
retval);
retval = IOCB_ERROR;
}
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
creg_val = readl(phba->HCregaddr);
creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
writel(creg_val, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
if (prspiocbq)
piocb->context2 = NULL;
piocb->context_un.wait_queue = NULL;
piocb->iocb_cmpl = NULL;
return retval;
}
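/*
 * Illustrative sketch (not part of the driver): synchronous issue of an
 * ELS iocb via lpfc_sli_issue_iocb_wait(). The ring choice, the 30-second
 * timeout and the function name are assumptions; "piocb" and "prsp" are
 * assumed to have come from lpfc_sli_get_iocbq(). On IOCB_TIMEDOUT the
 * iocb resources must not be freed, since the completion may still arrive.
 */
static inline int
lpfc_example_issue_iocb_sync(struct lpfc_hba *phba, struct lpfc_iocbq *piocb,
        struct lpfc_iocbq *prsp)
{
        int rc;

        rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb, prsp, 30);
        if (rc == IOCB_TIMEDOUT)
                return rc;      /* do NOT release piocb here */
        if (rc == IOCB_SUCCESS && prsp->iocb.ulpStatus)
                rc = IOCB_ERROR;        /* completed, but with an error */
        lpfc_sli_release_iocbq(phba, piocb);
        return rc;
}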
/**
* lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
* @phba: Pointer to HBA context object.
* @pmboxq: Pointer to driver mailbox object.
* @timeout: Timeout in number of seconds.
*
* This function issues the mailbox to firmware and waits for the
* mailbox command to complete. If the mailbox command is not
* completed within timeout seconds, it returns MBX_TIMEOUT.
* The function waits for the mailbox completion using an
* interruptible wait. If the thread is woken up due to a
* signal, an MBX_TIMEOUT error is returned to the caller. The caller
* should not free the mailbox resources if this function returns
* MBX_TIMEOUT.
* This function will sleep while waiting for mailbox completion.
* So, this function should not be called from any context which
* does not allow sleeping. Due to the same reason, this function
* cannot be called with interrupt disabled.
* This function assumes that the mailbox completion occurs while
* this function sleeps. So, this function cannot be called from
* the worker thread which processes mailbox completion.
* This function is called in the context of HBA management
* applications.
* This function returns MBX_SUCCESS when successful.
* This function is called with no lock held.
**/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
uint32_t timeout)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
int retval;
unsigned long flag;
/* The caller must leave context1 empty. */
if (pmboxq->context1)
return MBX_NOT_FINISHED;
pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
/* setup wake call as IOCB callback */
pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
/* setup context field to pass wait_queue pointer to wake function */
pmboxq->context1 = &done_q;
/* now issue the command */
retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
wait_event_interruptible_timeout(done_q,
pmboxq->mbox_flag & LPFC_MBX_WAKE,
timeout * HZ);
spin_lock_irqsave(&phba->hbalock, flag);
pmboxq->context1 = NULL;
/*
* If the LPFC_MBX_WAKE flag is set, the mailbox completed;
* otherwise do not free the resources.
*/
if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
retval = MBX_SUCCESS;
else {
retval = MBX_TIMEOUT;
pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
}
spin_unlock_irqrestore(&phba->hbalock, flag);
}
return retval;
}
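/*
 * Illustrative sketch (not part of the driver): synchronous mailbox issue
 * via lpfc_sli_issue_mbox_wait(). On MBX_TIMEOUT the mailbox must not be
 * freed by the caller, because lpfc_sli_def_mbox_cmpl has been installed
 * to clean it up when the command finally completes. The 30-second
 * timeout and the function name are assumptions.
 */
static inline int
lpfc_example_issue_mbox_sync(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        int rc;

        rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, 30);
        if (rc == MBX_TIMEOUT)
                return rc;      /* pmboxq now belongs to the def cmpl handler */
        /* MBX_SUCCESS or an error code; pmboxq may be reused or freed */
        return rc;
}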
/**
* lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
* @phba: Pointer to HBA context.
*
* This function is called to shut down the driver's mailbox sub-system.
* It first marks the mailbox sub-system as blocked to prevent
* asynchronous mailbox commands from being issued off the pending mailbox
* command queue. If the mailbox command sub-system shutdown is due to
* HBA error conditions such as EEH or ERATT, this routine invokes
* the mailbox sub-system flush routine to forcefully bring down the
* mailbox sub-system. Otherwise, if it is due to a normal condition (such
* as offline or HBA function reset), this routine waits for the
* outstanding mailbox command to complete before invoking the mailbox
* sub-system flush routine to gracefully bring down the mailbox sub-system.
**/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
uint8_t actcmd = MBX_HEARTBEAT;
unsigned long timeout;
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
spin_lock_irq(&phba->hbalock);
if (phba->sli.mbox_active)
actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
spin_unlock_irq(&phba->hbalock);
/* Determine how long we might wait for the active mailbox
* command to be gracefully completed by firmware.
*/
timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
1000) + jiffies;
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
msleep(2);
if (time_after(jiffies, timeout))
/* Timeout, let the mailbox flush routine
* forcefully release the active mailbox command
*/
break;
}
}
lpfc_sli_mbox_sys_flush(phba);
}
/**
* lpfc_sli_eratt_read - read sli-3 error attention events
* @phba: Pointer to HBA context.
*
* This function is called to read the SLI3 device error attention registers
* for possible error attention events. The caller must hold the hbalock
* with spin_lock_irq().
*
* This function returns 1 when there is Error Attention in the Host Attention
* Register and returns 0 otherwise.
**/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
uint32_t ha_copy;
/* Read chip Host Attention (HA) register */
ha_copy = readl(phba->HAregaddr);
if (ha_copy & HA_ERATT) {
/* Read host status register to retrieve error event */
lpfc_sli_read_hs(phba);
/* Check if a deferred error condition is active */
if ((HS_FFER1 & phba->work_hs) &&
((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
HS_FFER6 | HS_FFER7) & phba->work_hs)) {
phba->hba_flag |= DEFER_ERATT;
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr);
}
/* Set the driver HA work bitmap */
phba->work_ha |= HA_ERATT;
/* Indicate polling handles this ERATT */
phba->hba_flag |= HBA_ERATT_HANDLED;
return 1;
}
return 0;
}
/**
* lpfc_sli4_eratt_read - read sli-4 error attention events
* @phba: Pointer to HBA context.
*
* This function is called to read the SLI4 device error attention registers
* for possible error attention events. The caller must hold the hbalock
* with spin_lock_irq().
*
* This function returns 1 when there is Error Attention in the Host Attention
* Register and returns 0 otherwise.
**/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
uint32_t uerr_sta_hi, uerr_sta_lo;
uint32_t onlnreg0, onlnreg1;
/* For now, use the SLI4 device internal unrecoverable error
* registers for error attention. This can be changed later.
*/
onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
if (uerr_sta_lo || uerr_sta_hi) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1423 HBA Unrecoverable error: "
"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
"online0_reg=0x%x, online1_reg=0x%x\n",
uerr_sta_lo, uerr_sta_hi,
onlnreg0, onlnreg1);
phba->work_status[0] = uerr_sta_lo;
phba->work_status[1] = uerr_sta_hi;
/* Set the driver HA work bitmap */
phba->work_ha |= HA_ERATT;
/* Indicate polling handles this ERATT */
phba->hba_flag |= HBA_ERATT_HANDLED;
return 1;
}
}
return 0;
}
/**
* lpfc_sli_check_eratt - check error attention events
* @phba: Pointer to HBA context.
*
* This function is called from timer soft interrupt context to check HBA's
* error attention register bit for error attention events.
*
* This function returns 1 when there is Error Attention in the Host Attention
* Register and returns 0 otherwise.
**/
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
uint32_t ha_copy;
/* If somebody is waiting to handle an eratt, don't process it
* here. The brdkill function will do this.
*/
if (phba->link_flag & LS_IGNORE_ERATT)
return 0;
/* Check if interrupt handler handles this ERATT */
spin_lock_irq(&phba->hbalock);
if (phba->hba_flag & HBA_ERATT_HANDLED) {
/* Interrupt handler has handled ERATT */
spin_unlock_irq(&phba->hbalock);
return 0;
}
/*
* If there is deferred error attention, do not check for error
* attention
*/
if (unlikely(phba->hba_flag & DEFER_ERATT)) {
spin_unlock_irq(&phba->hbalock);
return 0;
}
/* If PCI channel is offline, don't process it */
if (unlikely(pci_channel_offline(phba->pcidev))) {
spin_unlock_irq(&phba->hbalock);
return 0;
}
switch (phba->sli_rev) {
case LPFC_SLI_REV2:
case LPFC_SLI_REV3:
/* Read chip Host Attention (HA) register */
ha_copy = lpfc_sli_eratt_read(phba);
break;
case LPFC_SLI_REV4:
/* Read device Unrecoverable Error (UERR) registers */
ha_copy = lpfc_sli4_eratt_read(phba);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0299 Invalid SLI revision (%d)\n",
phba->sli_rev);
ha_copy = 0;
break;
}
spin_unlock_irq(&phba->hbalock);
return ha_copy;
}
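/*
 * Illustrative sketch (not part of the driver): a periodic error-attention
 * poll built on lpfc_sli_check_eratt(), which the kernel-doc above says is
 * called from timer soft interrupt context. When the routine reports an
 * ERATT it has already latched the event into phba->work_ha, so the poller
 * only needs to kick the worker thread. The function name is hypothetical.
 */
static inline void
lpfc_example_poll_eratt(struct lpfc_hba *phba)
{
        if (lpfc_sli_check_eratt(phba))
                /* ERATT captured in phba->work_ha; wake the worker */
                lpfc_worker_wake_up(phba);
}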
/**
* lpfc_intr_state_check - Check device state for interrupt handling
* @phba: Pointer to HBA context.
*
* This inline routine checks whether a device or its PCI slot is in a state
* in which the interrupt should be handled.
*
* This function returns 0 if the device or the PCI slot is in a state in
* which the interrupt should be handled, otherwise -EIO.
*/
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
/* If the pci channel is offline, ignore all the interrupts */
if (unlikely(pci_channel_offline(phba->pcidev)))
return -EIO;
/* Update device level interrupt statistics */
phba->sli.slistat.sli_intr++;
/* Ignore all interrupts during initialization. */
if (unlikely(phba->link_state < LPFC_LINK_DOWN))
return -EIO;
return 0;
}
/**
* lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
* @irq: Interrupt number.
* @dev_id: The device context pointer.
*
* This function is directly called from the PCI layer as an interrupt
* service routine when device with SLI-3 interface spec is enabled with
* MSI-X multi-message interrupt mode and there are slow-path events in
* the HBA. However, when the device is enabled with either MSI or Pin-IRQ
* interrupt mode, this function is called as part of the device-level
* interrupt handler. When the PCI slot is in error recovery or the HBA
* is undergoing initialization, the interrupt handler will not process
* the interrupt. The link attention and ELS ring attention events are
* handled by the worker thread. The interrupt handler signals the worker
* thread and returns for these events. This function is called without
* any lock held. It gets the hbalock to access and update SLI data
* structures.
*
* This function returns IRQ_HANDLED when interrupt is handled else it
* returns IRQ_NONE.
**/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
uint32_t ha_copy;
uint32_t work_ha_copy;
unsigned long status;
unsigned long iflag;
uint32_t control;
MAILBOX_t *mbox, *pmbox;
struct lpfc_vport *vport;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *mp;
LPFC_MBOXQ_t *pmb;
int rc;
/*
* Get the driver's phba structure from the dev_id and
* assume the HBA is not interrupting.
*/
phba = (struct lpfc_hba *)dev_id;
if (unlikely(!phba))
return IRQ_NONE;
/*
* Stuff needs to be attended to when this function is invoked as an
* individual interrupt handler in MSI-X multi-message interrupt mode
*/
if (phba->intr_type == MSIX) {
/* Check device state for handling interrupt */
if (lpfc_intr_state_check(phba))
return IRQ_NONE;
/* Need to read HA REG for slow-path events */
spin_lock_irqsave(&phba->hbalock, iflag);
ha_copy = readl(phba->HAregaddr);
/* If somebody is waiting to handle an eratt don't process it
* here. The brdkill function will do this.
*/
if (phba->link_flag & LS_IGNORE_ERATT)
ha_copy &= ~HA_ERATT;
/* Check the need for handling ERATT in interrupt handler */
if (ha_copy & HA_ERATT) {
if (phba->hba_flag & HBA_ERATT_HANDLED)
/* ERATT polling has handled ERATT */
ha_copy &= ~HA_ERATT;
else
/* Indicate interrupt handler handles ERATT */
phba->hba_flag |= HBA_ERATT_HANDLED;
}
/*
* If there is deferred error attention, do not check for any
* interrupt.
*/
if (unlikely(phba->hba_flag & DEFER_ERATT)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
return IRQ_NONE;
}
/* Clear up only attention source related to slow-path */
writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
spin_unlock_irqrestore(&phba->hbalock, iflag);
} else
ha_copy = phba->ha_copy;
work_ha_copy = ha_copy & phba->work_ha_mask;
if (work_ha_copy) {
if (work_ha_copy & HA_LATT) {
if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
/*
* Turn off Link Attention interrupts
* until CLEAR_LA done
*/
spin_lock_irqsave(&phba->hbalock, iflag);
phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
control = readl(phba->HCregaddr);
control &= ~HC_LAINT_ENA;
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
else
work_ha_copy &= ~HA_LATT;
}
if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
/*
* Turn off Slow Rings interrupts, LPFC_ELS_RING is
* the only slow ring.
*/
status = (work_ha_copy &
(HA_RXMASK << (4*LPFC_ELS_RING)));
status >>= (4*LPFC_ELS_RING);
if (status & HA_RXMASK) {
spin_lock_irqsave(&phba->hbalock, iflag);
control = readl(phba->HCregaddr);
lpfc_debugfs_slow_ring_trc(phba,
"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
control, status,
(uint32_t)phba->sli.slistat.sli_intr);
if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
lpfc_debugfs_slow_ring_trc(phba,
"ISR Disable ring:"
"pwork:x%x hawork:x%x wait:x%x",
phba->work_ha, work_ha_copy,
(uint32_t)((unsigned long)
&phba->work_waitq));
control &=
~(HC_R0INT_ENA << LPFC_ELS_RING);
writel(control, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
}
else {
lpfc_debugfs_slow_ring_trc(phba,
"ISR slow ring: pwork:"
"x%x hawork:x%x wait:x%x",
phba->work_ha, work_ha_copy,
(uint32_t)((unsigned long)
&phba->work_waitq));
}
spin_unlock_irqrestore(&phba->hbalock, iflag);
}
}
spin_lock_irqsave(&phba->hbalock, iflag);
if (work_ha_copy & HA_ERATT) {
lpfc_sli_read_hs(phba);
/*
* Check if a deferred error condition
* is active
*/
if ((HS_FFER1 & phba->work_hs) &&
((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
HS_FFER6 | HS_FFER7) & phba->work_hs)) {
phba->hba_flag |= DEFER_ERATT;
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr);
}
}
if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
pmb = phba->sli.mbox_active;
pmbox = &pmb->u.mb;
mbox = phba->mbox;
vport = pmb->vport;
/* First check out the status word */
lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
if (pmbox->mbxOwner != OWN_HOST) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
/*
* Stray Mailbox Interrupt, mbxCommand <cmd>
* mbxStatus <status>
*/
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
LOG_SLI,
"(%d):0304 Stray Mailbox "
"Interrupt mbxCommand x%x "
"mbxStatus x%x\n",
(vport ? vport->vpi : 0),
pmbox->mbxCommand,
pmbox->mbxStatus);
/* clear mailbox attention bit */
work_ha_copy &= ~HA_MBATT;
} else {
phba->sli.mbox_active = NULL;
spin_unlock_irqrestore(&phba->hbalock, iflag);
phba->last_completion_time = jiffies;
del_timer(&phba->sli.mbox_tmo);
if (pmb->mbox_cmpl) {
lpfc_sli_pcimem_bcopy(mbox, pmbox,
MAILBOX_CMD_SIZE);
}
if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
lpfc_debugfs_disc_trc(vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX dflt rpi: : "
"status:x%x rpi:x%x",
(uint32_t)pmbox->mbxStatus,
pmbox->un.varWords[0], 0);
if (!pmbox->mbxStatus) {
mp = (struct lpfc_dmabuf *)
(pmb->context1);
ndlp = (struct lpfc_nodelist *)
pmb->context2;
/* Reg_LOGIN of dflt RPI was
* successful. Now let's get
* rid of the RPI using the
* same mbox buffer.
*/
lpfc_unreg_login(phba,
vport->vpi,
pmbox->un.varWords[0],
pmb);
pmb->mbox_cmpl =
lpfc_mbx_cmpl_dflt_rpi;
pmb->context1 = mp;
pmb->context2 = ndlp;
pmb->vport = vport;
rc = lpfc_sli_issue_mbox(phba,
pmb,
MBX_NOWAIT);
if (rc != MBX_BUSY)
lpfc_printf_log(phba,
KERN_ERR,
LOG_MBOX | LOG_SLI,
"0350 rc should have"
"been MBX_BUSY");
if (rc != MBX_NOT_FINISHED)
goto send_current_mbox;
}
}
spin_lock_irqsave(
&phba->pport->work_port_lock,
iflag);
phba->pport->work_port_events &=
~WORKER_MBOX_TMO;
spin_unlock_irqrestore(
&phba->pport->work_port_lock,
iflag);
lpfc_mbox_cmpl_put(phba, pmb);
}
} else
spin_unlock_irqrestore(&phba->hbalock, iflag);
if ((work_ha_copy & HA_MBATT) &&
(phba->sli.mbox_active == NULL)) {
send_current_mbox:
/* Process next mailbox command if there is one */
do {
rc = lpfc_sli_issue_mbox(phba, NULL,
MBX_NOWAIT);
} while (rc == MBX_NOT_FINISHED);
if (rc != MBX_SUCCESS)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
LOG_SLI, "0349 rc should be "
"MBX_SUCCESS");
}
spin_lock_irqsave(&phba->hbalock, iflag);
phba->work_ha |= work_ha_copy;
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_worker_wake_up(phba);
}
return IRQ_HANDLED;
} /* lpfc_sli_sp_intr_handler */
/**
* lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
* @irq: Interrupt number.
* @dev_id: The device context pointer.
*
* This function is directly called from the PCI layer as an interrupt
* service routine when device with SLI-3 interface spec is enabled with
* MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
* ring event in the HBA. However, when the device is enabled with either
* MSI or Pin-IRQ interrupt mode, this function is called as part of the
* device-level interrupt handler. When the PCI slot is in error recovery
* or the HBA is undergoing initialization, the interrupt handler will not
* process the interrupt. The SCSI FCP fast-path ring events are handled in
* the interrupt context. This function is called without any lock held.
* It gets the hbalock to access and update SLI data structures.
*
* This function returns IRQ_HANDLED when interrupt is handled else it
* returns IRQ_NONE.
**/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
uint32_t ha_copy;
unsigned long status;
unsigned long iflag;
/* Get the driver's phba structure from the dev_id and
* assume the HBA is not interrupting.
*/
phba = (struct lpfc_hba *) dev_id;
if (unlikely(!phba))
return IRQ_NONE;
/*
* Stuff needs to be attended to when this function is invoked as an
* individual interrupt handler in MSI-X multi-message interrupt mode
*/
if (phba->intr_type == MSIX) {
/* Check device state for handling interrupt */
if (lpfc_intr_state_check(phba))
return IRQ_NONE;
/* Need to read HA REG for FCP ring and other ring events */
ha_copy = readl(phba->HAregaddr);
/* Clear up only attention source related to fast-path */
spin_lock_irqsave(&phba->hbalock, iflag);
/*
* If there is deferred error attention, do not check for
* any interrupt.
*/
if (unlikely(phba->hba_flag & DEFER_ERATT)) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
return IRQ_NONE;
}
writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
spin_unlock_irqrestore(&phba->hbalock, iflag);
} else
ha_copy = phba->ha_copy;
/*
* Process all events on FCP ring. Take the optimized path for FCP IO.
*/
ha_copy &= ~(phba->work_ha_mask);
status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
status >>= (4*LPFC_FCP_RING);
if (status & HA_RXMASK)
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_FCP_RING],
status);
if (phba->cfg_multi_ring_support == 2) {
/*
* Process all events on extra ring. Take the optimized path
* for extra ring IO.
*/
status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
status >>= (4*LPFC_EXTRA_RING);
if (status & HA_RXMASK) {
lpfc_sli_handle_fast_ring_event(phba,
&phba->sli.ring[LPFC_EXTRA_RING],
status);
}
}
return IRQ_HANDLED;
} /* lpfc_sli_fp_intr_handler */
/**
* lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
* @irq: Interrupt number.
* @dev_id: The device context pointer.
*
* This function is the HBA device-level interrupt handler to device with
* SLI-3 interface spec, called from the PCI layer when either MSI or
* Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
* requires driver attention. This function invokes the slow-path interrupt
* attention handling function and fast-path interrupt attention handling
* function in turn to process the relevant HBA attention events. This
* function is called without any lock held. It gets the hbalock to access
* and update SLI data structures.
*
* This function returns IRQ_HANDLED when interrupt is handled, else it
* returns IRQ_NONE.
**/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
irqreturn_t sp_irq_rc, fp_irq_rc;
unsigned long status1, status2;
/*
* Get the driver's phba structure from the dev_id and
* assume the HBA is not interrupting.
*/
phba = (struct lpfc_hba *) dev_id;
if (unlikely(!phba))
return IRQ_NONE;
/* Check device state for handling interrupt */
if (lpfc_intr_state_check(phba))
return IRQ_NONE;
spin_lock(&phba->hbalock);
phba->ha_copy = readl(phba->HAregaddr);
if (unlikely(!phba->ha_copy)) {
spin_unlock(&phba->hbalock);
return IRQ_NONE;
} else if (phba->ha_copy & HA_ERATT) {
if (phba->hba_flag & HBA_ERATT_HANDLED)
/* ERATT polling has handled ERATT */
phba->ha_copy &= ~HA_ERATT;
else
/* Indicate interrupt handler handles ERATT */
phba->hba_flag |= HBA_ERATT_HANDLED;
}
/*
* If there is deferred error attention, do not check for any interrupt.
*/
if (unlikely(phba->hba_flag & DEFER_ERATT)) {
spin_unlock(&phba->hbalock);
return IRQ_NONE;
}
/* Clear attention sources except link and error attentions */
writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
readl(phba->HAregaddr); /* flush */
spin_unlock(&phba->hbalock);
/*
* Invokes slow-path host attention interrupt handling as appropriate.
*/
/* status of events with mailbox and link attention */
status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
/* status of events with ELS ring */
status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
status2 >>= (4*LPFC_ELS_RING);
if (status1 || (status2 & HA_RXMASK))
sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
else
sp_irq_rc = IRQ_NONE;
/*
* Invoke fast-path host attention interrupt handling as appropriate.
*/
/* status of events with FCP ring */
status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
status1 >>= (4*LPFC_FCP_RING);
/* status of events with extra ring */
if (phba->cfg_multi_ring_support == 2) {
status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
status2 >>= (4*LPFC_EXTRA_RING);
} else
status2 = 0;
if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
else
fp_irq_rc = IRQ_NONE;
/* Return device-level interrupt handling status */
return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
} /* lpfc_sli_intr_handler */
/**
* lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked by the worker thread to process all the pending
* SLI4 FCP abort XRI events.
**/
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
{
struct lpfc_cq_event *cq_event;
/* First, declare the fcp xri abort event has been handled */
spin_lock_irq(&phba->hbalock);
phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
spin_unlock_irq(&phba->hbalock);
/* Now, handle all the fcp xri abort events */
while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
/* Get the first event from the head of the event queue */
spin_lock_irq(&phba->hbalock);
list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
cq_event, struct lpfc_cq_event, list);
spin_unlock_irq(&phba->hbalock);
/* Notify aborted XRI for FCP work queue */
lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
/* Free the event processed back to the free pool */
lpfc_sli4_cq_event_release(phba, cq_event);
}
}
/**
* lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked by the worker thread to process all the pending
* SLI4 els abort xri events.
**/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
struct lpfc_cq_event *cq_event;
/* First, declare the els xri abort event has been handled */
spin_lock_irq(&phba->hbalock);
phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
spin_unlock_irq(&phba->hbalock);
/* Now, handle all the els xri abort events */
while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
/* Get the first event from the head of the event queue */
spin_lock_irq(&phba->hbalock);
list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
cq_event, struct lpfc_cq_event, list);
spin_unlock_irq(&phba->hbalock);
/* Notify aborted XRI for ELS work queue */
lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
/* Free the event processed back to the free pool */
lpfc_sli4_cq_event_release(phba, cq_event);
}
}
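/**
 * lpfc_sli4_iocb_param_transfer - Transfer WCQE completion info to an iocb
 * @pIocbIn: Pointer to the pseudo response iocb to fill in.
 * @pIocbOut: Pointer to the command iocb that was issued.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine copies the iocb portion of the command iocb into the pseudo
 * response iocb and then maps the WCQE status, parameter and the additional
 * SLI4-only fields (hw_status, XB/PV bits and priority) into it.
 **/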
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
struct lpfc_iocbq *pIocbOut,
struct lpfc_wcqe_complete *wcqe)
{
size_t offset = offsetof(struct lpfc_iocbq, iocb);
memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
sizeof(struct lpfc_iocbq) - offset);
memset(&pIocbIn->sli4_info, 0,
sizeof(struct lpfc_sli4_rspiocb_info));
/* Map WCQE parameters into irspiocb parameters */
pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
if (pIocbOut->iocb_flag & LPFC_IO_FCP)
if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
pIocbIn->iocb.un.fcpi.fcpi_parm =
pIocbOut->iocb.un.fcpi.fcpi_parm -
wcqe->total_data_placed;
else
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
else
pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
/* Load in additional WCQE parameters */
pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
pIocbIn->sli4_info.bfield = 0;
if (bf_get(lpfc_wcqe_c_xb, wcqe))
pIocbIn->sli4_info.bfield |= LPFC_XB;
if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
pIocbIn->sli4_info.bfield |= LPFC_PV;
pIocbIn->sli4_info.priority =
bf_get(lpfc_wcqe_c_priority, wcqe);
}
}
/**
* lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
* @phba: Pointer to HBA context object.
* @mcqe: Pointer to mailbox completion queue entry.
*
* This routine processes a mailbox completion queue entry with an
* asynchronous event.
*
* Return: true if work posted to worker thread, otherwise false.
**/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
struct lpfc_cq_event *cq_event;
unsigned long iflags;
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0392 Async Event: word0:x%x, word1:x%x, "
"word2:x%x, word3:x%x\n", mcqe->word0,
mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
/* Allocate a new internal CQ_EVENT entry */
cq_event = lpfc_sli4_cq_event_alloc(phba);
if (!cq_event) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0394 Failed to allocate CQ_EVENT entry\n");
return false;
}
/* Move the CQE into an asynchronous event entry */
memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
/* Set the async event flag */
phba->hba_flag |= ASYNC_EVENT;
spin_unlock_irqrestore(&phba->hbalock, iflags);
return true;
}
/**
* lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
* @phba: Pointer to HBA context object.
* @mcqe: Pointer to mailbox completion queue entry.
*
* This routine processes a mailbox completion queue entry with a mailbox
* completion event.
*
* Return: true if work posted to worker thread, otherwise false.
**/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
uint32_t mcqe_status;
MAILBOX_t *mbox, *pmbox;
struct lpfc_mqe *mqe;
struct lpfc_vport *vport;
struct lpfc_nodelist *ndlp;
struct lpfc_dmabuf *mp;
unsigned long iflags;
LPFC_MBOXQ_t *pmb;
bool workposted = false;
int rc;
/* If not a mailbox completion MCQE, bail out after checking the consumed bit */
if (!bf_get(lpfc_trailer_completed, mcqe))
goto out_no_mqe_complete;
/* Get the reference to the active mbox command */
spin_lock_irqsave(&phba->hbalock, iflags);
pmb = phba->sli.mbox_active;
if (unlikely(!pmb)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"1832 No pending MBOX command to handle\n");
spin_unlock_irqrestore(&phba->hbalock, iflags);
goto out_no_mqe_complete;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
mqe = &pmb->u.mqe;
pmbox = (MAILBOX_t *)&pmb->u.mqe;
mbox = phba->mbox;
vport = pmb->vport;
/* Reset heartbeat timer */
phba->last_completion_time = jiffies;
del_timer(&phba->sli.mbox_tmo);
/* Move mbox data to caller's mailbox region, do endian swapping */
if (pmb->mbox_cmpl && mbox)
lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
/* Set the mailbox status with SLI4 range 0x4000 */
mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
if (mcqe_status != MB_CQE_STATUS_SUCCESS)
bf_set(lpfc_mqe_status, mqe,
(LPFC_MBX_ERROR_RANGE | mcqe_status));
if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
"MBOX dflt rpi: status:x%x rpi:x%x",
mcqe_status,
pmbox->un.varWords[0], 0);
if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
mp = (struct lpfc_dmabuf *)(pmb->context1);
ndlp = (struct lpfc_nodelist *)pmb->context2;
/* Reg_LOGIN of dflt RPI was successful. Now let's get
* rid of the RPI using the same mbox buffer.
*/
lpfc_unreg_login(phba, vport->vpi,
pmbox->un.varWords[0], pmb);
pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
pmb->context1 = mp;
pmb->context2 = ndlp;
pmb->vport = vport;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
if (rc != MBX_BUSY)
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
LOG_SLI, "0385 rc should "
"have been MBX_BUSY\n");
if (rc != MBX_NOT_FINISHED)
goto send_current_mbox;
}
}
spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
/* There is mailbox completion work to do */
spin_lock_irqsave(&phba->hbalock, iflags);
__lpfc_mbox_cmpl_put(phba, pmb);
phba->work_ha |= HA_MBATT;
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
send_current_mbox:
spin_lock_irqsave(&phba->hbalock, iflags);
/* Release the mailbox command posting token */
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Clearing the active mailbox pointer needs to be in sync with the flag clear */
phba->sli.mbox_active = NULL;
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Wake up worker thread to post the next pending mailbox command */
lpfc_worker_wake_up(phba);
out_no_mqe_complete:
if (bf_get(lpfc_trailer_consumed, mcqe))
lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
return workposted;
}
/**
* lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
* @phba: Pointer to HBA context object.
* @cqe: Pointer to mailbox completion queue entry.
*
* This routine processes a mailbox completion queue entry; it invokes the
* proper mailbox completion handling or asynchronous event handling routine
* according to the MCQE's async bit.
*
* Return: true if work posted to worker thread, otherwise false.
**/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
{
struct lpfc_mcqe mcqe;
bool workposted;
/* Copy the mailbox MCQE and convert endian order as needed */
lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
/* Invoke the proper event handling routine */
if (!bf_get(lpfc_trailer_async, &mcqe))
workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
else
workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
return workposted;
}
/**
* lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
* @phba: Pointer to HBA context object.
* @wcqe: Pointer to work-queue completion queue entry.
*
* This routine handles an ELS work-queue completion event.
*
* Return: true if work posted to worker thread, otherwise false.
**/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
struct lpfc_wcqe_complete *wcqe)
{
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
struct lpfc_iocbq *cmdiocbq;
struct lpfc_iocbq *irspiocbq;
unsigned long iflags;
bool workposted = false;
spin_lock_irqsave(&phba->hbalock, iflags);
pring->stats.iocb_event++;
/* Look up the ELS command IOCB and create pseudo response IOCB */
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (unlikely(!cmdiocbq)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0386 ELS complete with no corresponding "
"cmdiocb: iotag (%d)\n",
bf_get(lpfc_wcqe_c_request_tag, wcqe));
return workposted;
}
/* Fake the irspiocbq and copy necessary response information */
irspiocbq = lpfc_sli_get_iocbq(phba);
if (!irspiocbq) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0387 Failed to allocate an iocbq\n");
return workposted;
}
lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
/* Add the irspiocb to the response IOCB work list */
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
/* Indicate ELS ring attention */
phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
return workposted;
}
/**
* lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
* @phba: Pointer to HBA context object.
* @wcqe: Pointer to work-queue completion queue entry.
*
* This routine handles a slow-path WQ entry consumed event by invoking the
* proper WQ release routine to the slow-path WQ.
**/
static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
struct lpfc_wcqe_release *wcqe)
{
/* Check for the slow-path ELS work queue */
if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
bf_get(lpfc_wcqe_r_wqe_index, wcqe));
else
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"2579 Slow-path wqe consume event carries "
"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
bf_get(lpfc_wcqe_r_wqe_index, wcqe),
phba->sli4_hba.els_wq->queue_id);
}
/**
* lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
* @phba: Pointer to HBA context object.
* @cq: Pointer to a WQ completion queue.
* @wcqe: Pointer to work-queue completion queue entry.
*
* This routine handles an XRI abort event.
*
* Return: true if work posted to worker thread, otherwise false.
**/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
struct lpfc_queue *cq,
struct sli4_wcqe_xri_aborted *wcqe)
{
bool workposted = false;
struct lpfc_cq_event *cq_event;
unsigned long iflags;
/* Allocate a new internal CQ_EVENT entry */
cq_event = lpfc_sli4_cq_event_alloc(phba);
if (!cq_event) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0602 Failed to allocate CQ_EVENT entry\n");
return false;
}
/* Move the CQE into the proper xri abort event list */
memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
switch (cq->subtype) {
case LPFC_FCP:
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
/* Set the fcp xri abort event flag */
phba->hba_flag |= FCP_XRI_ABORT_EVENT;
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
case LPFC_ELS:
spin_lock_irqsave(&phba->hbalock, iflags);
list_add_tail(&cq_event->list,
&phba->sli4_hba.sp_els_xri_aborted_work_queue);
/* Set the els xri abort event flag */
phba->hba_flag |= ELS_XRI_ABORT_EVENT;
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0603 Invalid work queue CQE subtype (x%x)\n",
cq->subtype);
workposted = false;
break;
}
return workposted;
}
/**
* lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
* @phba: Pointer to HBA context object.
* @cq: Pointer to the completion queue.
* @cqe: Pointer to a completion queue entry.
*
* This routine processes a slow-path work-queue completion queue entry.
*
* Return: true if work posted to worker thread, otherwise false.
**/
static bool
lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_cqe *cqe)
{
struct lpfc_wcqe_complete wcqe;
bool workposted = false;
/* Copy the work queue CQE and convert endian order if needed */
lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
/* Check and process for different type of WCQE and dispatch */
switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
case CQE_CODE_COMPL_WQE:
/* Process the WQ complete event */
workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
(struct lpfc_wcqe_complete *)&wcqe);
break;
case CQE_CODE_RELEASE_WQE:
/* Process the WQ release event */
lpfc_sli4_sp_handle_rel_wcqe(phba,
(struct lpfc_wcqe_release *)&wcqe);
break;
case CQE_CODE_XRI_ABORTED:
/* Process the WQ XRI abort event */
workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
(struct sli4_wcqe_xri_aborted *)&wcqe);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0388 Not a valid WCQE code: x%x\n",
bf_get(lpfc_wcqe_c_code, &wcqe));
break;
}
return workposted;
}
/**
* lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
* @phba: Pointer to HBA context object.
* @cqe: Pointer to receive-queue completion queue entry.
*
* This routine processes a receive-queue completion queue entry.
*
* Return: true if work posted to worker thread, otherwise false.
**/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
{
struct lpfc_rcqe rcqe;
bool workposted = false;
struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
struct hbq_dmabuf *dma_buf;
uint32_t status;
unsigned long iflags;
/* Copy the receive queue CQE and convert endian order if needed */
lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
lpfc_sli4_rq_release(hrq, drq);
if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
goto out;
if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
goto out;
status = bf_get(lpfc_rcqe_status, &rcqe);
switch (status) {
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2537 Receive Frame Truncated!!\n");
case FC_STATUS_RQ_SUCCESS:
spin_lock_irqsave(&phba->hbalock, iflags);
dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
if (!dma_buf) {
spin_unlock_irqrestore(&phba->hbalock, iflags);
goto out;
}
memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
/* save off the frame for the worker thread to process */
list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
/* Frame received */
phba->hba_flag |= HBA_RECEIVE_BUFFER;
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
case FC_STATUS_INSUFF_BUF_NEED_BUF:
case FC_STATUS_INSUFF_BUF_FRM_DISC:
/* Post more buffers if possible */
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
}
out:
return workposted;
}
/**
* lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
* @phba: Pointer to HBA context object.
* @eqe: Pointer to fast-path event queue entry.
*
* This routine processes an event queue entry from the slow-path event queue.
* It checks the MajorCode and MinorCode to determine whether this is a
* completion event on a completion queue; if not, an error is logged and the
* routine returns. Otherwise, it finds the corresponding completion queue,
* processes all the entries on that completion queue, re-arms the
* completion queue, and then returns.
*
**/
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
{
struct lpfc_queue *cq = NULL, *childq, *speq;
struct lpfc_cqe *cqe;
bool workposted = false;
int ecount = 0;
uint16_t cqid;
if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
bf_get(lpfc_eqe_minor_code, eqe) != 0) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0359 Not a valid slow-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
bf_get(lpfc_eqe_major_code, eqe),
bf_get(lpfc_eqe_minor_code, eqe));
return;
}
/* Get the reference to the corresponding CQ */
cqid = bf_get(lpfc_eqe_resource_id, eqe);
/* Search for completion queue pointer matching this cqid */
speq = phba->sli4_hba.sp_eq;
list_for_each_entry(childq, &speq->child_list, list) {
if (childq->queue_id == cqid) {
cq = childq;
break;
}
}
if (unlikely(!cq)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0365 Slow-path CQ identifier (%d) does "
"not exist\n", cqid);
return;
}
/* Process all the entries to the CQ */
switch (cq->type) {
case LPFC_MCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
if (!(++ecount % LPFC_GET_QE_REL_INT))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
break;
case LPFC_WCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
if (!(++ecount % LPFC_GET_QE_REL_INT))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
break;
case LPFC_RCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
if (!(++ecount % LPFC_GET_QE_REL_INT))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0370 Invalid completion queue type (%d)\n",
cq->type);
return;
}
/* Catch the no cq entry condition, log an error */
if (unlikely(ecount == 0))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0371 No entry from the CQ: identifier "
"(x%x), type (%d)\n", cq->queue_id, cq->type);
/* In any case, flush and re-arm the CQ */
lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
/* wake up worker thread if there is work to be done */
if (workposted)
lpfc_worker_wake_up(phba);
}
/**
* lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
* @phba: Pointer to HBA context object.
* @wcqe: Pointer to work-queue completion queue entry.
*
* This routine processes a fast-path work queue completion entry from the
* fast-path event queue for FCP command response completion.
**/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
struct lpfc_wcqe_complete *wcqe)
{
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
struct lpfc_iocbq *cmdiocbq;
struct lpfc_iocbq irspiocbq;
unsigned long iflags;
spin_lock_irqsave(&phba->hbalock, iflags);
pring->stats.iocb_event++;
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Check for response status */
if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
/* If resource errors reported from HBA, reduce queue
* depth of the SCSI device.
*/
if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
IOSTAT_LOCAL_REJECT) &&
(wcqe->parameter == IOERR_NO_RESOURCES)) {
phba->lpfc_rampdown_queue_depth(phba);
}
/* Log the error status */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0373 FCP complete error: status=x%x, "
"hw_status=x%x, total_data_specified=%d, "
"parameter=x%x, word3=x%x\n",
bf_get(lpfc_wcqe_c_status, wcqe),
bf_get(lpfc_wcqe_c_hw_status, wcqe),
wcqe->total_data_placed, wcqe->parameter,
wcqe->word3);
}
/* Look up the FCP command IOCB and create pseudo response IOCB */
spin_lock_irqsave(&phba->hbalock, iflags);
cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (unlikely(!cmdiocbq)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0374 FCP complete with no corresponding "
"cmdiocb: iotag (%d)\n",
bf_get(lpfc_wcqe_c_request_tag, wcqe));
return;
}
if (unlikely(!cmdiocbq->iocb_cmpl)) {
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0375 FCP cmdiocb not callback function "
"iotag: (%d)\n",
bf_get(lpfc_wcqe_c_request_tag, wcqe));
return;
}
/* Fake the irspiocb and copy necessary response information */
lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
/* Pass the cmd_iocb and the rsp state to the upper layer */
(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
/**
* lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
* @phba: Pointer to HBA context object.
* @cq: Pointer to completion queue.
* @wcqe: Pointer to work-queue completion queue entry.
*
* This routine handles a fast-path WQ entry consumed event by invoking the
* proper WQ release routine to the slow-path WQ.
**/
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_wcqe_release *wcqe)
{
struct lpfc_queue *childwq;
bool wqid_matched = false;
uint16_t fcp_wqid;
/* Check for fast-path FCP work queue release */
fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
list_for_each_entry(childwq, &cq->child_list, list) {
if (childwq->queue_id == fcp_wqid) {
lpfc_sli4_wq_release(childwq,
bf_get(lpfc_wcqe_r_wqe_index, wcqe));
wqid_matched = true;
break;
}
}
/* Report warning log message if no match found */
if (!wqid_matched)
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"2580 Fast-path wqe consume event carries "
"mismatched qid: wcqe-qid=x%x\n", fcp_wqid);
}
/**
* lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
* @phba: Pointer to HBA context object.
* @cq: Pointer to the completion queue.
* @cqe: Pointer to fast-path completion queue entry.
*
* This routine processes a fast-path work queue completion entry from the
* fast-path event queue for FCP command response completion.
**/
static int
lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_cqe *cqe)
{
struct lpfc_wcqe_release wcqe;
bool workposted = false;
/* Copy the work queue CQE and convert endian order if needed */
lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
/* Check and process for different type of WCQE and dispatch */
switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
case CQE_CODE_COMPL_WQE:
/* Process the WQ complete event */
lpfc_sli4_fp_handle_fcp_wcqe(phba,
(struct lpfc_wcqe_complete *)&wcqe);
break;
case CQE_CODE_RELEASE_WQE:
/* Process the WQ release event */
lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
(struct lpfc_wcqe_release *)&wcqe);
break;
case CQE_CODE_XRI_ABORTED:
/* Process the WQ XRI abort event */
workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
(struct sli4_wcqe_xri_aborted *)&wcqe);
break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0144 Not a valid WCQE code: x%x\n",
bf_get(lpfc_wcqe_c_code, &wcqe));
break;
}
return workposted;
}
/**
* lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
* @phba: Pointer to HBA context object.
* @eqe: Pointer to fast-path event queue entry.
* @fcp_cqidx: Index of the fast-path FCP completion queue to service.
*
* This routine processes an event queue entry from the fast-path event queue.
* It will check the MajorCode and MinorCode to determine whether this is for a
* completion event on a completion queue, if not, an error shall be logged
* and just return. Otherwise, it will get to the corresponding completion
* queue and process all the entries on the completion queue, rearm the
* completion queue, and then return.
**/
static void
lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
uint32_t fcp_cqidx)
{
struct lpfc_queue *cq;
struct lpfc_cqe *cqe;
bool workposted = false;
uint16_t cqid;
int ecount = 0;
if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0366 Not a valid fast-path completion "
"event: majorcode=x%x, minorcode=x%x\n",
bf_get(lpfc_eqe_major_code, eqe),
bf_get(lpfc_eqe_minor_code, eqe));
return;
}
cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
if (unlikely(!cq)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0367 Fast-path completion queue does not "
"exist\n");
return;
}
/* Get the reference to the corresponding CQ */
cqid = bf_get(lpfc_eqe_resource_id, eqe);
if (unlikely(cqid != cq->queue_id)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0368 Miss-matched fast-path completion "
"queue identifier: eqcqid=%d, fcpcqid=%d\n",
cqid, cq->queue_id);
return;
}
/* Process all the entries to the CQ */
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
if (!(++ecount % LPFC_GET_QE_REL_INT))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
/* Catch the no cq entry condition */
if (unlikely(ecount == 0))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0369 No entry from fast-path completion "
"queue fcpcqid=%d\n", cq->queue_id);
/* In any case, flush and re-arm the CQ */
lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
/* wake up worker thread if there is work to be done */
if (workposted)
lpfc_worker_wake_up(phba);
}
static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
struct lpfc_eqe *eqe;
/* walk all the EQ entries and drop on the floor */
while ((eqe = lpfc_sli4_eq_get(eq)))
;
/* Clear and re-arm the EQ */
lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
/**
* lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
* @irq: Interrupt number.
* @dev_id: The device context pointer.
*
* This function is directly called from the PCI layer as an interrupt
* service routine when device with SLI-4 interface spec is enabled with
* MSI-X multi-message interrupt mode and there are slow-path events in
* the HBA. However, when the device is enabled with either MSI or Pin-IRQ
* interrupt mode, this function is called as part of the device-level
* interrupt handler. When the PCI slot is in error recovery or the HBA is
* undergoing initialization, the interrupt handler will not process the
* interrupt. The link attention and ELS ring attention events are handled
* by the worker thread. The interrupt handler signals the worker thread
* and returns for these events. This function is called without any lock
* held. It gets the hbalock to access and update SLI data structures.
*
* This function returns IRQ_HANDLED when interrupt is handled else it
* returns IRQ_NONE.
**/
irqreturn_t
lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
struct lpfc_queue *speq;
struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
/*
* Get the driver's phba structure from the dev_id
*/
phba = (struct lpfc_hba *)dev_id;
if (unlikely(!phba))
return IRQ_NONE;
/* Get to the EQ struct associated with this vector */
speq = phba->sli4_hba.sp_eq;
/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
/* Check again for link_state with lock held */
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
/* Flush, clear interrupt, and rearm the EQ */
lpfc_sli4_eq_flush(phba, speq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return IRQ_NONE;
}
/*
* Process all the events on the slow-path EQ
*/
while ((eqe = lpfc_sli4_eq_get(speq))) {
lpfc_sli4_sp_handle_eqe(phba, eqe);
if (!(++ecount % LPFC_GET_QE_REL_INT))
lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
}
/* Always clear and re-arm the slow-path EQ */
lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
/* Catch the no EQ entry condition */
if (unlikely(ecount == 0)) {
if (phba->intr_type == MSIX)
/* The MSI-X vector is not shared, so an empty EQ is worth logging */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0357 MSI-X interrupt with no EQE\n");
else
/* An MSI/INTx line may be shared; treat an empty EQ as not ours */
return IRQ_NONE;
}
return IRQ_HANDLED;
} /* lpfc_sli4_sp_intr_handler */
/**
* lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
* @irq: Interrupt number.
* @dev_id: The device context pointer.
*
* This function is directly called from the PCI layer as an interrupt
* service routine when device with SLI-4 interface spec is enabled with
* MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
* ring event in the HBA. However, when the device is enabled with either
* MSI or Pin-IRQ interrupt mode, this function is called as part of the
* device-level interrupt handler. When the PCI slot is in error recovery
* or the HBA is undergoing initialization, the interrupt handler will not
* process the interrupt. The SCSI FCP fast-path ring events are handled in
* the interrupt context. This function is called without any lock held.
* It gets the hbalock to access and update SLI data structures. Note that
* the FCP EQs and FCP CQs are mapped one-to-one, such that the FCP EQ index
* is equal to the FCP CQ index.
*
* This function returns IRQ_HANDLED when interrupt is handled else it
* returns IRQ_NONE.
**/
irqreturn_t
lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
struct lpfc_queue *fpeq;
struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
uint32_t fcp_eqidx;
/* Get the driver's phba structure from the dev_id */
fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
phba = fcp_eq_hdl->phba;
fcp_eqidx = fcp_eq_hdl->idx;
if (unlikely(!phba))
return IRQ_NONE;
/* Get to the EQ struct associated with this vector */
fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
/* Check again for link_state with lock held */
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
/* Flush, clear interrupt, and rearm the EQ */
lpfc_sli4_eq_flush(phba, fpeq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return IRQ_NONE;
}
/*
* Process all the events on the FCP fast-path EQ
*/
while ((eqe = lpfc_sli4_eq_get(fpeq))) {
lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
if (!(++ecount % LPFC_GET_QE_REL_INT))
lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
}
/* Always clear and re-arm the fast-path EQ */
lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
if (unlikely(ecount == 0)) {
if (phba->intr_type == MSIX)
/* The MSI-X vector is not shared, so an empty EQ is worth logging */
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0358 MSI-X interrupt with no EQE\n");
else
/* An MSI/INTx line may be shared; treat an empty EQ as not ours */
return IRQ_NONE;
}
return IRQ_HANDLED;
} /* lpfc_sli4_fp_intr_handler */
/**
* lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
* @irq: Interrupt number.
* @dev_id: The device context pointer.
*
* This function is the device-level interrupt handler to device with SLI-4
* interface spec, called from the PCI layer when either MSI or Pin-IRQ
* interrupt mode is enabled and there is an event in the HBA which requires
* driver attention. This function invokes the slow-path interrupt attention
* handling function and fast-path interrupt attention handling function in
* turn to process the relevant HBA attention events. This function is called
* without any lock held. It gets the hbalock to access and update SLI data
* structures.
*
* This function returns IRQ_HANDLED when interrupt is handled, else it
* returns IRQ_NONE.
**/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
irqreturn_t sp_irq_rc, fp_irq_rc;
bool fp_handled = false;
uint32_t fcp_eqidx;
/* Get the driver's phba structure from the dev_id */
phba = (struct lpfc_hba *)dev_id;
if (unlikely(!phba))
return IRQ_NONE;
/*
* Invokes slow-path host attention interrupt handling as appropriate.
*/
sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
/*
* Invoke fast-path host attention interrupt handling as appropriate.
*/
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
&phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
if (fp_irq_rc == IRQ_HANDLED)
fp_handled = true;
}
return fp_handled ? IRQ_HANDLED : sp_irq_rc;
} /* lpfc_sli4_intr_handler */
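/*
 * Illustrative sketch, not the driver's actual setup code: how the three
 * interrupt handlers above would typically be registered. With MSI-X the
 * slow path and each fast-path EQ get a private vector; with MSI or
 * Pin-IRQ only the device-level handler is registered. The vector
 * variables and the name strings below are assumptions for illustration.
 *
 *   // MSI-X: one vector for the slow path, one per fast-path EQ
 *   rc = request_irq(sp_vector, lpfc_sli4_sp_intr_handler, 0,
 *                    "lpfc-sp", phba);
 *   for (idx = 0; idx < phba->cfg_fcp_eq_count; idx++)
 *           rc = request_irq(fp_vector[idx], lpfc_sli4_fp_intr_handler, 0,
 *                            "lpfc-fp", &phba->sli4_hba.fcp_eq_hdl[idx]);
 *
 *   // MSI or Pin-IRQ: a single, possibly shared, device-level handler
 *   rc = request_irq(pdev->irq, lpfc_sli4_intr_handler, IRQF_SHARED,
 *                    "lpfc", phba);
 */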
/**
* lpfc_sli4_queue_free - free a queue structure and associated memory
* @queue: The queue structure to free.
*
* This function frees a queue structure and the DMAable memory used for
* the host resident queue. This function must be called after destroying the
* queue on the HBA.
**/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
struct lpfc_dmabuf *dmabuf;
if (!queue)
return;
while (!list_empty(&queue->page_list)) {
list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
list);
dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
kfree(queue);
return;
}
/**
* lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
* @phba: The HBA that this queue is being created on.
* @entry_size: The size of each queue entry for this queue.
* @entry_count: The number of entries that this queue will handle.
*
* This function allocates a queue structure and the DMAable memory used for
* the host resident queue. This function must be called before creating the
* queue on the HBA.
**/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
uint32_t entry_count)
{
struct lpfc_queue *queue;
struct lpfc_dmabuf *dmabuf;
int x, total_qe_count;
void *dma_pointer;
queue = kzalloc(sizeof(struct lpfc_queue) +
(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
if (!queue)
return NULL;
queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
INIT_LIST_HEAD(&queue->list);
INIT_LIST_HEAD(&queue->page_list);
INIT_LIST_HEAD(&queue->child_list);
for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (!dmabuf)
goto out_fail;
dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
PAGE_SIZE, &dmabuf->phys,
GFP_KERNEL);
if (!dmabuf->virt) {
kfree(dmabuf);
goto out_fail;
}
memset(dmabuf->virt, 0, PAGE_SIZE);
dmabuf->buffer_tag = x;
list_add_tail(&dmabuf->list, &queue->page_list);
/* initialize queue's entry array */
dma_pointer = dmabuf->virt;
for (; total_qe_count < entry_count &&
dma_pointer < (PAGE_SIZE + dmabuf->virt);
total_qe_count++, dma_pointer += entry_size) {
queue->qe[total_qe_count].address = dma_pointer;
}
}
queue->entry_size = entry_size;
queue->entry_count = entry_count;
queue->phba = phba;
return queue;
out_fail:
lpfc_sli4_queue_free(queue);
return NULL;
}
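/*
 * Illustrative usage sketch for the alloc/free pair above; the eq_esize
 * and eq_ecount fields are assumed names for the entry geometry reported
 * by the SLI-4 configuration, not necessarily the real ones:
 *
 *   struct lpfc_queue *eq;
 *
 *   eq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
 *                              phba->sli4_hba.eq_ecount);
 *   if (!eq)
 *           return -ENOMEM;
 *   // ... lpfc_eq_create(), use the queue, then lpfc_eq_destroy() ...
 *   lpfc_sli4_queue_free(eq);
 */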
/**
* lpfc_eq_create - Create an Event Queue on the HBA
* @phba: HBA structure that indicates port to create a queue on.
* @eq: The queue structure to use to create the event queue.
* @imax: The maximum interrupt per second limit.
*
* This function creates an event queue, as detailed in @eq, on a port,
* described by @phba by sending an EQ_CREATE mailbox command to the HBA.
*
* The @phba struct is used to send mailbox command to HBA. The @eq struct
* is used to get the entry count and entry size that are necessary to
* determine the number of pages to allocate and use for this queue. This
* function will send the EQ_CREATE mailbox command to the HBA to setup the
* event queue. This function is synchronous and will wait for the mailbox
* command to finish before continuing.
*
* On success this function will return a zero. If unable to allocate enough
* memory this function will return -ENOMEM. If the queue create mailbox command
* fails this function will return -ENXIO.
**/
uint32_t
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
{
struct lpfc_mbx_eq_create *eq_create;
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
struct lpfc_dmabuf *dmabuf;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
uint16_t dmult;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_eq_create) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
LPFC_MBOX_OPCODE_EQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
eq_create = &mbox->u.mqe.un.eq_create;
bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
eq->page_count);
bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
LPFC_EQE_SIZE);
bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
/* Calculate delay multiplier from maximum interrupts per second */
dmult = LPFC_DMULT_CONST/imax - 1;
bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
dmult);
switch (eq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0360 Unsupported EQ count. (%d)\n",
eq->entry_count);
if (eq->entry_count < 256) {
mempool_free(mbox, phba->mbox_mem_pool);
return -EINVAL;
}
/* otherwise default to smallest count (fall through) */
case 256:
bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
LPFC_EQ_CNT_256);
break;
case 512:
bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
LPFC_EQ_CNT_512);
break;
case 1024:
bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
LPFC_EQ_CNT_1024);
break;
case 2048:
bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
LPFC_EQ_CNT_2048);
break;
case 4096:
bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
LPFC_EQ_CNT_4096);
break;
}
list_for_each_entry(dmabuf, &eq->page_list, list) {
eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
mbox->vport = phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
mbox->context1 = NULL;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2500 EQ_CREATE mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
status = -ENXIO;
}
eq->type = LPFC_EQ;
eq->subtype = LPFC_NONE;
eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
if (eq->queue_id == 0xFFFF)
status = -ENXIO;
eq->host_index = 0;
eq->hba_index = 0;
mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
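/*
 * Worked example for the delay multiplier computed above. The value of
 * LPFC_DMULT_CONST is hardware-defined; 640 below is an assumption purely
 * for illustration. With LPFC_DMULT_CONST = 640 and imax = 10 interrupts
 * per second, dmult = 640 / 10 - 1 = 63 is programmed into the EQ context.
 * The expression also implies the caller must pass a non-zero imax, or
 * the division would fault.
 */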
/**
* lpfc_cq_create - Create a Completion Queue on the HBA
* @phba: HBA structure that indicates port to create a queue on.
* @cq: The queue structure to use to create the completion queue.
* @eq: The event queue to bind this completion queue to.
* @type: The completion queue type (LPFC_MCQ, LPFC_WCQ, or LPFC_RCQ).
* @subtype: The functional subtype of the completion queue.
*
* This function creates a completion queue, as detailed in @cq, on a port,
* described by @phba by sending a CQ_CREATE mailbox command to the HBA.
*
* The @phba struct is used to send mailbox command to HBA. The @cq struct
* is used to get the entry count and entry size that are necessary to
* determine the number of pages to allocate and use for this queue. The @eq
* is used to indicate which event queue to bind this completion queue to. This
* function will send the CQ_CREATE mailbox command to the HBA to setup the
* completion queue. This function is synchronous and will wait for the mailbox
* command to finish before continuing.
*
* On success this function will return a zero. If unable to allocate enough
* memory this function will return -ENOMEM. If the queue create mailbox command
* fails this function will return -ENXIO.
**/
uint32_t
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
struct lpfc_mbx_cq_create *cq_create;
struct lpfc_dmabuf *dmabuf;
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_cq_create) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
LPFC_MBOX_OPCODE_CQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
cq_create = &mbox->u.mqe.un.cq_create;
bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
cq->page_count);
bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
switch (cq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0361 Unsupported CQ count. (%d)\n",
cq->entry_count);
if (cq->entry_count < 256) {
mempool_free(mbox, phba->mbox_mem_pool);
return -EINVAL;
}
/* otherwise default to smallest count (fall through) */
case 256:
bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
LPFC_CQ_CNT_256);
break;
case 512:
bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
LPFC_CQ_CNT_512);
break;
case 1024:
bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
LPFC_CQ_CNT_1024);
break;
}
list_for_each_entry(dmabuf, &cq->page_list, list) {
cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2501 CQ_CREATE mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
status = -ENXIO;
goto out;
}
cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
if (cq->queue_id == 0xFFFF) {
status = -ENXIO;
goto out;
}
/* link the cq onto the parent eq child list */
list_add_tail(&cq->list, &eq->child_list);
/* Set up completion queue's type and subtype */
cq->type = type;
cq->subtype = subtype;
cq->host_index = 0;
cq->hba_index = 0;
out:
mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
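/*
 * Illustrative bring-up sketch for a completion queue. LPFC_WCQ mirrors
 * the handlers earlier in this file; LPFC_FCP is assumed here to be the
 * FCP subtype constant, and cq_esize/cq_ecount are assumed names for the
 * configured entry geometry:
 *
 *   cq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
 *                              phba->sli4_hba.cq_ecount);
 *   if (!cq)
 *           return -ENOMEM;
 *   rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
 *   if (rc)
 *           goto out_free_cq;
 *   // cq now sits on eq->child_list, which is how the EQE handlers
 *   // above resolve a CQ identifier back to a queue
 */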
/**
* lpfc_mq_create - Create a Mailbox Queue on the HBA
* @phba: HBA structure that indicates port to create a queue on.
* @mq: The queue structure to use to create the mailbox queue.
* @cq: The completion queue to bind this mailbox queue to.
* @subtype: The functional subtype of the mailbox queue.
*
* This function creates a mailbox queue, as detailed in @mq, on a port,
* described by @phba by sending a MQ_CREATE mailbox command to the HBA.
*
* The @phba struct is used to send mailbox command to HBA. The @mq struct
* is used to get the entry count and entry size that are necessary to
* determine the number of pages to allocate and use for this queue. This
* function will send the MQ_CREATE mailbox command to the HBA to setup the
* mailbox queue. This function is synchronous and will wait for the mailbox
* command to finish before continuing.
*
* On success this function will return a zero. If unable to allocate enough
* memory this function will return -ENOMEM. If the queue create mailbox command
* fails this function will return -ENXIO.
**/
uint32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
struct lpfc_queue *cq, uint32_t subtype)
{
struct lpfc_mbx_mq_create *mq_create;
struct lpfc_dmabuf *dmabuf;
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_mq_create) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
LPFC_MBOX_OPCODE_MQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
mq_create = &mbox->u.mqe.un.mq_create;
bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
mq->page_count);
bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
cq->queue_id);
bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
switch (mq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0362 Unsupported MQ count. (%d)\n",
mq->entry_count);
if (mq->entry_count < 16) {
mempool_free(mbox, phba->mbox_mem_pool);
return -EINVAL;
}
/* otherwise default to smallest count (fall through) */
case 16:
bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
LPFC_MQ_CNT_16);
break;
case 32:
bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
LPFC_MQ_CNT_32);
break;
case 64:
bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
LPFC_MQ_CNT_64);
break;
case 128:
bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
LPFC_MQ_CNT_128);
break;
}
list_for_each_entry(dmabuf, &mq->page_list, list) {
mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2502 MQ_CREATE mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
status = -ENXIO;
goto out;
}
mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
if (mq->queue_id == 0xFFFF) {
status = -ENXIO;
goto out;
}
mq->type = LPFC_MQ;
mq->subtype = subtype;
mq->host_index = 0;
mq->hba_index = 0;
/* link the mq onto the parent cq child list */
list_add_tail(&mq->list, &cq->child_list);
out:
mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
/**
* lpfc_wq_create - Create a Work Queue on the HBA
* @phba: HBA structure that indicates port to create a queue on.
* @wq: The queue structure to use to create the work queue.
* @cq: The completion queue to bind this work queue to.
* @subtype: The subtype of the work queue indicating its functionality.
*
* This function creates a work queue, as detailed in @wq, on a port, described
* by @phba by sending a WQ_CREATE mailbox command to the HBA.
*
* The @phba struct is used to send mailbox command to HBA. The @wq struct
* is used to get the entry count and entry size that are necessary to
* determine the number of pages to allocate and use for this queue. The @cq
* is used to indicate which completion queue to bind this work queue to. This
* function will send the WQ_CREATE mailbox command to the HBA to setup the
* work queue. This function is synchronous and will wait for the mailbox
* command to finish before continuing.
*
* On success this function will return a zero. If unable to allocate enough
* memory this function will return -ENOMEM. If the queue create mailbox command
* fails this function will return -ENXIO.
**/
uint32_t
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
struct lpfc_queue *cq, uint32_t subtype)
{
struct lpfc_mbx_wq_create *wq_create;
struct lpfc_dmabuf *dmabuf;
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_wq_create) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
wq_create = &mbox->u.mqe.un.wq_create;
bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
wq->page_count);
bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
cq->queue_id);
list_for_each_entry(dmabuf, &wq->page_list, list) {
wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2503 WQ_CREATE mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
status = -ENXIO;
goto out;
}
wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
if (wq->queue_id == 0xFFFF) {
status = -ENXIO;
goto out;
}
wq->type = LPFC_WQ;
wq->subtype = subtype;
wq->host_index = 0;
wq->hba_index = 0;
/* link the wq onto the parent cq child list */
list_add_tail(&wq->list, &cq->child_list);
out:
mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
/**
* lpfc_rq_create - Create a Receive Queue on the HBA
* @phba: HBA structure that indicates port to create a queue on.
* @hrq: The queue structure to use to create the header receive queue.
* @drq: The queue structure to use to create the data receive queue.
* @cq: The completion queue to bind these receive queues to.
* @subtype: The functional subtype of the receive queues.
*
* This function creates a receive buffer queue pair, as detailed in @hrq and
* @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
* to the HBA.
*
* The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
* struct is used to get the entry count that is necessary to determine the
* number of pages to use for this queue. The @cq is used to indicate which
* completion queue to bind received buffers that are posted to these queues to.
* This function will send the RQ_CREATE mailbox command to the HBA to setup the
* receive queue pair. This function is synchronous and will wait for the
* mailbox command to finish before continuing.
*
* On success this function will return a zero. If unable to allocate enough
* memory this function will return -ENOMEM. If the queue create mailbox command
* fails this function will return -ENXIO.
**/
uint32_t
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
struct lpfc_mbx_rq_create *rq_create;
struct lpfc_dmabuf *dmabuf;
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
if (hrq->entry_count != drq->entry_count)
return -EINVAL;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_rq_create) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
rq_create = &mbox->u.mqe.un.rq_create;
switch (hrq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2535 Unsupported RQ count. (%d)\n",
hrq->entry_count);
if (hrq->entry_count < 512) {
mempool_free(mbox, phba->mbox_mem_pool);
return -EINVAL;
}
/* otherwise default to smallest count (fall through) */
case 512:
bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
LPFC_RQ_RING_SIZE_512);
break;
case 1024:
bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
LPFC_RQ_RING_SIZE_1024);
break;
case 2048:
bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
LPFC_RQ_RING_SIZE_2048);
break;
case 4096:
bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
LPFC_RQ_RING_SIZE_4096);
break;
}
bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
cq->queue_id);
bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
hrq->page_count);
bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
LPFC_HDR_BUF_SIZE);
list_for_each_entry(dmabuf, &hrq->page_list, list) {
rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2504 RQ_CREATE mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
status = -ENXIO;
goto out;
}
hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
if (hrq->queue_id == 0xFFFF) {
status = -ENXIO;
goto out;
}
hrq->type = LPFC_HRQ;
hrq->subtype = subtype;
hrq->host_index = 0;
hrq->hba_index = 0;
/* now create the data queue */
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
length, LPFC_SLI4_MBX_EMBED);
switch (drq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2536 Unsupported RQ count. (%d)\n",
drq->entry_count);
if (drq->entry_count < 512) {
mempool_free(mbox, phba->mbox_mem_pool);
return -EINVAL;
}
/* otherwise default to smallest count (fall through) */
case 512:
bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
LPFC_RQ_RING_SIZE_512);
break;
case 1024:
bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
LPFC_RQ_RING_SIZE_1024);
break;
case 2048:
bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
LPFC_RQ_RING_SIZE_2048);
break;
case 4096:
bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
LPFC_RQ_RING_SIZE_4096);
break;
}
bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
cq->queue_id);
bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
drq->page_count);
bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
LPFC_DATA_BUF_SIZE);
list_for_each_entry(dmabuf, &drq->page_list, list) {
rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
putPaddrLow(dmabuf->phys);
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
status = -ENXIO;
goto out;
}
drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
if (drq->queue_id == 0xFFFF) {
status = -ENXIO;
goto out;
}
drq->type = LPFC_DRQ;
drq->subtype = subtype;
drq->host_index = 0;
drq->hba_index = 0;
/* link the header and data RQs onto the parent cq child list */
list_add_tail(&hrq->list, &cq->child_list);
list_add_tail(&drq->list, &cq->child_list);
out:
mempool_free(mbox, phba->mbox_mem_pool);
return status;
}
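/*
 * Ordering note with an illustrative sketch: the create routines above
 * build a strict parent/child hierarchy and must be called parents first
 * (the queue variables here are placeholders):
 *
 *   lpfc_eq_create(phba, eq, imax);                // top level
 *   lpfc_cq_create(phba, cq, eq, type, subtype);   // child of the EQ
 *   lpfc_mq_create(phba, mq, cq, subtype);         // children of the CQ
 *   lpfc_wq_create(phba, wq, cq, subtype);
 *   lpfc_rq_create(phba, hrq, drq, cq, subtype);   // header/data pair
 *
 * Each call links the new queue onto its parent's child_list, which is
 * the list the event and completion handlers walk at interrupt time to
 * translate a queue identifier back into a queue.
 */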
/**
* lpfc_eq_destroy - Destroy an Event Queue on the HBA
* @phba: HBA structure that indicates port the queue is on.
* @eq: The queue structure associated with the queue to destroy.
*
* This function destroys a queue, as detailed in @eq, by sending a mailbox
* command, specific to the type of queue, to the HBA.
*
* The @eq struct is used to get the queue ID of the queue to destroy.
*
* On success this function will return a zero. If the queue destroy mailbox
* command fails this function will return -ENXIO.
**/
uint32_t
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
if (!eq)
return -ENODEV;
mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_eq_destroy) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
LPFC_MBOX_OPCODE_EQ_DESTROY,
length, LPFC_SLI4_MBX_EMBED);
bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
eq->queue_id);
mbox->vport = eq->phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *)
&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2505 EQ_DESTROY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
status = -ENXIO;
}
/* Remove eq from any list */
list_del_init(&eq->list);
mempool_free(mbox, eq->phba->mbox_mem_pool);
return status;
}
/**
* lpfc_cq_destroy - Destroy a Completion Queue on the HBA
* @phba: HBA structure that indicates port the queue is on.
* @cq: The queue structure associated with the queue to destroy.
*
* This function destroys a queue, as detailed in @cq, by sending a mailbox
* command, specific to the type of queue, to the HBA.
*
* The @cq struct is used to get the queue ID of the queue to destroy.
*
* On success this function will return a zero. If the queue destroy mailbox
* command fails this function will return -ENXIO.
**/
uint32_t
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
if (!cq)
return -ENODEV;
mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_cq_destroy) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
LPFC_MBOX_OPCODE_CQ_DESTROY,
length, LPFC_SLI4_MBX_EMBED);
bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
cq->queue_id);
mbox->vport = cq->phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *)
&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2506 CQ_DESTROY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
status = -ENXIO;
}
/* Remove cq from any list */
list_del_init(&cq->list);
mempool_free(mbox, cq->phba->mbox_mem_pool);
return status;
}
/**
* lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
* @phba: HBA structure that indicates port the queue is on.
* @mq: The queue structure associated with the queue to destroy.
*
* This function destroys a queue, as detailed in @mq, by sending a mailbox
* command, specific to the type of queue, to the HBA.
*
* The @mq struct is used to get the queue ID of the queue to destroy.
*
* On success this function will return a zero. If the queue destroy mailbox
* command fails this function will return -ENXIO.
**/
uint32_t
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
if (!mq)
return -ENODEV;
mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_mq_destroy) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
LPFC_MBOX_OPCODE_MQ_DESTROY,
length, LPFC_SLI4_MBX_EMBED);
bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
mq->queue_id);
mbox->vport = mq->phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *)
&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2507 MQ_DESTROY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
status = -ENXIO;
}
/* Remove mq from any list */
list_del_init(&mq->list);
mempool_free(mbox, mq->phba->mbox_mem_pool);
return status;
}
/**
* lpfc_wq_destroy - Destroy a Work Queue on the HBA
* @phba: HBA structure that indicates port the queue is on.
* @wq: The queue structure associated with the queue to destroy.
*
* This function destroys a queue, as detailed in @wq, by sending a mailbox
* command, specific to the type of queue, to the HBA.
*
* The @wq struct is used to get the queue ID of the queue to destroy.
*
* On success this function will return a zero. If the queue destroy mailbox
* command fails this function will return -ENXIO.
**/
uint32_t
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
if (!wq)
return -ENODEV;
mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_wq_destroy) -
sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
length, LPFC_SLI4_MBX_EMBED);
bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
wq->queue_id);
mbox->vport = wq->phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
shdr = (union lpfc_sli4_cfg_shdr *)
&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2508 WQ_DESTROY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
status = -ENXIO;
}
/* Remove wq from any list */
list_del_init(&wq->list);
mempool_free(mbox, wq->phba->mbox_mem_pool);
return status;
}
/**
* lpfc_rq_destroy - Destroy a Receive Queue on the HBA
* @phba: HBA structure that indicates port the queues are on.
* @hrq: The header receive queue structure to destroy.
* @drq: The data receive queue structure to destroy.
*
* This function destroys the receive queue pair, as detailed in @hrq and
* @drq, by sending mailbox commands, specific to the type of queue, to the
* HBA.
*
* The @hrq and @drq structs are used to get the queue IDs of the queues to
* destroy.
*
* On success this function will return a zero. If a queue destroy mailbox
* command fails this function will return -ENXIO.
**/
uint32_t
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
struct lpfc_queue *drq)
{
LPFC_MBOXQ_t *mbox;
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
if (!hrq || !drq)
return -ENODEV;
mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_rq_destroy) -
sizeof(struct mbox_header));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
length, LPFC_SLI4_MBX_EMBED);
bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
hrq->queue_id);
mbox->vport = hrq->phba->pport;
mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *)
&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2509 RQ_DESTROY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
if (rc != MBX_TIMEOUT)
mempool_free(mbox, hrq->phba->mbox_mem_pool);
return -ENXIO;
}
bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
drq->queue_id);
rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
shdr = (union lpfc_sli4_cfg_shdr *)
&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2510 RQ_DESTROY mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
status = -ENXIO;
}
list_del_init(&hrq->list);
list_del_init(&drq->list);
mempool_free(mbox, hrq->phba->mbox_mem_pool);
return status;
}
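/*
 * Teardown note with an illustrative sketch: destruction mirrors creation
 * in reverse, children before parents, so no outstanding completion can
 * reference a destroyed queue (queue variables are placeholders):
 *
 *   lpfc_rq_destroy(phba, hrq, drq);
 *   lpfc_wq_destroy(phba, wq);
 *   lpfc_mq_destroy(phba, mq);
 *   lpfc_cq_destroy(phba, cq);
 *   lpfc_eq_destroy(phba, eq);
 *
 * Only after the destroy mailbox command completes should the host-side
 * memory be released with lpfc_sli4_queue_free(), per its header comment.
 */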
/**
* lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
* @phba: Pointer to HBA context object.
* @pdma_phys_addr0: Physical address of the 1st SGL page.
* @pdma_phys_addr1: Physical address of the 2nd SGL page.
* @xritag: the xritag that ties this io to the SGL pages.
*
* This routine will post the sgl pages for the IO that has the xritag
* that is in the iocbq structure. The xritag is assigned during iocbq
* creation and persists for as long as the driver is loaded.
* If the caller has fewer than 256 scatter gather segments to map then
* pdma_phys_addr1 should be 0.
* If the caller needs to map more than 256 scatter gather segments then
* pdma_phys_addr1 should be a valid physical address.
* Physical addresses for SGLs must be 64-byte aligned.
* If two SGLs are mapped, the first one must have 256 entries and the
* second can have between 1 and 256 entries.
*
* Return codes:
* 0 - Success
* -ENXIO, -ENOMEM - Failure
**/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
dma_addr_t pdma_phys_addr0,
dma_addr_t pdma_phys_addr1,
uint16_t xritag)
{
struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
LPFC_MBOXQ_t *mbox;
int rc;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
if (xritag == NO_XRI) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0364 Invalid param:\n");
return -EINVAL;
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
sizeof(struct lpfc_mbx_post_sgl_pages) -
sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
&mbox->u.mqe.un.post_sgl_pages;
bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
cpu_to_le32(putPaddrLow(pdma_phys_addr0));
post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
cpu_to_le32(putPaddrLow(pdma_phys_addr1));
post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2511 POST_SGL mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
}
return rc;
}
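/*
 * Illustrative caller sketch for the single-XRI post above. For an IO
 * whose SGL fits in one 64-byte-aligned page the second page address is
 * simply 0; sglq below is a placeholder for a driver SGL queue entry:
 *
 *   rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 *   if (rc)
 *           return rc;   // the SGL pages were not registered with the port
 */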
/**
* lpfc_sli4_remove_all_sgl_pages - Remove all sgl pages registered with the HBA
* @phba: Pointer to HBA context object.
*
* This routine will remove all of the sgl pages registered with the hba.
*
* Return codes:
* 0 - Success
* -ENXIO, -ENOMEM - Failure
**/
int
lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *mbox;
int rc;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
LPFC_SLI4_MBX_EMBED);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else
rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *)
&mbox->u.mqe.un.sli4_config.header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (rc != MBX_TIMEOUT)
mempool_free(mbox, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
}
return rc;
}
/**
* lpfc_sli4_next_xritag - Get an xritag for the io
* @phba: Pointer to HBA context object.
*
* This function gets an xritag for the iocb. If there is no unused xritag
* it will return 0xffff, which is not a valid xritag.
* The function returns the allocated xritag if successful, else returns
* 0xffff.
* The caller is not required to hold any lock.
**/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
uint16_t xritag;
spin_lock_irq(&phba->hbalock);
xritag = phba->sli4_hba.next_xri;
if ((xritag != (uint16_t) -1) && xritag <
(phba->sli4_hba.max_cfg_param.max_xri
+ phba->sli4_hba.max_cfg_param.xri_base)) {
phba->sli4_hba.next_xri++;
phba->sli4_hba.max_cfg_param.xri_used++;
spin_unlock_irq(&phba->hbalock);
return xritag;
}
spin_unlock_irq(&phba->hbalock);
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2004 Failed to allocate XRI.last XRITAG is %d"
" Max XRI is %d, Used XRI is %d\n",
phba->sli4_hba.next_xri,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.max_cfg_param.xri_used);
return -1;
}
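/*
 * Illustrative caller sketch, assuming NO_XRI is the 0xffff sentinel this
 * file tests against elsewhere: because the return type is uint16_t, the
 * "return -1" above reaches callers as 0xffff.
 *
 *   xritag = lpfc_sli4_next_xritag(phba);
 *   if (xritag == NO_XRI)
 *           return -ENOMEM;   // XRI space exhausted
 *   iocbq->sli4_xritag = xritag;
 */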
/**
* lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to post a block of driver's sgl pages to the
* HBA using non-embedded mailbox command. No Lock is held. This routine
* is only called when the driver is loading and after all IO has been
* stopped.
**/
int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
{
struct lpfc_sglq *sglq_entry;
struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
struct sgl_page_pairs *sgl_pg_pairs;
void *viraddr;
LPFC_MBOXQ_t *mbox;
uint32_t reqlen, alloclen, pg_pairs;
uint32_t mbox_tmo;
uint16_t xritag_start = 0;
int els_xri_cnt, rc = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
/* The number of sgls to be posted */
els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
if (reqlen > PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"2559 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
return -ENOMEM;
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2560 Failed to allocate mbox cmd memory\n");
return -ENOMEM;
}
/* Allocate DMA memory and set up the non-embedded mailbox command */
alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
LPFC_SLI4_MBX_NEMBED);
if (alloclen < reqlen) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0285 Allocated DMA memory size (%d) is "
"less than the requested DMA memory "
"size (%d)\n", alloclen, reqlen);
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
/* Get the first SGE entry from the non-embedded DMA memory */
if (unlikely(!mbox->sge_array)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"2525 Failed to get the non-embedded SGE "
"virtual address\n");
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
viraddr = mbox->sge_array->addr[0];
/* Set up the SGL pages in the non-embedded DMA pages */
sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
sgl_pg_pairs = &sgl->sgl_pg_pairs;
for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
/* Set up the sge entry */
sgl_pg_pairs->sgl_pg0_addr_lo =
cpu_to_le32(putPaddrLow(sglq_entry->phys));
sgl_pg_pairs->sgl_pg0_addr_hi =
cpu_to_le32(putPaddrHigh(sglq_entry->phys));
sgl_pg_pairs->sgl_pg1_addr_lo =
cpu_to_le32(putPaddrLow(0));
sgl_pg_pairs->sgl_pg1_addr_hi =
cpu_to_le32(putPaddrHigh(0));
/* Keep the first xritag on the list */
if (pg_pairs == 0)
xritag_start = sglq_entry->sli4_xritag;
sgl_pg_pairs++;
}
bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
/* Perform endian conversion if necessary */
sgl->word0 = cpu_to_le32(sgl->word0);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (rc != MBX_TIMEOUT)
lpfc_sli4_mbox_cmd_free(phba, mbox);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2513 POST_SGL_BLOCK mailbox command failed "
"status x%x add_status x%x mbx status x%x\n",
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
}
return rc;
}
/**
* lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
* @phba: pointer to lpfc hba data structure.
* @sblist: pointer to scsi buffer list.
* @cnt: number of scsi buffers on the list.
*
* This routine is invoked to post a block of @cnt scsi sgl pages from a
* SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
* No Lock is held.
*
**/
int
lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
int cnt)
{
struct lpfc_scsi_buf *psb;
struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
struct sgl_page_pairs *sgl_pg_pairs;
void *viraddr;
LPFC_MBOXQ_t *mbox;
uint32_t reqlen, alloclen, pg_pairs;
uint32_t mbox_tmo;
uint16_t xritag_start = 0;
int rc = 0;
uint32_t shdr_status, shdr_add_status;
dma_addr_t pdma_phys_bpl1;
union lpfc_sli4_cfg_shdr *shdr;
/* Calculate the requested length of the dma memory */
reqlen = cnt * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
if (reqlen > PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0217 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
return -ENOMEM;
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0283 Failed to allocate mbox cmd memory\n");
return -ENOMEM;
}
/* Allocate DMA memory and set up the non-embedded mailbox command */
alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
LPFC_SLI4_MBX_NEMBED);
if (alloclen < reqlen) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2561 Allocated DMA memory size (%d) is "
"less than the requested DMA memory "
"size (%d)\n", alloclen, reqlen);
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
/* Get the first SGE entry from the non-embedded DMA memory */
if (unlikely(!mbox->sge_array)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"2565 Failed to get the non-embedded SGE "
"virtual address\n");
lpfc_sli4_mbox_cmd_free(phba, mbox);
return -ENOMEM;
}
viraddr = mbox->sge_array->addr[0];
/* Set up the SGL pages in the non-embedded DMA pages */
sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
sgl_pg_pairs = &sgl->sgl_pg_pairs;
pg_pairs = 0;
list_for_each_entry(psb, sblist, list) {
/* Set up the sge entry */
sgl_pg_pairs->sgl_pg0_addr_lo =
cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
sgl_pg_pairs->sgl_pg0_addr_hi =
cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
else
pdma_phys_bpl1 = 0;
sgl_pg_pairs->sgl_pg1_addr_lo =
cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
sgl_pg_pairs->sgl_pg1_addr_hi =
cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
/* Keep the first xritag on the list */
if (pg_pairs == 0)
xritag_start = psb->cur_iocbq.sli4_xritag;
sgl_pg_pairs++;
pg_pairs++;
}
bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
/* Perform endian conversion if necessary */
sgl->word0 = cpu_to_le32(sgl->word0);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (rc != MBX_TIMEOUT)
lpfc_sli4_mbox_cmd_free(phba, mbox);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2564 POST_SGL_BLOCK mailbox command failed "
"status x%x add_status x%x mbx status x%x\n",
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
}
return rc;
}
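/*
 * Illustrative sketch (an assumption, not driver code): putPaddrLow()
 * and putPaddrHigh() used above behave like the following helpers,
 * splitting a 64-bit DMA address into the two 32-bit SGE words:
 *
 *	static inline uint32_t paddr_lo(dma_addr_t addr)
 *	{
 *		return (uint32_t)(addr & 0xffffffff);
 *	}
 *	static inline uint32_t paddr_hi(dma_addr_t addr)
 *	{
 *		return (uint32_t)(((uint64_t)addr) >> 32);
 *	}
 */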
/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
* @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
*
* This function checks the fields in the @fc_hdr to see if the FC frame is a
* valid type of frame that the LPFC driver will handle. This function will
* return a zero if the frame is a valid frame or a non zero value when the
* frame does not pass the check.
**/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
char *rctl_names[] = FC_RCTL_NAMES_INIT;
char *type_names[] = FC_TYPE_NAMES_INIT;
struct fc_vft_header *fc_vft_hdr;
switch (fc_hdr->fh_r_ctl) {
case FC_RCTL_DD_UNCAT: /* uncategorized information */
case FC_RCTL_DD_SOL_DATA: /* solicited data */
case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */
case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */
case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */
case FC_RCTL_DD_DATA_DESC: /* data descriptor */
case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */
case FC_RCTL_DD_CMD_STATUS: /* command status */
case FC_RCTL_ELS_REQ: /* extended link services request */
case FC_RCTL_ELS_REP: /* extended link services reply */
case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */
case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */
case FC_RCTL_BA_NOP: /* basic link service NOP */
case FC_RCTL_BA_ABTS: /* basic link service abort */
case FC_RCTL_BA_RMC: /* remove connection */
case FC_RCTL_BA_ACC: /* basic accept */
case FC_RCTL_BA_RJT: /* basic reject */
case FC_RCTL_BA_PRMT:
case FC_RCTL_ACK_1: /* acknowledge_1 */
case FC_RCTL_ACK_0: /* acknowledge_0 */
case FC_RCTL_P_RJT: /* port reject */
case FC_RCTL_F_RJT: /* fabric reject */
case FC_RCTL_P_BSY: /* port busy */
case FC_RCTL_F_BSY: /* fabric busy to data frame */
case FC_RCTL_F_BSYL: /* fabric busy to link control frame */
case FC_RCTL_LCR: /* link credit reset */
case FC_RCTL_END: /* end */
break;
case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */
fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
return lpfc_fc_frame_check(phba, fc_hdr);
default:
goto drop;
}
switch (fc_hdr->fh_type) {
case FC_TYPE_BLS:
case FC_TYPE_ELS:
case FC_TYPE_FCP:
case FC_TYPE_CT:
break;
case FC_TYPE_IP:
case FC_TYPE_ILS:
default:
goto drop;
}
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"2538 Received frame rctl:%s type:%s\n",
rctl_names[fc_hdr->fh_r_ctl],
type_names[fc_hdr->fh_type]);
return 0;
drop:
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
"2539 Dropped frame rctl:%s type:%s\n",
rctl_names[fc_hdr->fh_r_ctl],
type_names[fc_hdr->fh_type]);
return 1;
}
/**
* lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
* @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
*
* This function processes the FC header to retrieve the VFI from the VF
* header, if one exists. This function will return the VFI if one exists
* or 0 if no VSAN Header exists.
**/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
return 0;
return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}
/**
* lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
* @phba: Pointer to the HBA structure to search for the vport on
* @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
* @fcfi: The FC Fabric ID that the frame came from
*
* This function searches the @phba for a vport that matches the content of the
* @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
* VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
* returns the matching vport pointer or NULL if unable to match frame to a
* vport.
**/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
uint16_t fcfi)
{
struct lpfc_vport **vports;
struct lpfc_vport *vport = NULL;
int i;
uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
fc_hdr->fh_d_id[1] << 8 |
fc_hdr->fh_d_id[2]);
vports = lpfc_create_vport_work_array(phba);
if (vports != NULL)
for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
if (phba->fcf.fcfi == fcfi &&
vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
vports[i]->fc_myDID == did) {
vport = vports[i];
break;
}
}
lpfc_destroy_vport_work_array(phba, vports);
return vport;
}
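/*
 * Worked example (illustrative only): the shift/or above assembles the
 * 24-bit destination ID from the three header bytes, e.g.:
 *
 *	uint8_t d_id[3] = { 0x01, 0x02, 0x03 };
 *	uint32_t did = (d_id[0] << 16) | (d_id[1] << 8) | d_id[2];
 *	did is now 0x010203
 */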
/**
* lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
* @vport: pointer to the vport that the frame was received on
* @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
*
* This function searches through the existing incomplete sequences that have
* been sent to this @vport. If the frame matches one of the incomplete
* sequences then the dbuf in the @dmabuf is added to the list of frames that
* make up that sequence. If no sequence is found that matches this frame then
* the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
* This function returns a pointer to the first dmabuf in the sequence list that
* the frame was linked to.
**/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
struct fc_frame_header *new_hdr;
struct fc_frame_header *temp_hdr;
struct lpfc_dmabuf *d_buf;
struct lpfc_dmabuf *h_buf;
struct hbq_dmabuf *seq_dmabuf = NULL;
struct hbq_dmabuf *temp_dmabuf = NULL;
new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
/* Use the hdr_buf to find the sequence that this frame belongs to */
list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
temp_hdr = (struct fc_frame_header *)h_buf->virt;
if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
(temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
(memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
continue;
/* found a pending sequence that matches this frame */
seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
break;
}
if (!seq_dmabuf) {
/*
* This indicates first frame received for this sequence.
* Queue the buffer on the vport's rcv_buffer_list.
*/
list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
return dmabuf;
}
temp_hdr = seq_dmabuf->hbuf.virt;
if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
return dmabuf;
}
/* find the correct place in the sequence to insert this frame */
list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
/*
* If the frame's sequence count is greater than the frame on
* the list then insert the frame right after this frame
*/
if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
return seq_dmabuf;
}
}
return NULL;
}
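/*
 * Illustrative note (an assumption about the intended invariant):
 * frames arriving out of order with fh_seq_cnt 2, 0, 1 end up on the
 * dbuf list in ascending order. A frame with a count lower than the
 * current head is placed in front, and the reverse walk above inserts
 * higher counts immediately after the first entry with a smaller count.
 */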
/**
* lpfc_seq_complete - Indicates if a sequence is complete
* @dmabuf: pointer to a dmabuf that describes the FC sequence
*
* This function checks the sequence, starting with the frame described by
* @dmabuf, to see if all the frames associated with this sequence are present.
* the frames associated with this sequence are linked to the @dmabuf using the
* dbuf list. This function looks for three things: 1) that the first frame
* has a sequence count of zero; 2) that there is a frame with the last frame
* of sequence bit set; and 3) that there are no holes in the sequence count. The function will
* return 1 when the sequence is complete, otherwise it will return 0.
**/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
struct fc_frame_header *hdr;
struct lpfc_dmabuf *d_buf;
struct hbq_dmabuf *seq_dmabuf;
uint32_t fctl;
int seq_count = 0;
hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
/* make sure first frame of sequence has a sequence count of zero */
if (hdr->fh_seq_cnt != seq_count)
return 0;
fctl = (hdr->fh_f_ctl[0] << 16 |
hdr->fh_f_ctl[1] << 8 |
hdr->fh_f_ctl[2]);
/* If last frame of sequence we can return success. */
if (fctl & FC_FC_END_SEQ)
return 1;
list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* If there is a hole in the sequence count then fail. */
if (++seq_count != hdr->fh_seq_cnt)
return 0;
fctl = (hdr->fh_f_ctl[0] << 16 |
hdr->fh_f_ctl[1] << 8 |
hdr->fh_f_ctl[2]);
/* If last frame of sequence we can return success. */
if (fctl & FC_FC_END_SEQ)
return 1;
}
return 0;
}
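/*
 * Worked example (illustrative): frames carrying fh_seq_cnt 0, 1, 2
 * with FC_FC_END_SEQ set in the f_ctl of the last frame make the loop
 * above return 1; counts 0, 2, 3 leave a hole at 1, the ++seq_count
 * comparison fails on the second frame, and the function returns 0.
 */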
/**
* lpfc_prep_seq - Prep sequence for ULP processing
* @vport: Pointer to the vport on which this sequence was received
* @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
*
* This function takes a sequence, described by a list of frames, and creates
* a list of iocbq structures to describe the sequence. This iocbq list will be
* used to issue to the generic unsolicited sequence handler. This routine
* returns a pointer to the first iocbq in the list. If the function is unable
* to allocate an iocbq then it throws out the received frames that could
* not be described and returns a pointer to the first iocbq. If unable to
* allocate any iocbqs (including the first) this function will return NULL.
**/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
struct lpfc_dmabuf *d_buf, *n_buf;
struct lpfc_iocbq *first_iocbq, *iocbq;
struct fc_frame_header *fc_hdr;
uint32_t sid;
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* remove from receive buffer list */
list_del_init(&seq_dmabuf->hbuf.list);
/* get the Remote Port's SID */
sid = (fc_hdr->fh_s_id[0] << 16 |
fc_hdr->fh_s_id[1] << 8 |
fc_hdr->fh_s_id[2]);
/* Get an iocbq struct to fill in. */
first_iocbq = lpfc_sli_get_iocbq(vport->phba);
if (first_iocbq) {
/* Initialize the first IOCB. */
first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
first_iocbq->iocb.unsli3.rcvsli3.vpi =
vport->vpi + vport->phba->vpi_base;
/* put the first buffer into the first IOCBq */
first_iocbq->context2 = &seq_dmabuf->dbuf;
first_iocbq->context3 = NULL;
first_iocbq->iocb.ulpBdeCount = 1;
first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.un.rcvels.remoteID = sid;
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
}
iocbq = first_iocbq;
/*
* Each IOCBq can have two Buffers assigned, so go through the list
* of buffers for this sequence and save two buffers in each IOCBq
*/
list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
if (!iocbq) {
lpfc_in_buf_free(vport->phba, d_buf);
continue;
}
if (!iocbq->context3) {
iocbq->context3 = d_buf;
iocbq->iocb.ulpBdeCount++;
iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
} else {
iocbq = lpfc_sli_get_iocbq(vport->phba);
if (!iocbq) {
if (first_iocbq) {
first_iocbq->iocb.ulpStatus =
IOSTAT_FCP_RSP_ERROR;
first_iocbq->iocb.un.ulpWord[4] =
IOERR_NO_RESOURCES;
}
lpfc_in_buf_free(vport->phba, d_buf);
continue;
}
iocbq->context2 = d_buf;
iocbq->context3 = NULL;
iocbq->iocb.ulpBdeCount = 1;
iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
iocbq->iocb.un.rcvels.remoteID = sid;
list_add_tail(&iocbq->list, &first_iocbq->list);
}
}
return first_iocbq;
}
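/*
 * Illustrative sketch (an assumption based on the loop above): with
 * buffers B0..B4 in a sequence, the packing is iocbq0:{B0, B1},
 * iocbq1:{B2, B3}, iocbq2:{B4}. A new iocbq is allocated only once
 * both context2 and context3 of the current one hold a buffer.
 */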
/**
* lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
* @phba: Pointer to HBA context object.
*
* This function is called with no lock held. This function processes all
* the received buffers and gives them to the upper layers when a received buffer
* indicates that it is the final frame in the sequence. The interrupt
* service routine processes received buffers at interrupt contexts and adds
* received dma buffers to the rb_pend_list queue and signals the worker thread.
* Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
* appropriate receive function when the final frame in a sequence is received.
**/
int
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
{
LIST_HEAD(cmplq);
struct hbq_dmabuf *dmabuf, *seq_dmabuf;
struct fc_frame_header *fc_hdr;
struct lpfc_vport *vport;
uint32_t fcfi;
struct lpfc_iocbq *iocbq;
/* Clear hba flag and get all received buffers into the cmplq */
spin_lock_irq(&phba->hbalock);
phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
list_splice_init(&phba->rb_pend_list, &cmplq);
spin_unlock_irq(&phba->hbalock);
/* Process each received buffer */
while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
/* check to see if this a valid type of frame */
if (lpfc_fc_frame_check(phba, fc_hdr)) {
lpfc_in_buf_free(phba, &dmabuf->dbuf);
continue;
}
fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
if (!vport) {
/* throw out the frame */
lpfc_in_buf_free(phba, &dmabuf->dbuf);
continue;
}
/* Link this frame */
seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
if (!seq_dmabuf) {
/* unable to add frame to vport - throw it out */
lpfc_in_buf_free(phba, &dmabuf->dbuf);
continue;
}
/* If not last frame in sequence continue processing frames. */
if (!lpfc_seq_complete(seq_dmabuf)) {
/*
* When saving off frames post a new one and mark this
* frame to be freed when it is finished.
*/
lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
dmabuf->tag = -1;
continue;
}
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
iocbq = lpfc_prep_seq(vport, seq_dmabuf);
if (!lpfc_complete_unsol_iocb(phba,
&phba->sli.ring[LPFC_ELS_RING],
iocbq, fc_hdr->fh_r_ctl,
fc_hdr->fh_type))
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"2540 Ring %d handler: unexpected Rctl "
"x%x Type x%x received\n",
LPFC_ELS_RING,
fc_hdr->fh_r_ctl, fc_hdr->fh_type);
}
return 0;
}
/**
* lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to post rpi header templates to the
* HBA consistent with the SLI-4 interface spec. This routine
* posts a PAGE_SIZE memory region to the port to hold up to
* PAGE_SIZE / 64 rpi context headers.
*
* This routine does not require any locks. Its usage is expected
* to be at driver load or during reset recovery, when driver
* execution is sequential.
*
* Return codes
* 0 - successful
* EIO - The mailbox failed to complete successfully.
* When this error occurs, the driver is not guaranteed
* to have any rpi regions posted to the device and
* must either attempt to repost the regions or take a
* fatal error.
**/
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
struct lpfc_rpi_hdr *rpi_page;
uint32_t rc = 0;
/* Post all rpi memory regions to the port. */
list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2008 Error %d posting all rpi "
"headers\n", rc);
rc = -EIO;
break;
}
}
return rc;
}
/**
* lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
* @phba: pointer to lpfc hba data structure.
* @rpi_page: pointer to the rpi memory region.
*
* This routine is invoked to post a single rpi header to the
* HBA consistent with the SLI-4 interface spec. This memory region
* maps up to 64 rpi context regions.
*
* Return codes
* 0 - successful
* ENOMEM - No available memory
* EIO - The mailbox failed to complete successfully.
**/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
LPFC_MBOXQ_t *mboxq;
struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
uint32_t rc = 0;
uint32_t mbox_tmo;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
/* The port is notified of the header region via a mailbox command. */
mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2001 Unable to allocate memory for issuing "
"SLI_CONFIG_SPECIAL mailbox command\n");
return -ENOMEM;
}
/* Post this rpi header region to the port. */
hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
sizeof(struct lpfc_mbx_post_hdr_tmpl) -
sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
hdr_tmpl, rpi_page->page_count);
bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
rpi_page->start_rpi);
hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (rc != MBX_TIMEOUT)
mempool_free(mboxq, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2514 POST_RPI_HDR mailbox failed with "
"status x%x add_status x%x, mbx status x%x\n",
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
}
return rc;
}
/**
* lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to allocate an available rpi from the
* driver's rpi bitmask. If the pool of remaining rpis runs low, it
* also grows the pool by creating and posting another rpi header
* region to the port.
*
* Returns
* A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
* LPFC_RPI_ALLOC_ERROR if no rpis are available.
**/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
int rpi;
uint16_t max_rpi, rpi_base, rpi_limit;
uint16_t rpi_remaining;
struct lpfc_rpi_hdr *rpi_hdr;
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
rpi_limit = phba->sli4_hba.next_rpi;
/*
* The valid rpi range is not guaranteed to be zero-based. Start
* the search at the rpi_base as reported by the port.
*/
spin_lock_irq(&phba->hbalock);
rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
if (rpi >= rpi_limit || rpi < rpi_base)
rpi = LPFC_RPI_ALLOC_ERROR;
else {
set_bit(rpi, phba->sli4_hba.rpi_bmask);
phba->sli4_hba.max_cfg_param.rpi_used++;
phba->sli4_hba.rpi_count++;
}
/*
* Don't try to allocate more rpi header regions if the device's
* limit on available rpis has been exhausted.
*/
if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
(phba->sli4_hba.rpi_count >= max_rpi)) {
spin_unlock_irq(&phba->hbalock);
return rpi;
}
/*
* If the driver is running low on rpi resources, allocate another
* page now. Note that the next_rpi value is used because
* it represents how many are actually in use, whereas max_rpi is the
* maximum number supported by the device.
*/
rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
phba->sli4_hba.rpi_count;
spin_unlock_irq(&phba->hbalock);
if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
if (!rpi_hdr) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2002 Error Could not grow rpi "
"count\n");
} else {
lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
}
}
return rpi;
}
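/*
 * Minimal sketch of the bitmask allocation pattern used above
 * (illustrative and lockless; alloc_id() is a hypothetical name):
 *
 *	int alloc_id(unsigned long *bmask, int base, int limit)
 *	{
 *		int id = find_next_zero_bit(bmask, limit, base);
 *		if (id >= limit)
 *			return -1;  (stand-in for LPFC_RPI_ALLOC_ERROR)
 *		set_bit(id, bmask);
 *		return id;
 *	}
 */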
/**
* lpfc_sli4_free_rpi - Release an rpi for reuse.
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to release an rpi to the pool of
* available rpis maintained by the driver.
**/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
spin_lock_irq(&phba->hbalock);
clear_bit(rpi, phba->sli4_hba.rpi_bmask);
phba->sli4_hba.rpi_count--;
phba->sli4_hba.max_cfg_param.rpi_used--;
spin_unlock_irq(&phba->hbalock);
}
/**
* lpfc_sli4_remove_rpis - Remove the rpi bitmask region
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to free the bitmask memory region used to
* track rpi allocations.
**/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
kfree(phba->sli4_hba.rpi_bmask);
}
/**
* lpfc_sli4_resume_rpi - Resume an rpi on the port
* @ndlp: pointer to the lpfc nodelist whose rpi is to be resumed.
*
* This routine is invoked to issue a RESUME_RPI mailbox command to the
* port for the rpi associated with the given node.
**/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
{
LPFC_MBOXQ_t *mboxq;
struct lpfc_hba *phba = ndlp->phba;
int rc;
/* The port is notified of the rpi resumption via a mailbox command. */
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq)
return -ENOMEM;
/* Construct and issue the resume rpi mailbox command. */
lpfc_resume_rpi(mboxq, ndlp);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2010 Resume RPI Mailbox failed "
"status %d, mbxStatus x%x\n", rc,
bf_get(lpfc_mqe_status, &mboxq->u.mqe));
mempool_free(mboxq, phba->mbox_mem_pool);
return -EIO;
}
return 0;
}
/**
* lpfc_sli4_init_vpi - Initialize a vpi with the port
* @phba: pointer to lpfc hba data structure.
* @vpi: vpi value to activate with the port.
*
* This routine is invoked to activate a vpi with the
* port when the host intends to use vports with a
* nonzero vpi.
*
* Returns:
* 0 success
* -Evalue otherwise
**/
int
lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
{
LPFC_MBOXQ_t *mboxq;
int rc = 0;
uint32_t mbox_tmo;
if (vpi == 0)
return -EINVAL;
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq)
return -ENOMEM;
lpfc_init_vpi(phba, mboxq, vpi);
mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
if (rc != MBX_TIMEOUT)
mempool_free(mboxq, phba->mbox_mem_pool);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2022 INIT VPI Mailbox failed "
"status %d, mbxStatus x%x\n", rc,
bf_get(lpfc_mqe_status, &mboxq->u.mqe));
rc = -EIO;
}
return rc;
}
/**
* lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
* @phba: pointer to lpfc hba data structure.
* @mboxq: Pointer to mailbox object.
*
* This routine is invoked to manually add a single FCF record. The caller
* must pass a completely initialized FCF_Record. This routine takes
* care of the nonembedded mailbox operations.
**/
static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
void *virt_addr;
union lpfc_sli4_cfg_shdr *shdr;
uint32_t shdr_status, shdr_add_status;
virt_addr = mboxq->sge_array->addr[0];
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if ((shdr_status || shdr_add_status) &&
(shdr_status != STATUS_FCF_IN_USE))
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2558 ADD_FCF_RECORD mailbox failed with "
"status x%x add_status x%x\n",
shdr_status, shdr_add_status);
lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
* lpfc_sli4_add_fcf_record - Manually add an FCF Record.
* @phba: pointer to lpfc hba data structure.
* @fcf_record: pointer to the initialized fcf record to add.
*
* This routine is invoked to manually add a single FCF record. The caller
* must pass a completely initialized FCF_Record. This routine takes
* care of the nonembedded mailbox operations.
**/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
int rc = 0;
LPFC_MBOXQ_t *mboxq;
uint8_t *bytep;
void *virt_addr;
dma_addr_t phys_addr;
struct lpfc_mbx_sge sge;
uint32_t alloc_len, req_len;
uint32_t fcfindex;
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2009 Failed to allocate mbox for ADD_FCF cmd\n");
return -ENOMEM;
}
req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
sizeof(uint32_t);
/* Allocate DMA memory and set up the non-embedded mailbox command */
alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
req_len, LPFC_SLI4_MBX_NEMBED);
if (alloc_len < req_len) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2523 Allocated DMA memory size (x%x) is "
"less than the requested DMA memory "
"size (x%x)\n", alloc_len, req_len);
lpfc_sli4_mbox_cmd_free(phba, mboxq);
return -ENOMEM;
}
/*
* Get the first SGE entry from the non-embedded DMA memory. This
* routine only uses a single SGE.
*/
lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
if (unlikely(!mboxq->sge_array)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"2526 Failed to get the non-embedded SGE "
"virtual address\n");
lpfc_sli4_mbox_cmd_free(phba, mboxq);
return -ENOMEM;
}
virt_addr = mboxq->sge_array->addr[0];
/*
* Configure the FCF record for FCFI 0. This is the driver's
* hardcoded default and is used in non-FIP mode.
*/
fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
/*
* Copy the fcf_index and the FCF Record Data. The data starts after
* the FCoE header plus word10. The data copy needs to be endian
* correct.
*/
bytep += sizeof(uint32_t);
lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
mboxq->vport = phba->pport;
mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2515 ADD_FCF_RECORD mailbox failed with "
"status 0x%x\n", rc);
lpfc_sli4_mbox_cmd_free(phba, mboxq);
rc = -EIO;
} else
rc = 0;
return rc;
}
/**
* lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
* @phba: pointer to lpfc hba data structure.
* @fcf_record: pointer to the fcf record to write the default data.
* @fcf_index: FCF table entry index.
*
* This routine is invoked to build the driver's default FCF record. The
* values used are hardcoded. This routine handles memory initialization.
*
**/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
struct fcf_record *fcf_record,
uint16_t fcf_index)
{
memset(fcf_record, 0, sizeof(struct fcf_record));
fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
LPFC_FCF_FPMA | LPFC_FCF_SPMA);
/* Set the VLAN bit map */
if (phba->valid_vlan) {
fcf_record->vlan_bitmap[phba->vlan_id / 8]
= 1 << (phba->vlan_id % 8);
}
}
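/*
 * Worked example (illustrative): for vlan_id = 100 the code above sets
 * vlan_bitmap[12] to 1 << 4, since 100 / 8 = 12 and 100 % 8 = 4,
 * marking VLAN 100 as valid in the FCF record's bitmap.
 */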
/**
* lpfc_sli4_read_fcf_record - Read an FCF record from the port.
* @phba: pointer to lpfc hba data structure.
* @fcf_index: FCF table entry offset.
*
* This routine is invoked to read an FCF record from the
* device starting with the given @fcf_index.
**/
int
lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
{
int rc = 0, error;
LPFC_MBOXQ_t *mboxq;
void *virt_addr;
dma_addr_t phys_addr;
uint8_t *bytep;
struct lpfc_mbx_sge sge;
uint32_t alloc_len, req_len;
struct lpfc_mbx_read_fcf_tbl *read_fcf;
phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2000 Failed to allocate mbox for "
"READ_FCF cmd\n");
return -ENOMEM;
}
req_len = sizeof(struct fcf_record) +
sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
/* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
LPFC_SLI4_MBX_NEMBED);
if (alloc_len < req_len) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0291 Allocated DMA memory size (x%x) is "
"less than the requested DMA memory "
"size (x%x)\n", alloc_len, req_len);
lpfc_sli4_mbox_cmd_free(phba, mboxq);
return -ENOMEM;
}
/* Get the first SGE entry from the non-embedded DMA memory. This
* routine only uses a single SGE.
*/
lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
if (unlikely(!mboxq->sge_array)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
"2527 Failed to get the non-embedded SGE "
"virtual address\n");
lpfc_sli4_mbox_cmd_free(phba, mboxq);
return -ENOMEM;
}
virt_addr = mboxq->sge_array->addr[0];
read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
/* Set up command fields */
bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
/* Perform necessary endian conversion */
bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
mboxq->vport = phba->pport;
mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
lpfc_sli4_mbox_cmd_free(phba, mboxq);
error = -EIO;
} else {
spin_lock_irq(&phba->hbalock);
phba->hba_flag |= FCF_DISC_INPROGRESS;
spin_unlock_irq(&phba->hbalock);
error = 0;
}
return error;
}
/**
* lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
* @phba: pointer to lpfc hba data structure.
*
* This function reads region 23 and parses its TLVs for the port state
* to decide if the user disabled the port. If the TLV indicates the
* port is disabled, the hba_flag is set accordingly.
**/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmb = NULL;
MAILBOX_t *mb;
uint8_t *rgn23_data = NULL;
uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
int rc;
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2600 lpfc_sli_read_serdes_param failed to"
" allocate mailbox memory\n");
goto out;
}
mb = &pmb->u.mb;
/* Get adapter Region 23 data */
rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
if (!rgn23_data)
goto out;
do {
lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2601 lpfc_sli_read_link_ste failed to"
" read config region 23 rc 0x%x Status 0x%x\n",
rc, mb->mbxStatus);
mb->un.varDmp.word_cnt = 0;
}
/*
* dump mem may return a zero when finished, or we may have got a
* mailbox error; either way we are done.
*/
if (mb->un.varDmp.word_cnt == 0)
break;
if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
rgn23_data + offset,
mb->un.varDmp.word_cnt);
offset += mb->un.varDmp.word_cnt;
} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
data_size = offset;
offset = 0;
if (!data_size)
goto out;
/* Check the region signature first */
if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2619 Config region 23 has bad signature\n");
goto out;
}
offset += 4;
/* Check the data structure version */
if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2620 Config region 23 has bad version\n");
goto out;
}
offset += 4;
/* Parse TLV entries in the region */
while (offset < data_size) {
if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
break;
/*
* If the TLV is not driver specific TLV or driver id is
* not linux driver id, skip the record.
*/
if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
(rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
(rgn23_data[offset + 3] != 0)) {
offset += rgn23_data[offset + 1] * 4 + 4;
continue;
}
/* Driver found a driver specific TLV in the config region */
sub_tlv_len = rgn23_data[offset + 1] * 4;
offset += 4;
tlv_offset = 0;
/*
* Search for configured port state sub-TLV.
*/
while ((offset < data_size) &&
(tlv_offset < sub_tlv_len)) {
if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
offset += 4;
tlv_offset += 4;
break;
}
if (rgn23_data[offset] != PORT_STE_TYPE) {
offset += rgn23_data[offset + 1] * 4 + 4;
tlv_offset += rgn23_data[offset + 1] * 4 + 4;
continue;
}
/* This HBA contains PORT_STE configured */
if (!rgn23_data[offset + 2])
phba->hba_flag |= LINK_DISABLED;
goto out;
}
}
out:
if (pmb)
mempool_free(pmb, phba->mbox_mem_pool);
kfree(rgn23_data);
return;
}
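/*
 * Illustrative sketch (an assumption) of the region 23 TLV layout
 * walked above: each record is a 4-byte header of [type][len][id][rsvd]
 * followed by len 32-bit words of payload, so skipping a record
 * advances the offset by rgn23_data[offset + 1] * 4 + 4 bytes.
 */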
| gpl-2.0 |
Perferom/android_kernel_huawei_msm7x25 | arch/arm/mach-msm/qdsp5v2/audio_out.c | 455 | 16804 | /*
* pcm audio output device
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/wakelock.h>
#include <linux/msm_audio.h>
#include <asm/atomic.h>
#include <asm/ioctls.h>
#include <mach/msm_adsp.h>
#include <mach/qdsp5v2/qdsp5audppcmdi.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
#include <mach/qdsp5v2/audpp.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
#include <mach/htc_pwrsink.h>
#include <mach/debug_mm.h>
#define BUFSZ (960 * 5)
#define DMASZ (BUFSZ * 2)
#define HOSTPCM_STREAM_ID 5
struct buffer {
void *data;
unsigned size;
unsigned used;
unsigned addr;
};
struct audio {
struct buffer out[2];
spinlock_t dsp_lock;
uint8_t out_head;
uint8_t out_tail;
uint8_t out_needed; /* number of buffers the dsp is waiting for */
atomic_t out_bytes;
struct mutex lock;
struct mutex write_lock;
wait_queue_head_t wait;
/* configuration to use on next enable */
uint32_t out_sample_rate;
uint32_t out_channel_mode;
uint32_t out_weight;
uint32_t out_buffer_size;
uint32_t device_events;
int16_t source;
/* data allocated for various buffers */
char *data;
dma_addr_t phys;
int teos; /* valid only if tunnel mode & no data left for decoder */
int opened;
int enabled;
int running;
int stopped; /* set when stopped, cleared on flush */
uint16_t dec_id;
struct wake_lock wakelock;
struct wake_lock idlelock;
struct audpp_cmd_cfg_object_params_volume vol_pan;
};
static void audio_out_listener(u32 evt_id, union auddev_evt_data *evt_payload,
void *private_data)
{
struct audio *audio = private_data;
switch (evt_id) {
case AUDDEV_EVT_DEV_RDY:
MM_DBG(":AUDDEV_EVT_DEV_RDY\n");
audio->source |= (0x1 << evt_payload->routing_id);
if (audio->running == 1 && audio->enabled == 1)
audpp_route_stream(audio->dec_id, audio->source);
break;
case AUDDEV_EVT_DEV_RLS:
MM_DBG(":AUDDEV_EVT_DEV_RLS\n");
audio->source &= ~(0x1 << evt_payload->routing_id);
if (audio->running == 1 && audio->enabled == 1)
audpp_route_stream(audio->dec_id, audio->source);
break;
case AUDDEV_EVT_STREAM_VOL_CHG:
audio->vol_pan.volume = evt_payload->session_vol;
MM_DBG(":AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d\n",
audio->vol_pan.volume);
if (audio->running)
audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan,
POPP);
break;
default:
MM_ERR("ERROR:wrong event\n");
break;
}
}
static void audio_prevent_sleep(struct audio *audio)
{
MM_DBG("\n"); /* Macro prints the file name and function */
wake_lock(&audio->wakelock);
wake_lock(&audio->idlelock);
}
static void audio_allow_sleep(struct audio *audio)
{
wake_unlock(&audio->wakelock);
wake_unlock(&audio->idlelock);
MM_DBG("\n"); /* Macro prints the file name and function */
}
static int audio_dsp_out_enable(struct audio *audio, int yes);
static int audio_dsp_send_buffer(struct audio *audio, unsigned id,
unsigned len);
static void audio_dsp_event(void *private, unsigned id, uint16_t *msg);
/* must be called with audio->lock held */
static int audio_enable(struct audio *audio)
{
MM_DBG("\n"); /* Macro prints the file name and function */
if (audio->enabled)
return 0;
/* refuse to start if we're not ready */
if (!audio->out[0].used || !audio->out[1].used)
return -EIO;
/* we start buffers 0 and 1, so buffer 0 will be the
* next one the dsp will want
*/
audio->out_tail = 0;
audio->out_needed = 0;
audio_prevent_sleep(audio);
if (audpp_enable(-1, audio_dsp_event, audio)) {
MM_ERR("audpp_enable() failed\n");
audio_allow_sleep(audio);
return -ENODEV;
}
audio->enabled = 1;
htc_pwrsink_set(PWRSINK_AUDIO, 100);
return 0;
}
/* must be called with audio->lock held */
static int audio_disable(struct audio *audio)
{
MM_DBG("\n"); /* Macro prints the file name and function */
if (audio->enabled) {
audio->enabled = 0;
audio_dsp_out_enable(audio, 0);
audpp_disable(-1, audio);
wake_up(&audio->wait);
audio->out_needed = 0;
audio_allow_sleep(audio);
}
return 0;
}
/* ------------------- dsp --------------------- */
static void audio_dsp_event(void *private, unsigned id, uint16_t *msg)
{
struct audio *audio = private;
struct buffer *frame;
unsigned long flags;
static unsigned long pcmdmamsd_time;
switch (id) {
case AUDPP_MSG_HOST_PCM_INTF_MSG: {
unsigned id = msg[3];
unsigned idx = msg[4] - 1;
MM_DBG("HOST_PCM id %d idx %d\n", id, idx);
if (id != AUDPP_MSG_HOSTPCM_ID_ARM_RX) {
MM_ERR("bogus id\n");
break;
}
if (idx > 1) {
MM_ERR("bogus buffer idx\n");
break;
}
spin_lock_irqsave(&audio->dsp_lock, flags);
if (audio->running) {
atomic_add(audio->out[idx].used, &audio->out_bytes);
audio->out[idx].used = 0;
frame = audio->out + audio->out_tail;
if (frame->used) {
/* Reset the teos flag to avoid a stale
* PCMDMAMISSED being considered
*/
audio->teos = 0;
audio_dsp_send_buffer(
audio, audio->out_tail, frame->used);
audio->out_tail ^= 1;
} else {
audio->out_needed++;
}
wake_up(&audio->wait);
}
spin_unlock_irqrestore(&audio->dsp_lock, flags);
break;
}
case AUDPP_MSG_PCMDMAMISSED:
/* prints only if 1 second has elapsed since the last time
* this message was printed */
if (printk_timed_ratelimit(&pcmdmamsd_time, 1000))
printk(KERN_INFO "[%s:%s] PCMDMAMISSED %d\n",
__MM_FILE__, __func__, msg[0]);
audio->teos++;
MM_DBG("PCMDMAMISSED Count per Buffer %d\n", audio->teos);
wake_up(&audio->wait);
break;
case AUDPP_MSG_CFG_MSG:
if (msg[0] == AUDPP_MSG_ENA_ENA) {
MM_DBG("CFG_MSG ENABLE\n");
audio->out_needed = 0;
audio->running = 1;
audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan,
POPP);
audpp_route_stream(audio->dec_id, audio->source);
audio_dsp_out_enable(audio, 1);
} else if (msg[0] == AUDPP_MSG_ENA_DIS) {
MM_DBG("CFG_MSG DISABLE\n");
audio->running = 0;
} else {
MM_ERR("CFG_MSG %d?\n", msg[0]);
}
break;
default:
MM_ERR("UNKNOWN (%d)\n", id);
}
}
static int audio_dsp_out_enable(struct audio *audio, int yes)
{
struct audpp_cmd_pcm_intf cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.cmd_id = AUDPP_CMD_PCM_INTF;
cmd.stream = AUDPP_CMD_POPP_STREAM;
cmd.stream_id = audio->dec_id;
cmd.config = AUDPP_CMD_PCM_INTF_CONFIG_CMD_V;
cmd.intf_type = AUDPP_CMD_PCM_INTF_RX_ENA_ARMTODSP_V;
if (yes) {
cmd.write_buf1LSW = audio->out[0].addr;
cmd.write_buf1MSW = audio->out[0].addr >> 16;
if (audio->out[0].used)
cmd.write_buf1_len = audio->out[0].used;
else
cmd.write_buf1_len = audio->out[0].size;
cmd.write_buf2LSW = audio->out[1].addr;
cmd.write_buf2MSW = audio->out[1].addr >> 16;
if (audio->out[1].used)
cmd.write_buf2_len = audio->out[1].used;
else
cmd.write_buf2_len = audio->out[1].size;
cmd.arm_to_rx_flag = AUDPP_CMD_PCM_INTF_ENA_V;
cmd.weight_decoder_to_rx = audio->out_weight;
cmd.weight_arm_to_rx = 1;
cmd.partition_number_arm_to_dsp = 0;
cmd.sample_rate = audio->out_sample_rate;
cmd.channel_mode = audio->out_channel_mode;
}
return audpp_send_queue2(&cmd, sizeof(cmd));
}
static int audio_dsp_send_buffer(struct audio *audio, unsigned idx,
unsigned len)
{
struct audpp_cmd_pcm_intf_send_buffer cmd;
cmd.cmd_id = AUDPP_CMD_PCM_INTF;
cmd.stream = AUDPP_CMD_POPP_STREAM;
cmd.stream_id = audio->dec_id;
cmd.config = AUDPP_CMD_PCM_INTF_BUFFER_CMD_V;
cmd.intf_type = AUDPP_CMD_PCM_INTF_RX_ENA_ARMTODSP_V;
cmd.dsp_to_arm_buf_id = 0;
cmd.arm_to_dsp_buf_id = idx + 1;
cmd.arm_to_dsp_buf_len = len;
return audpp_send_queue2(&cmd, sizeof(cmd));
}
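/*
 * Illustrative note (an assumption): the host PCM interface ping-pongs
 * between the two buffers, so arm_to_dsp_buf_id is 1-based (idx 0 maps
 * to id 1, idx 1 to id 2) while out_head and out_tail are toggled with
 * "^= 1" as the writer fills one buffer and the DSP drains the other.
 */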
/* ------------------- device --------------------- */
static void audio_flush(struct audio *audio)
{
audio->out[0].used = 0;
audio->out[1].used = 0;
audio->out_head = 0;
audio->out_tail = 0;
audio->stopped = 0;
}
static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct audio *audio = file->private_data;
int rc = -EINVAL;
unsigned long flags = 0;
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
stats.byte_count = atomic_read(&audio->out_bytes);
if (copy_to_user((void *) arg, &stats, sizeof(stats)))
return -EFAULT;
return 0;
}
switch (cmd) {
case AUDIO_SET_VOLUME:
spin_lock_irqsave(&audio->dsp_lock, flags);
audio->vol_pan.volume = arg;
if (audio->running)
audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan,
POPP);
spin_unlock_irqrestore(&audio->dsp_lock, flags);
return 0;
case AUDIO_SET_PAN:
spin_lock_irqsave(&audio->dsp_lock, flags);
audio->vol_pan.pan = arg;
if (audio->running)
audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan,
POPP);
spin_unlock_irqrestore(&audio->dsp_lock, flags);
return 0;
}
mutex_lock(&audio->lock);
switch (cmd) {
case AUDIO_START:
rc = audio_enable(audio);
break;
case AUDIO_STOP:
rc = audio_disable(audio);
audio->stopped = 1;
break;
case AUDIO_FLUSH:
if (audio->stopped) {
/* Make sure we're stopped and wake any threads
* that might be blocked holding the write_lock.
* While audio->stopped is set, write threads will
* always exit immediately.
*/
wake_up(&audio->wait);
mutex_lock(&audio->write_lock);
audio_flush(audio);
mutex_unlock(&audio->write_lock);
}
break;
case AUDIO_SET_CONFIG: {
struct msm_audio_config config;
if (copy_from_user(&config, (void *) arg, sizeof(config))) {
rc = -EFAULT;
break;
}
if (config.channel_count == 1)
config.channel_count = AUDPP_CMD_PCM_INTF_MONO_V;
else if (config.channel_count == 2)
config.channel_count = AUDPP_CMD_PCM_INTF_STEREO_V;
else {
rc = -EINVAL;
break;
}
audio->out_sample_rate = config.sample_rate;
audio->out_channel_mode = config.channel_count;
rc = 0;
break;
}
case AUDIO_GET_CONFIG: {
struct msm_audio_config config;
config.buffer_size = BUFSZ;
config.buffer_count = 2;
config.sample_rate = audio->out_sample_rate;
if (audio->out_channel_mode == AUDPP_CMD_PCM_INTF_MONO_V)
config.channel_count = 1;
else
config.channel_count = 2;
config.unused[0] = 0;
config.unused[1] = 0;
config.unused[2] = 0;
if (copy_to_user((void *) arg, &config, sizeof(config)))
rc = -EFAULT;
else
rc = 0;
break;
}
case AUDIO_GET_SESSION_ID: {
if (copy_to_user((void *) arg, &audio->dec_id,
sizeof(unsigned short)))
return -EFAULT;
rc = 0;
break;
}
default:
rc = -EINVAL;
}
mutex_unlock(&audio->lock);
return rc;
}
/* Only useful in tunnel-mode */
static int audio_fsync(struct file *file, struct dentry *dentry,
int datasync)
{
struct audio *audio = file->private_data;
int rc = 0;
if (!audio->running)
return -EINVAL;
mutex_lock(&audio->write_lock);
/* PCM DMAMISS message is sent only once in
* hpcm interface. So, wait for buffer complete
* and teos flag.
*/
rc = wait_event_interruptible(audio->wait,
(!audio->out[0].used &&
!audio->out[1].used));
if (rc < 0)
goto done;
rc = wait_event_interruptible(audio->wait,
audio->teos);
done:
mutex_unlock(&audio->write_lock);
return rc;
}
static ssize_t audio_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
return -EINVAL;
}
static inline int rt_policy(int policy)
{
if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
return 1;
return 0;
}
static inline int task_has_rt_policy(struct task_struct *p)
{
return rt_policy(p->policy);
}
static ssize_t audio_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
struct sched_param s = { .sched_priority = 1 };
struct audio *audio = file->private_data;
unsigned long flags;
const char __user *start = buf;
struct buffer *frame;
size_t xfer;
int old_prio = current->rt_priority;
int old_policy = current->policy;
int cap_nice = cap_raised(current_cap(), CAP_SYS_NICE);
int rc = 0;
/* just for this write, set us real-time */
if (!task_has_rt_policy(current)) {
struct cred *new = prepare_creds();
cap_raise(new->cap_effective, CAP_SYS_NICE);
commit_creds(new);
if ((sched_setscheduler(current, SCHED_RR, &s)) < 0)
MM_ERR("sched_setscheduler failed\n");
}
mutex_lock(&audio->write_lock);
while (count > 0) {
frame = audio->out + audio->out_head;
rc = wait_event_interruptible(audio->wait,
(frame->used == 0) ||
(audio->stopped));
if (rc < 0)
break;
if (audio->stopped) {
rc = -EBUSY;
break;
}
xfer = count > frame->size ? frame->size : count;
if (copy_from_user(frame->data, buf, xfer)) {
rc = -EFAULT;
break;
}
frame->used = xfer;
audio->out_head ^= 1;
count -= xfer;
buf += xfer;
spin_lock_irqsave(&audio->dsp_lock, flags);
frame = audio->out + audio->out_tail;
if (frame->used && audio->out_needed) {
/* Reset the teos flag to avoid a stale
* PCMDMAMISSED being considered
*/
audio->teos = 0;
audio_dsp_send_buffer(audio, audio->out_tail,
frame->used);
audio->out_tail ^= 1;
audio->out_needed--;
}
spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
mutex_unlock(&audio->write_lock);
/* restore scheduling policy and priority */
if (!rt_policy(old_policy)) {
struct sched_param v = { .sched_priority = old_prio };
if ((sched_setscheduler(current, old_policy, &v)) < 0)
MM_ERR("sched_setscheduler failed\n");
if (likely(!cap_nice)) {
struct cred *new = prepare_creds();
cap_lower(new->cap_effective, CAP_SYS_NICE);
commit_creds(new);
}
}
if (buf > start)
return buf - start;
return rc;
}
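/*
 * Minimal sketch of the priority bracketing audio_write() performs
 * (illustrative; error handling omitted and names reused loosely):
 *
 *	struct sched_param rt = { .sched_priority = 1 };
 *	int old_policy = current->policy;
 *	int old_prio = current->rt_priority;
 *
 *	sched_setscheduler(current, SCHED_RR, &rt);     boost for the copy
 *	... latency-sensitive buffer shuffling ...
 *	rt.sched_priority = old_prio;
 *	sched_setscheduler(current, old_policy, &rt);   restore on exit
 */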
static int audio_release(struct inode *inode, struct file *file)
{
struct audio *audio = file->private_data;
mutex_lock(&audio->lock);
auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->dec_id);
audio_disable(audio);
audio_flush(audio);
audio->opened = 0;
mutex_unlock(&audio->lock);
htc_pwrsink_set(PWRSINK_AUDIO, 0);
return 0;
}
static struct audio the_audio;
static int audio_open(struct inode *inode, struct file *file)
{
struct audio *audio = &the_audio;
int rc;
mutex_lock(&audio->lock);
if (audio->opened) {
MM_ERR("busy\n");
rc = -EBUSY;
goto done;
}
if (!audio->data) {
audio->data = dma_alloc_coherent(NULL, DMASZ,
&audio->phys, GFP_KERNEL);
if (!audio->data) {
MM_ERR("could not allocate DMA buffers\n");
rc = -ENOMEM;
goto done;
}
}
audio->dec_id = HOSTPCM_STREAM_ID;
audio->out_buffer_size = BUFSZ;
audio->out_sample_rate = 44100;
audio->out_channel_mode = AUDPP_CMD_PCM_INTF_STEREO_V;
audio->out_weight = 100;
audio->out[0].data = audio->data + 0;
audio->out[0].addr = audio->phys + 0;
audio->out[0].size = BUFSZ;
audio->out[1].data = audio->data + BUFSZ;
audio->out[1].addr = audio->phys + BUFSZ;
audio->out[1].size = BUFSZ;
audio->vol_pan.volume = 0x2000;
audio->vol_pan.pan = 0x0;
audio->source = 0x0;
audio_flush(audio);
audio->device_events = AUDDEV_EVT_DEV_RDY
|AUDDEV_EVT_DEV_RLS|
AUDDEV_EVT_STREAM_VOL_CHG;
MM_DBG("register for event callback pdata %p\n", audio);
rc = auddev_register_evt_listner(audio->device_events,
AUDDEV_CLNT_DEC,
audio->dec_id,
audio_out_listener,
(void *)audio);
if (rc) {
MM_ERR("%s: failed to register listener\n", __func__);
dma_free_coherent(NULL, DMASZ, audio->data, audio->phys);
goto done;
}
file->private_data = audio;
audio->opened = 1;
rc = 0;
done:
mutex_unlock(&audio->lock);
return rc;
}
static const struct file_operations audio_fops = {
.owner = THIS_MODULE,
.open = audio_open,
.release = audio_release,
.read = audio_read,
.write = audio_write,
.unlocked_ioctl = audio_ioctl,
.fsync = audio_fsync,
};
struct miscdevice audio_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "msm_pcm_out",
.fops = &audio_fops,
};
static int __init audio_init(void)
{
mutex_init(&the_audio.lock);
mutex_init(&the_audio.write_lock);
spin_lock_init(&the_audio.dsp_lock);
init_waitqueue_head(&the_audio.wait);
wake_lock_init(&the_audio.wakelock, WAKE_LOCK_SUSPEND, "audio_pcm");
wake_lock_init(&the_audio.idlelock, WAKE_LOCK_IDLE, "audio_pcm_idle");
return misc_register(&audio_misc);
}
device_initcall(audio_init);
| gpl-2.0 |
drowningchild/kernel_JB_I9100ZSLS6 | arch/arm/mm/cache-l2x0.c | 455 | 9649 | /*
* arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
*
* Copyright (C) 2007 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#define CACHE_LINE_SIZE 32
static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
static uint32_t l2x0_size;
static u32 l2x0_cache_id;
static unsigned int l2x0_sets;
static unsigned int l2x0_ways;
static inline bool is_pl310_rev(int rev)
{
return (l2x0_cache_id &
(L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
(L2X0_CACHE_ID_PART_L310 | rev);
}
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
/* wait for cache operation by line or way to complete */
while (readl_relaxed(reg) & mask)
;
}
#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait cache_wait_way
#endif
static inline void cache_sync(void)
{
void __iomem *base = l2x0_base;
#ifdef CONFIG_ARM_ERRATA_753970
/* write to an unmapped register */
writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
writel_relaxed(0, base + L2X0_CACHE_SYNC);
#endif
cache_wait(base + L2X0_CACHE_SYNC, 1);
}
static inline void l2x0_clean_line(unsigned long addr)
{
void __iomem *base = l2x0_base;
cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}
static inline void l2x0_inv_line(unsigned long addr)
{
void __iomem *base = l2x0_base;
cache_wait(base + L2X0_INV_LINE_PA, 1);
writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
#define debug_writel(val) outer_cache.set_debug(val)
static void l2x0_set_debug(unsigned long val)
{
writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#define l2x0_set_debug NULL
#endif
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
void __iomem *base = l2x0_base;
/* Clean by PA followed by Invalidate by PA */
cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
cache_wait(base + L2X0_INV_LINE_PA, 1);
writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else
static inline void l2x0_flush_line(unsigned long addr)
{
void __iomem *base = l2x0_base;
cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
static void l2x0_cache_sync(void)
{
unsigned long flags;
spin_lock_irqsave(&l2x0_lock, flags);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
#ifdef CONFIG_PL310_ERRATA_727915
static void l2x0_for_each_set_way(void __iomem *reg)
{
int set;
int way;
unsigned long flags;
for (way = 0; way < l2x0_ways; way++) {
spin_lock_irqsave(&l2x0_lock, flags);
for (set = 0; set < l2x0_sets; set++)
writel_relaxed((way << 28) | (set << 5), reg);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
}
#endif
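/*
 * Illustrative note (an assumption about the register format): in the
 * clean/invalidate-by-index operations written above, bits [31:28]
 * select the way and the set index starts at bit 5 (one cache line is
 * 32 bytes), hence the (way << 28) | (set << 5) encoding.
 */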
static void __l2x0_flush_all(void)
{
debug_writel(0x03);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
cache_sync();
debug_writel(0x00);
}
static void l2x0_flush_all(void)
{
unsigned long flags;
#ifdef CONFIG_PL310_ERRATA_727915
if (is_pl310_rev(REV_PL310_R2P0)) {
l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
return;
}
#endif
/* clean all ways */
spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_all(void)
{
unsigned long flags;
#ifdef CONFIG_PL310_ERRATA_727915
if (is_pl310_rev(REV_PL310_R2P0)) {
l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
return;
}
#endif
/* clean all ways */
spin_lock_irqsave(&l2x0_lock, flags);
debug_writel(0x03);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
cache_sync();
debug_writel(0x00);
spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_all(void)
{
unsigned long flags;
/* invalidate all ways */
spin_lock_irqsave(&l2x0_lock, flags);
/* Invalidating when L2 is enabled is a no-no */
BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
unsigned long flags;
spin_lock_irqsave(&l2x0_lock, flags);
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
debug_writel(0x03);
l2x0_flush_line(start);
debug_writel(0x00);
start += CACHE_LINE_SIZE;
}
if (end & (CACHE_LINE_SIZE - 1)) {
end &= ~(CACHE_LINE_SIZE - 1);
debug_writel(0x03);
l2x0_flush_line(end);
debug_writel(0x00);
}
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
while (start < blk_end) {
l2x0_inv_line(start);
start += CACHE_LINE_SIZE;
}
if (blk_end < end) {
spin_unlock_irqrestore(&l2x0_lock, flags);
spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_INV_LINE_PA, 1);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
unsigned long flags;
if ((end - start) >= l2x0_size) {
l2x0_clean_all();
return;
}
spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
while (start < blk_end) {
l2x0_clean_line(start);
start += CACHE_LINE_SIZE;
}
if (blk_end < end) {
spin_unlock_irqrestore(&l2x0_lock, flags);
spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
unsigned long flags;
if ((end - start) >= l2x0_size) {
l2x0_flush_all();
return;
}
spin_lock_irqsave(&l2x0_lock, flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
debug_writel(0x03);
while (start < blk_end) {
l2x0_flush_line(start);
start += CACHE_LINE_SIZE;
}
debug_writel(0x00);
if (blk_end < end) {
spin_unlock_irqrestore(&l2x0_lock, flags);
spin_lock_irqsave(&l2x0_lock, flags);
}
}
cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
cache_sync();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_disable(void)
{
unsigned long flags;
spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
writel_relaxed(0, l2x0_base + L2X0_CTRL);
dsb();
spin_unlock_irqrestore(&l2x0_lock, flags);
}
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
__u32 aux;
__u32 way_size = 0;
const char *type;
l2x0_base = base;
l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
/* Determine the number of ways */
switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
if (aux & (1 << 16))
l2x0_ways = 16;
else
l2x0_ways = 8;
type = "L310";
break;
case L2X0_CACHE_ID_PART_L210:
l2x0_ways = (aux >> 13) & 0xf;
type = "L210";
break;
default:
/* Assume unknown chips have 8 ways */
l2x0_ways = 8;
type = "L2x0 series";
break;
}
l2x0_way_mask = (1 << l2x0_ways) - 1;
/*
* L2 cache Size = Way size * Number of ways
*/
way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
way_size = SZ_1K << (way_size + 3);
l2x0_size = l2x0_ways * way_size;
l2x0_sets = way_size / CACHE_LINE_SIZE;
/*
* Check if the l2x0 controller is already enabled.
* If we are booting in non-secure mode, accessing the
* registers below will fault.
*/
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
/* l2x0 controller is disabled */
writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
l2x0_inv_all();
/* enable L2X0 */
writel_relaxed(1, l2x0_base + L2X0_CTRL);
}
outer_cache.inv_range = l2x0_inv_range;
outer_cache.clean_range = l2x0_clean_range;
outer_cache.flush_range = l2x0_flush_range;
outer_cache.sync = l2x0_cache_sync;
outer_cache.flush_all = l2x0_flush_all;
outer_cache.inv_all = l2x0_inv_all;
outer_cache.clean_all = l2x0_clean_all;
outer_cache.disable = l2x0_disable;
outer_cache.set_debug = l2x0_set_debug;
printk(KERN_INFO "%s cache controller enabled\n", type);
printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
l2x0_ways, l2x0_cache_id, aux, l2x0_size);
}
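/*
* Editor's worked sketch of the geometry arithmetic above (l2x0_geometry is
* a hypothetical name, not driver code). Example with assumed values: an
* AUX_CTRL way-size field of 3 and 8 ways gives way_size = 1KB << (3 + 3)
* = 64KB, a total of 8 * 64KB = 512KB, and 64KB / 32B = 2048 sets per way.
*/
static void l2x0_geometry(u32 aux, u32 ways, u32 *size, u32 *sets)
{
u32 way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
way_size = SZ_1K << (way_size + 3);
*size = ways * way_size;
*sets = way_size / CACHE_LINE_SIZE;
}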
| gpl-2.0 |
fmaker/kernel_msm | fs/direct-io.c | 711 | 36938 | /*
* fs/direct-io.c
*
* Copyright (C) 2002, Linus Torvalds.
*
* O_DIRECT
*
* 04Jul2002 Andrew Morton
* Initial version
* 11Sep2002 janetinc@us.ibm.com
* added readv/writev support.
* 29Oct2002 Andrew Morton
* rewrote bio_add_page() support.
* 30Oct2002 pbadari@us.ibm.com
* added support for non-aligned IO.
* 06Nov2002 pbadari@us.ibm.com
* added asynchronous IO support.
* 21Jul2003 nathans@sgi.com
* added IO completion notifier.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <asm/atomic.h>
/*
* How many user pages to map in one call to get_user_pages(). This determines
* the size of a structure on the stack.
*/
#define DIO_PAGES 64
/*
* This code generally works in units of "dio_blocks". A dio_block is
* somewhere between the hard sector size and the filesystem block size. It
* is determined on a per-invocation basis. When talking to the filesystem
* we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
* down by dio->blkfactor. Similarly, fs-blocksize quantities are converted
* to dio_block quantities by shifting left by blkfactor.
*
* If blkfactor is zero then the user's request was aligned to the filesystem's
* blocksize.
*/
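/*
* Editor's sketch of the conversions just described (hypothetical helpers,
* not part of this file): fs_blocks scale down by blkfactor, dio_blocks
* scale up. E.g. with 512-byte dio_blocks inside 4KB fs blocks,
* blkfactor = 3 and dio_block 16 maps to fs_block 16 >> 3 = 2.
*/
static inline sector_t dio_to_fs_block(sector_t dio_block, unsigned blkfactor)
{
return dio_block >> blkfactor;
}
static inline sector_t fs_to_dio_block(sector_t fs_block, unsigned blkfactor)
{
return fs_block << blkfactor;
}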
struct dio {
/* BIO submission state */
struct bio *bio; /* bio under assembly */
struct inode *inode;
int rw;
loff_t i_size; /* i_size when submitted */
int flags; /* doesn't change */
unsigned blkbits; /* doesn't change */
unsigned blkfactor; /* When we're using an alignment which
is finer than the filesystem's soft
blocksize, this specifies how much
finer. blkfactor=2 means 1/4-block
alignment. Does not change */
unsigned start_zero_done; /* flag: sub-blocksize zeroing has
been performed at the start of a
write */
int pages_in_io; /* approximate total IO pages */
size_t size; /* total request size (doesn't change)*/
sector_t block_in_file; /* Current offset into the underlying
file in dio_block units. */
unsigned blocks_available; /* At block_in_file. changes */
sector_t final_block_in_request;/* doesn't change */
unsigned first_block_in_page; /* doesn't change, Used only once */
int boundary; /* prev block is at a boundary */
int reap_counter; /* rate limit reaping */
get_block_t *get_block; /* block mapping function */
dio_iodone_t *end_io; /* IO completion function */
dio_submit_t *submit_io; /* IO submission function */
loff_t logical_offset_in_bio; /* current first logical block in bio */
sector_t final_block_in_bio; /* current final block in bio + 1 */
sector_t next_block_for_io; /* next block to be put under IO,
in dio_blocks units */
struct buffer_head map_bh; /* last get_block() result */
/*
* Deferred addition of a page to the dio. These variables are
* private to dio_send_cur_page(), submit_page_section() and
* dio_bio_add_page().
*/
struct page *cur_page; /* The page */
unsigned cur_page_offset; /* Offset into it, in bytes */
unsigned cur_page_len; /* Nr of bytes at cur_page_offset */
sector_t cur_page_block; /* Where it starts */
loff_t cur_page_fs_offset; /* Offset in file */
/* BIO completion state */
spinlock_t bio_lock; /* protects BIO fields below */
unsigned long refcount; /* direct_io_worker() and bios */
struct bio *bio_list; /* singly linked via bi_private */
struct task_struct *waiter; /* waiting task (NULL if none) */
/* AIO related stuff */
struct kiocb *iocb; /* kiocb */
int is_async; /* is IO async ? */
int io_error; /* IO error in completion path */
ssize_t result; /* IO result */
/*
* Page fetching state. These variables belong to dio_refill_pages().
*/
int curr_page; /* changes */
int total_pages; /* doesn't change */
unsigned long curr_user_address;/* changes */
/*
* Page queue. These variables belong to dio_refill_pages() and
* dio_get_page().
*/
unsigned head; /* next page to process */
unsigned tail; /* last valid page + 1 */
int page_errors; /* errno from get_user_pages() */
/*
* pages[] (and any fields placed after it) are not zeroed out at
* allocation time. Don't add new fields after pages[] unless you
* wish that they not be zeroed.
*/
struct page *pages[DIO_PAGES]; /* page buffer */
};
/*
* How many pages are in the queue?
*/
static inline unsigned dio_pages_present(struct dio *dio)
{
return dio->tail - dio->head;
}
/*
* Go grab and pin some userspace pages. Typically we'll get 64 at a time.
*/
static int dio_refill_pages(struct dio *dio)
{
int ret;
int nr_pages;
nr_pages = min(dio->total_pages - dio->curr_page, DIO_PAGES);
ret = get_user_pages_fast(
dio->curr_user_address, /* Where from? */
nr_pages, /* How many pages? */
dio->rw == READ, /* Write to memory? */
&dio->pages[0]); /* Put results here */
if (ret < 0 && dio->blocks_available && (dio->rw & WRITE)) {
struct page *page = ZERO_PAGE(0);
/*
* A memory fault, but the filesystem has some outstanding
* mapped blocks. We need to use those blocks up to avoid
* leaking stale data in the file.
*/
if (dio->page_errors == 0)
dio->page_errors = ret;
page_cache_get(page);
dio->pages[0] = page;
dio->head = 0;
dio->tail = 1;
ret = 0;
goto out;
}
if (ret >= 0) {
dio->curr_user_address += ret * PAGE_SIZE;
dio->curr_page += ret;
dio->head = 0;
dio->tail = ret;
ret = 0;
}
out:
return ret;
}
/*
* Get another userspace page. Returns an ERR_PTR on error. Pages are
* buffered inside the dio so that we can call get_user_pages() against a
decent number of pages, less frequently. This provides nicer use of the
L1 cache.
*/
static struct page *dio_get_page(struct dio *dio)
{
if (dio_pages_present(dio) == 0) {
int ret;
ret = dio_refill_pages(dio);
if (ret)
return ERR_PTR(ret);
BUG_ON(dio_pages_present(dio) == 0);
}
return dio->pages[dio->head++];
}
/**
* dio_complete() - called when all DIO BIO I/O has been completed
* @offset: the byte offset in the file of the completed operation
*
* This releases locks as dictated by the locking type, lets interested parties
* know that a DIO operation has completed, and calculates the resulting return
* code for the operation.
*
* It lets the filesystem know if it registered an interest earlier via
* get_block. Pass the private field of the map buffer_head so that
* filesystems can use it to hold additional state between get_block calls and
* dio_complete.
*/
static int dio_complete(struct dio *dio, loff_t offset, int ret, bool is_async)
{
ssize_t transferred = 0;
/*
* AIO submission can race with bio completion to get here while
* expecting to have the last io completed by bio completion.
* In that case -EIOCBQUEUED is in fact not an error we want
* to preserve through this call.
*/
if (ret == -EIOCBQUEUED)
ret = 0;
if (dio->result) {
transferred = dio->result;
/* Check for short read case */
if ((dio->rw == READ) && ((offset + transferred) > dio->i_size))
transferred = dio->i_size - offset;
}
if (ret == 0)
ret = dio->page_errors;
if (ret == 0)
ret = dio->io_error;
if (ret == 0)
ret = transferred;
if (dio->end_io && dio->result) {
dio->end_io(dio->iocb, offset, transferred,
dio->map_bh.b_private, ret, is_async);
} else if (is_async) {
aio_complete(dio->iocb, ret, 0);
}
if (dio->flags & DIO_LOCKING)
/* lockdep: non-owner release */
up_read_non_owner(&dio->inode->i_alloc_sem);
return ret;
}
static int dio_bio_complete(struct dio *dio, struct bio *bio);
/*
* Asynchronous IO callback.
*/
static void dio_bio_end_aio(struct bio *bio, int error)
{
struct dio *dio = bio->bi_private;
unsigned long remaining;
unsigned long flags;
/* cleanup the bio */
dio_bio_complete(dio, bio);
spin_lock_irqsave(&dio->bio_lock, flags);
remaining = --dio->refcount;
if (remaining == 1 && dio->waiter)
wake_up_process(dio->waiter);
spin_unlock_irqrestore(&dio->bio_lock, flags);
if (remaining == 0) {
dio_complete(dio, dio->iocb->ki_pos, 0, true);
kfree(dio);
}
}
/*
* The BIO completion handler simply queues the BIO up for the process-context
* handler.
*
* During I/O bi_private points at the dio. After I/O, bi_private is used to
* implement a singly-linked list of completed BIOs, at dio->bio_list.
*/
static void dio_bio_end_io(struct bio *bio, int error)
{
struct dio *dio = bio->bi_private;
unsigned long flags;
spin_lock_irqsave(&dio->bio_lock, flags);
bio->bi_private = dio->bio_list;
dio->bio_list = bio;
if (--dio->refcount == 1 && dio->waiter)
wake_up_process(dio->waiter);
spin_unlock_irqrestore(&dio->bio_lock, flags);
}
/**
* dio_end_io - handle the end io action for the given bio
* @bio: The direct io bio that's being completed
* @error: Error if there was one
*
* This is meant to be called by any filesystem that uses its own dio_submit_t
* so that the DIO-specific endio actions are dealt with after the filesystem
* has done its completion work.
*/
void dio_end_io(struct bio *bio, int error)
{
struct dio *dio = bio->bi_private;
if (dio->is_async)
dio_bio_end_aio(bio, error);
else
dio_bio_end_io(bio, error);
}
EXPORT_SYMBOL_GPL(dio_end_io);
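/*
* Editor's hypothetical usage sketch (my_fs_end_bio is an assumed name, not
* an API defined here): a filesystem supplying its own dio_submit_t does
* its private completion work first, then hands the bio back through
* dio_end_io() so the DIO bookkeeping above still runs.
*/
static void my_fs_end_bio(struct bio *bio, int error)
{
/* filesystem-private completion handling would go here */
dio_end_io(bio, error);
}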
static int
dio_bio_alloc(struct dio *dio, struct block_device *bdev,
sector_t first_sector, int nr_vecs)
{
struct bio *bio;
bio = bio_alloc(GFP_KERNEL, nr_vecs);
bio->bi_bdev = bdev;
bio->bi_sector = first_sector;
if (dio->is_async)
bio->bi_end_io = dio_bio_end_aio;
else
bio->bi_end_io = dio_bio_end_io;
dio->bio = bio;
dio->logical_offset_in_bio = dio->cur_page_fs_offset;
return 0;
}
/*
* In the AIO read case we speculatively dirty the pages before starting IO.
* During IO completion, any of these pages which happen to have been written
* back will be redirtied by bio_check_pages_dirty().
*
* bios hold a dio reference between submit_bio and ->end_io.
*/
static void dio_bio_submit(struct dio *dio)
{
struct bio *bio = dio->bio;
unsigned long flags;
bio->bi_private = dio;
spin_lock_irqsave(&dio->bio_lock, flags);
dio->refcount++;
spin_unlock_irqrestore(&dio->bio_lock, flags);
if (dio->is_async && dio->rw == READ)
bio_set_pages_dirty(bio);
if (dio->submit_io)
dio->submit_io(dio->rw, bio, dio->inode,
dio->logical_offset_in_bio);
else
submit_bio(dio->rw, bio);
dio->bio = NULL;
dio->boundary = 0;
dio->logical_offset_in_bio = 0;
}
/*
* Release any resources in case of a failure
*/
static void dio_cleanup(struct dio *dio)
{
while (dio_pages_present(dio))
page_cache_release(dio_get_page(dio));
}
/*
* Wait for the next BIO to complete. Remove it and return it. NULL is
* returned once all BIOs have been completed. This must only be called once
* all bios have been issued so that dio->refcount can only decrease. This
* requires that the caller hold a reference on the dio.
*/
static struct bio *dio_await_one(struct dio *dio)
{
unsigned long flags;
struct bio *bio = NULL;
spin_lock_irqsave(&dio->bio_lock, flags);
/*
* Wait as long as the list is empty and there are bios in flight. bio
* completion drops the count, maybe adds to the list, and wakes while
* holding the bio_lock so we don't need set_current_state()'s barrier
* and can call it after testing our condition.
*/
while (dio->refcount > 1 && dio->bio_list == NULL) {
__set_current_state(TASK_UNINTERRUPTIBLE);
dio->waiter = current;
spin_unlock_irqrestore(&dio->bio_lock, flags);
io_schedule();
/* wake up sets us TASK_RUNNING */
spin_lock_irqsave(&dio->bio_lock, flags);
dio->waiter = NULL;
}
if (dio->bio_list) {
bio = dio->bio_list;
dio->bio_list = bio->bi_private;
}
spin_unlock_irqrestore(&dio->bio_lock, flags);
return bio;
}
/*
* Process one completed BIO. No locks are held.
*/
static int dio_bio_complete(struct dio *dio, struct bio *bio)
{
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct bio_vec *bvec = bio->bi_io_vec;
int page_no;
if (!uptodate)
dio->io_error = -EIO;
if (dio->is_async && dio->rw == READ) {
bio_check_pages_dirty(bio); /* transfers ownership */
} else {
for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
struct page *page = bvec[page_no].bv_page;
if (dio->rw == READ && !PageCompound(page))
set_page_dirty_lock(page);
page_cache_release(page);
}
bio_put(bio);
}
return uptodate ? 0 : -EIO;
}
/*
* Wait on and process all in-flight BIOs. This must only be called once
* all bios have been issued so that the refcount can only decrease.
* This just waits for all bios to make it through dio_bio_complete. IO
* errors are recorded in dio->io_error and reported to the caller via
* dio_complete().
*/
static void dio_await_completion(struct dio *dio)
{
struct bio *bio;
do {
bio = dio_await_one(dio);
if (bio)
dio_bio_complete(dio, bio);
} while (bio);
}
/*
* A really large O_DIRECT read or write can generate a lot of BIOs. So
* to keep the memory consumption sane we periodically reap any completed BIOs
* during the BIO generation phase.
*
* This also helps to limit the peak amount of pinned userspace memory.
*/
static int dio_bio_reap(struct dio *dio)
{
int ret = 0;
if (dio->reap_counter++ >= 64) {
while (dio->bio_list) {
unsigned long flags;
struct bio *bio;
int ret2;
spin_lock_irqsave(&dio->bio_lock, flags);
bio = dio->bio_list;
dio->bio_list = bio->bi_private;
spin_unlock_irqrestore(&dio->bio_lock, flags);
ret2 = dio_bio_complete(dio, bio);
if (ret == 0)
ret = ret2;
}
dio->reap_counter = 0;
}
return ret;
}
/*
* Call into the fs to map some more disk blocks. We record the current number
* of available blocks at dio->blocks_available. These are in units of the
* fs blocksize, (1 << inode->i_blkbits).
*
* The fs is allowed to map lots of blocks at once. If it wants to do that,
* it uses the passed inode-relative block number as the file offset, as usual.
*
* get_block() is passed the number of i_blkbits-sized blocks which direct_io
* has remaining to do. The fs should not map more than this number of blocks.
*
* If the fs has mapped a lot of blocks, it should populate bh->b_size to
* indicate how much contiguous disk space has been made available at
* bh->b_blocknr.
*
* If *any* of the mapped blocks are new, then the fs must set buffer_new().
* This isn't very efficient...
*
* In the case of filesystem holes: the fs may return an arbitrarily-large
* hole by returning an appropriate value in b_size and by clearing
* buffer_mapped(). However the direct-io code will only process holes one
* block at a time - it will repeatedly call get_block() as it walks the hole.
*/
static int get_more_blocks(struct dio *dio)
{
int ret;
struct buffer_head *map_bh = &dio->map_bh;
sector_t fs_startblk; /* Into file, in filesystem-sized blocks */
unsigned long fs_count; /* Number of filesystem-sized blocks */
unsigned long dio_count;/* Number of dio_block-sized blocks */
unsigned long blkmask;
int create;
/*
* If there was a memory error and we've overwritten all the
* mapped blocks then we can now return that memory error
*/
ret = dio->page_errors;
if (ret == 0) {
BUG_ON(dio->block_in_file >= dio->final_block_in_request);
fs_startblk = dio->block_in_file >> dio->blkfactor;
dio_count = dio->final_block_in_request - dio->block_in_file;
fs_count = dio_count >> dio->blkfactor;
blkmask = (1 << dio->blkfactor) - 1;
if (dio_count & blkmask)
fs_count++;
map_bh->b_state = 0;
map_bh->b_size = fs_count << dio->inode->i_blkbits;
/*
* For writes inside i_size on a DIO_SKIP_HOLES filesystem we
* forbid block creations: only overwrites are permitted.
* We will return early to the caller once we see an
* unmapped buffer head returned, and the caller will fall
* back to buffered I/O.
*
* Otherwise the decision is left to the get_blocks method,
* which may decide to handle it or also return an unmapped
* buffer head.
*/
create = dio->rw & WRITE;
if (dio->flags & DIO_SKIP_HOLES) {
if (dio->block_in_file < (i_size_read(dio->inode) >>
dio->blkbits))
create = 0;
}
ret = (*dio->get_block)(dio->inode, fs_startblk,
map_bh, create);
}
return ret;
}
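/*
* Editor's sketch of the get_block() contract described above (my_fs_lookup
* is a hypothetical helper; this is illustration, not code from this file):
* map as much contiguous disk as is known via b_size/b_blocknr, set
* buffer_new() only for freshly allocated blocks, and report a hole by
* leaving the buffer unmapped.
*/
static int example_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh, int create)
{
sector_t phys;
unsigned long contig, new;
if (!my_fs_lookup(inode, iblock, create, &phys, &contig, &new))
return 0; /* hole: bh stays unmapped */
map_bh(bh, inode->i_sb, phys); /* sets mapped state, bdev, blocknr */
bh->b_size = min_t(unsigned long, bh->b_size,
contig << inode->i_blkbits);
if (new)
set_buffer_new(bh); /* freshly allocated blocks */
return 0;
}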
/*
* There is no bio. Make one now.
*/
static int dio_new_bio(struct dio *dio, sector_t start_sector)
{
sector_t sector;
int ret, nr_pages;
ret = dio_bio_reap(dio);
if (ret)
goto out;
sector = start_sector << (dio->blkbits - 9);
nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
BUG_ON(nr_pages <= 0);
ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
dio->boundary = 0;
out:
return ret;
}
/*
* Attempt to put the current chunk of 'cur_page' into the current BIO. If
* that was successful then update final_block_in_bio and take a ref against
* the just-added page.
*
* Return zero on success. Non-zero means the caller needs to start a new BIO.
*/
static int dio_bio_add_page(struct dio *dio)
{
int ret;
ret = bio_add_page(dio->bio, dio->cur_page,
dio->cur_page_len, dio->cur_page_offset);
if (ret == dio->cur_page_len) {
/*
* Decrement count only, if we are done with this page
*/
if ((dio->cur_page_len + dio->cur_page_offset) == PAGE_SIZE)
dio->pages_in_io--;
page_cache_get(dio->cur_page);
dio->final_block_in_bio = dio->cur_page_block +
(dio->cur_page_len >> dio->blkbits);
ret = 0;
} else {
ret = 1;
}
return ret;
}
/*
* Put cur_page under IO. The section of cur_page which is described by
* cur_page_offset,cur_page_len is put into a BIO. The section of cur_page
* starts on-disk at cur_page_block.
*
* We take a ref against the page here (on behalf of its presence in the bio).
*
* The caller of this function is responsible for removing cur_page from the
* dio, and for dropping the refcount which came from that presence.
*/
static int dio_send_cur_page(struct dio *dio)
{
int ret = 0;
if (dio->bio) {
loff_t cur_offset = dio->cur_page_fs_offset;
loff_t bio_next_offset = dio->logical_offset_in_bio +
dio->bio->bi_size;
/*
* See whether this new request is contiguous with the old.
*
* Btrfs cannot handle having logically non-contiguous requests
* submitted. For example if you have
*
* Logical: [0-4095][HOLE][8192-12287]
* Physical: [0-4095] [4096-8191]
*
* We cannot submit those pages together as one BIO. So if our
* current logical offset in the file does not equal what would
* be the next logical offset in the bio, submit the bio we
* have.
*/
if (dio->final_block_in_bio != dio->cur_page_block ||
cur_offset != bio_next_offset)
dio_bio_submit(dio);
/*
* Submit now if the underlying fs is about to perform a
* metadata read
*/
else if (dio->boundary)
dio_bio_submit(dio);
}
if (dio->bio == NULL) {
ret = dio_new_bio(dio, dio->cur_page_block);
if (ret)
goto out;
}
if (dio_bio_add_page(dio) != 0) {
dio_bio_submit(dio);
ret = dio_new_bio(dio, dio->cur_page_block);
if (ret == 0) {
ret = dio_bio_add_page(dio);
BUG_ON(ret != 0);
}
}
out:
return ret;
}
/*
* An autonomous function to put a chunk of a page under deferred IO.
*
* The caller doesn't actually know (or care) whether this piece of page is in
* a BIO, or is under IO or whatever. We just take care of all possible
* situations here. The separation between the logic of do_direct_IO() and
* that of submit_page_section() is important for clarity. Please don't break it.
*
* The chunk of page starts on-disk at blocknr.
*
* We perform deferred IO, by recording the last-submitted page inside our
* private part of the dio structure. If possible, we just expand the IO
* across that page here.
*
* If that doesn't work out then we put the old page into the bio and add this
* page to the dio instead.
*/
static int
submit_page_section(struct dio *dio, struct page *page,
unsigned offset, unsigned len, sector_t blocknr)
{
int ret = 0;
if (dio->rw & WRITE) {
/*
* Read accounting is performed in submit_bio()
*/
task_io_account_write(len);
}
/*
* Can we just grow the current page's presence in the dio?
*/
if ( (dio->cur_page == page) &&
(dio->cur_page_offset + dio->cur_page_len == offset) &&
(dio->cur_page_block +
(dio->cur_page_len >> dio->blkbits) == blocknr)) {
dio->cur_page_len += len;
/*
* If dio->boundary then we want to schedule the IO now to
* avoid metadata seeks.
*/
if (dio->boundary) {
ret = dio_send_cur_page(dio);
page_cache_release(dio->cur_page);
dio->cur_page = NULL;
}
goto out;
}
/*
* If there's a deferred page already there then send it.
*/
if (dio->cur_page) {
ret = dio_send_cur_page(dio);
page_cache_release(dio->cur_page);
dio->cur_page = NULL;
if (ret)
goto out;
}
page_cache_get(page); /* It is in dio */
dio->cur_page = page;
dio->cur_page_offset = offset;
dio->cur_page_len = len;
dio->cur_page_block = blocknr;
dio->cur_page_fs_offset = dio->block_in_file << dio->blkbits;
out:
return ret;
}
/*
* Clean any dirty buffers in the blockdev mapping which alias newly-created
* file blocks. Only called for S_ISREG files - blockdevs do not set
* buffer_new.
*/
static void clean_blockdev_aliases(struct dio *dio)
{
unsigned i;
unsigned nblocks;
nblocks = dio->map_bh.b_size >> dio->inode->i_blkbits;
for (i = 0; i < nblocks; i++) {
unmap_underlying_metadata(dio->map_bh.b_bdev,
dio->map_bh.b_blocknr + i);
}
}
/*
* If we are not writing the entire block and get_block() allocated
* the block for us, we need to fill-in the unused portion of the
* block with zeros. This happens only if the user buffer, file offset or
* IO length is not a multiple of the filesystem block size.
*
* `end' is zero if we're doing the start of the IO, 1 at the end of the
* IO.
*/
static void dio_zero_block(struct dio *dio, int end)
{
unsigned dio_blocks_per_fs_block;
unsigned this_chunk_blocks; /* In dio_blocks */
unsigned this_chunk_bytes;
struct page *page;
dio->start_zero_done = 1;
if (!dio->blkfactor || !buffer_new(&dio->map_bh))
return;
dio_blocks_per_fs_block = 1 << dio->blkfactor;
this_chunk_blocks = dio->block_in_file & (dio_blocks_per_fs_block - 1);
if (!this_chunk_blocks)
return;
/*
* We need to zero out part of an fs block. It is either at the
* beginning or the end of the fs block.
*/
if (end)
this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;
this_chunk_bytes = this_chunk_blocks << dio->blkbits;
page = ZERO_PAGE(0);
if (submit_page_section(dio, page, 0, this_chunk_bytes,
dio->next_block_for_io))
return;
dio->next_block_for_io += this_chunk_blocks;
}
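/*
* Editor's worked example for the zeroing above (numbers assumed): with
* blkfactor = 2 there are 1 << 2 = 4 dio_blocks per fs block. If the IO
* cursor sits one dio_block into a newly allocated fs block
* (block_in_file & 3 == 1), the start pass zeroes that 1 block and the end
* pass would zero the complement, 4 - 1 = 3 blocks.
*/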
/*
* Walk the user pages, and the file, mapping blocks to disk and generating
* a sequence of (page,offset,len,block) mappings. These mappings are injected
* into submit_page_section(), which takes care of the next stage of submission
*
* Direct IO against a blockdev is different from a file, because we can
* happily perform page-sized but 512-byte-aligned IOs. It is important that
* blockdev IO be able to have fine alignment and large sizes.
*
* So what we do is to permit the ->get_block function to populate bh.b_size
* with the size of IO which is permitted at this offset and this i_blkbits.
*
* For best results, the blockdev should be set up with 512-byte i_blkbits and
* it should set b_size to PAGE_SIZE or more inside get_block(). This gives
* fine alignment but still allows this function to work in PAGE_SIZE units.
*/
static int do_direct_IO(struct dio *dio)
{
const unsigned blkbits = dio->blkbits;
const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
struct page *page;
unsigned block_in_page;
struct buffer_head *map_bh = &dio->map_bh;
int ret = 0;
/* The I/O can start at any block offset within the first page */
block_in_page = dio->first_block_in_page;
while (dio->block_in_file < dio->final_block_in_request) {
page = dio_get_page(dio);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
}
while (block_in_page < blocks_per_page) {
unsigned offset_in_page = block_in_page << blkbits;
unsigned this_chunk_bytes; /* # of bytes mapped */
unsigned this_chunk_blocks; /* # of blocks */
unsigned u;
if (dio->blocks_available == 0) {
/*
* Need to go and map some more disk
*/
unsigned long blkmask;
unsigned long dio_remainder;
ret = get_more_blocks(dio);
if (ret) {
page_cache_release(page);
goto out;
}
if (!buffer_mapped(map_bh))
goto do_holes;
dio->blocks_available =
map_bh->b_size >> dio->blkbits;
dio->next_block_for_io =
map_bh->b_blocknr << dio->blkfactor;
if (buffer_new(map_bh))
clean_blockdev_aliases(dio);
if (!dio->blkfactor)
goto do_holes;
blkmask = (1 << dio->blkfactor) - 1;
dio_remainder = (dio->block_in_file & blkmask);
/*
* If we are at the start of IO and that IO
* starts partway into a fs-block,
* dio_remainder will be non-zero. If the IO
* is a read then we can simply advance the IO
* cursor to the first block which is to be
* read. But if the IO is a write and the
* block was newly allocated we cannot do that;
* the start of the fs block must be zeroed out
* on-disk
*/
if (!buffer_new(map_bh))
dio->next_block_for_io += dio_remainder;
dio->blocks_available -= dio_remainder;
}
do_holes:
/* Handle holes */
if (!buffer_mapped(map_bh)) {
loff_t i_size_aligned;
/* AKPM: eargh, -ENOTBLK is a hack */
if (dio->rw & WRITE) {
page_cache_release(page);
return -ENOTBLK;
}
/*
* Be sure to account for a partial block as the
* last block in the file
*/
i_size_aligned = ALIGN(i_size_read(dio->inode),
1 << blkbits);
if (dio->block_in_file >=
i_size_aligned >> blkbits) {
/* We hit eof */
page_cache_release(page);
goto out;
}
zero_user(page, block_in_page << blkbits,
1 << blkbits);
dio->block_in_file++;
block_in_page++;
goto next_block;
}
/*
* If we're performing IO which has an alignment which
* is finer than the underlying fs, go check to see if
* we must zero out the start of this block.
*/
if (unlikely(dio->blkfactor && !dio->start_zero_done))
dio_zero_block(dio, 0);
/*
* Work out, in this_chunk_blocks, how much disk we
* can add to this page
*/
this_chunk_blocks = dio->blocks_available;
u = (PAGE_SIZE - offset_in_page) >> blkbits;
if (this_chunk_blocks > u)
this_chunk_blocks = u;
u = dio->final_block_in_request - dio->block_in_file;
if (this_chunk_blocks > u)
this_chunk_blocks = u;
this_chunk_bytes = this_chunk_blocks << blkbits;
BUG_ON(this_chunk_bytes == 0);
dio->boundary = buffer_boundary(map_bh);
ret = submit_page_section(dio, page, offset_in_page,
this_chunk_bytes, dio->next_block_for_io);
if (ret) {
page_cache_release(page);
goto out;
}
dio->next_block_for_io += this_chunk_blocks;
dio->block_in_file += this_chunk_blocks;
block_in_page += this_chunk_blocks;
dio->blocks_available -= this_chunk_blocks;
next_block:
BUG_ON(dio->block_in_file > dio->final_block_in_request);
if (dio->block_in_file == dio->final_block_in_request)
break;
}
/* Drop the ref which was taken in get_user_pages() */
page_cache_release(page);
block_in_page = 0;
}
out:
return ret;
}
/*
* Releases both i_mutex and i_alloc_sem
*/
static ssize_t
direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
const struct iovec *iov, loff_t offset, unsigned long nr_segs,
unsigned blkbits, get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, struct dio *dio)
{
unsigned long user_addr;
unsigned long flags;
int seg;
ssize_t ret = 0;
ssize_t ret2;
size_t bytes;
dio->inode = inode;
dio->rw = rw;
dio->blkbits = blkbits;
dio->blkfactor = inode->i_blkbits - blkbits;
dio->block_in_file = offset >> blkbits;
dio->get_block = get_block;
dio->end_io = end_io;
dio->submit_io = submit_io;
dio->final_block_in_bio = -1;
dio->next_block_for_io = -1;
dio->iocb = iocb;
dio->i_size = i_size_read(inode);
spin_lock_init(&dio->bio_lock);
dio->refcount = 1;
/*
* In case of non-aligned buffers, we may need 2 more
* pages since we need to zero out the first and last blocks.
*/
if (unlikely(dio->blkfactor))
dio->pages_in_io = 2;
for (seg = 0; seg < nr_segs; seg++) {
user_addr = (unsigned long)iov[seg].iov_base;
dio->pages_in_io +=
((user_addr+iov[seg].iov_len +PAGE_SIZE-1)/PAGE_SIZE
- user_addr/PAGE_SIZE);
}
for (seg = 0; seg < nr_segs; seg++) {
user_addr = (unsigned long)iov[seg].iov_base;
dio->size += bytes = iov[seg].iov_len;
/* Index into the first page of the first block */
dio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
dio->final_block_in_request = dio->block_in_file +
(bytes >> blkbits);
/* Page fetching state */
dio->head = 0;
dio->tail = 0;
dio->curr_page = 0;
dio->total_pages = 0;
if (user_addr & (PAGE_SIZE-1)) {
dio->total_pages++;
bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
}
dio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
dio->curr_user_address = user_addr;
ret = do_direct_IO(dio);
dio->result += iov[seg].iov_len -
((dio->final_block_in_request - dio->block_in_file) <<
blkbits);
if (ret) {
dio_cleanup(dio);
break;
}
} /* end iovec loop */
if (ret == -ENOTBLK) {
/*
* The remaining part of the request will be
* handled by buffered I/O when we return
*/
ret = 0;
}
/*
* There may be some unwritten disk at the end of a part-written
* fs-block-sized block. Go zero that now.
*/
dio_zero_block(dio, 1);
if (dio->cur_page) {
ret2 = dio_send_cur_page(dio);
if (ret == 0)
ret = ret2;
page_cache_release(dio->cur_page);
dio->cur_page = NULL;
}
if (dio->bio)
dio_bio_submit(dio);
/*
* It is possible that we return short IO due to end of file.
* In that case, we need to release all the pages we got hold of.
*/
dio_cleanup(dio);
/*
* All block lookups have been performed. For READ requests
* we can let i_mutex go now that it's achieved its purpose
* of protecting us from looking up uninitialized blocks.
*/
if (rw == READ && (dio->flags & DIO_LOCKING))
mutex_unlock(&dio->inode->i_mutex);
/*
* The only time we want to leave bios in flight is when a successful
* partial aio read or full aio write has been set up. In that case
* bio completion will call aio_complete. The only time it's safe to
* call aio_complete is when we return -EIOCBQUEUED, so we key on that.
* This had *better* be the only place that raises -EIOCBQUEUED.
*/
BUG_ON(ret == -EIOCBQUEUED);
if (dio->is_async && ret == 0 && dio->result &&
((rw & READ) || (dio->result == dio->size)))
ret = -EIOCBQUEUED;
if (ret != -EIOCBQUEUED) {
/* All IO is now issued, send it on its way */
blk_run_address_space(inode->i_mapping);
dio_await_completion(dio);
}
/*
* Sync will always be dropping the final ref and completing the
* operation. AIO can if it was a broken operation described above or
* in fact if all the bios race to complete before we get here. In
* that case dio_complete() translates the EIOCBQUEUED into the proper
* return code that the caller will hand to aio_complete().
*
* This is managed by the bio_lock instead of being an atomic_t so that
* completion paths can drop their ref and use the remaining count to
* decide to wake the submission path atomically.
*/
spin_lock_irqsave(&dio->bio_lock, flags);
ret2 = --dio->refcount;
spin_unlock_irqrestore(&dio->bio_lock, flags);
if (ret2 == 0) {
ret = dio_complete(dio, offset, ret, false);
kfree(dio);
} else
BUG_ON(ret != -EIOCBQUEUED);
return ret;
}
ssize_t
__blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int flags)
{
int seg;
size_t size;
unsigned long addr;
unsigned blkbits = inode->i_blkbits;
unsigned bdev_blkbits = 0;
unsigned blocksize_mask = (1 << blkbits) - 1;
ssize_t retval = -EINVAL;
loff_t end = offset;
struct dio *dio;
if (rw & WRITE)
rw = WRITE_ODIRECT_PLUG;
if (bdev)
bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
if (offset & blocksize_mask) {
if (bdev)
blkbits = bdev_blkbits;
blocksize_mask = (1 << blkbits) - 1;
if (offset & blocksize_mask)
goto out;
}
/* Check the memory alignment. Blocks cannot straddle pages */
for (seg = 0; seg < nr_segs; seg++) {
addr = (unsigned long)iov[seg].iov_base;
size = iov[seg].iov_len;
end += size;
if ((addr & blocksize_mask) || (size & blocksize_mask)) {
if (bdev)
blkbits = bdev_blkbits;
blocksize_mask = (1 << blkbits) - 1;
if ((addr & blocksize_mask) || (size & blocksize_mask))
goto out;
}
}
dio = kmalloc(sizeof(*dio), GFP_KERNEL);
retval = -ENOMEM;
if (!dio)
goto out;
/*
* Believe it or not, zeroing out the page array caused a .5%
* performance regression in a database benchmark. So, we take
* care to only zero out what's needed.
*/
memset(dio, 0, offsetof(struct dio, pages));
dio->flags = flags;
if (dio->flags & DIO_LOCKING) {
/* watch out for a 0 len io from a tricksy fs */
if (rw == READ && end > offset) {
struct address_space *mapping =
iocb->ki_filp->f_mapping;
/* will be released by direct_io_worker */
mutex_lock(&inode->i_mutex);
retval = filemap_write_and_wait_range(mapping, offset,
end - 1);
if (retval) {
mutex_unlock(&inode->i_mutex);
kfree(dio);
goto out;
}
}
/*
* Will be released at I/O completion, possibly in a
* different thread.
*/
down_read_non_owner(&inode->i_alloc_sem);
}
/*
* For file-extending writes, updating i_size before data
* writeouts complete can expose uninitialized blocks. So
* even for AIO, we need to wait for i/o to complete before
* returning in this case.
*/
dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
(end > i_size_read(inode)));
retval = direct_io_worker(rw, iocb, inode, iov, offset,
nr_segs, blkbits, get_block, end_io,
submit_io, dio);
out:
return retval;
}
EXPORT_SYMBOL(__blockdev_direct_IO_newtrunc);
/*
* This is a library function for use by filesystem drivers.
*
* The locking rules are governed by the flags parameter:
* - if the flags value contains DIO_LOCKING we use a fancy locking
* scheme for dumb filesystems.
* For writes this function is called under i_mutex and returns with
* i_mutex held, for reads, i_mutex is not held on entry, but it is
* taken and dropped again before returning.
* For reads and writes i_alloc_sem is taken in shared mode and released
* on I/O completion (which may happen asynchronously after returning to
* the caller).
*
* - if the flags value does NOT contain DIO_LOCKING we don't use any
* internal locking but rather rely on the filesystem to synchronize
* direct I/O reads/writes versus each other and truncate.
* For reads and writes both i_mutex and i_alloc_sem are not held on
* entry and are never taken.
*/
ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
dio_submit_t submit_io, int flags)
{
ssize_t retval;
retval = __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov,
offset, nr_segs, get_block, end_io, submit_io, flags);
/*
* In case of error, an extending write may have instantiated a few
* blocks outside i_size. Trim these off again for DIO_LOCKING.
* NOTE: DIO_NO_LOCK/DIO_OWN_LOCK callers (i.e. filesystems with their
* own locking) have to handle this in their own manner. This is a
* further example of where the old truncate sequence is inadequate.
*/
if (flags & DIO_LOCKING) {
if (unlikely((rw & WRITE) && retval < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = offset + iov_length(iov, nr_segs);
if (end > isize)
vmtruncate(inode, isize);
}
}
return retval;
}
EXPORT_SYMBOL(__blockdev_direct_IO);
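/*
* Editor's hypothetical caller sketch (my_fs_direct_IO and my_fs_get_block
* are assumed names): a filesystem using the DIO_LOCKING scheme described
* above typically wraps this library function from its ->direct_IO method.
*/
static ssize_t my_fs_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
struct inode *inode = iocb->ki_filp->f_mapping->host;
return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
iov, offset, nr_segs, my_fs_get_block,
NULL, NULL, DIO_LOCKING);
}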
| gpl-2.0 |
El-Nath/bidji-find5 | drivers/net/usb/rmnet_usb_ctrl.c | 1223 | 30570 | /* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <linux/termios.h>
#include <linux/poll.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
#include "rmnet_usb.h"
static char *rmnet_dev_names[MAX_RMNET_DEVS] = {"hsicctl"};
module_param_array(rmnet_dev_names, charp, NULL, S_IRUGO | S_IWUSR);
#define DEFAULT_READ_URB_LENGTH 0x1000
#define UNLINK_TIMEOUT_MS 500 /*random value*/
/*Output control lines.*/
#define ACM_CTRL_DTR BIT(0)
#define ACM_CTRL_RTS BIT(1)
/*Input control lines.*/
#define ACM_CTRL_DSR BIT(0)
#define ACM_CTRL_CTS BIT(1)
#define ACM_CTRL_RI BIT(2)
#define ACM_CTRL_CD BIT(3)
/* polling interval for Interrupt ep */
#define HS_INTERVAL 7
#define FS_LS_INTERVAL 3
/* echo <timeout in seconds> > /sys/class/hsicctl/hsicctlX/modem_wait */
static ssize_t modem_wait_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t n)
{
unsigned int mdm_wait;
struct rmnet_ctrl_dev *dev = dev_get_drvdata(d);
if (!dev)
return -ENODEV;
sscanf(buf, "%u", &mdm_wait);
dev->mdm_wait_timeout = mdm_wait;
return n;
}
static ssize_t modem_wait_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct rmnet_ctrl_dev *dev = dev_get_drvdata(d);
if (!dev)
return -ENODEV;
return snprintf(buf, PAGE_SIZE, "%u\n", dev->mdm_wait_timeout);
}
static DEVICE_ATTR(modem_wait, 0664, modem_wait_show, modem_wait_store);
static int ctl_msg_dbg_mask;
module_param_named(dump_ctrl_msg, ctl_msg_dbg_mask, int,
S_IRUGO | S_IWUSR | S_IWGRP);
enum {
MSM_USB_CTL_DEBUG = 1U << 0,
MSM_USB_CTL_DUMP_BUFFER = 1U << 1,
};
#define DUMP_BUFFER(prestr, cnt, buf) \
do { \
if (ctl_msg_dbg_mask & MSM_USB_CTL_DUMP_BUFFER) \
print_hex_dump(KERN_INFO, prestr, DUMP_PREFIX_NONE, \
16, 1, buf, cnt, false); \
} while (0)
#define DBG(x...) \
do { \
if (ctl_msg_dbg_mask & MSM_USB_CTL_DEBUG) \
pr_info(x); \
} while (0)
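/*
* Editor's usage note (paths assumed, not taken from this file): bit 0 of
* dump_ctrl_msg enables DBG() and bit 1 enables DUMP_BUFFER(), so writing 3
* to the module parameter, e.g. under
* /sys/module/<module>/parameters/dump_ctrl_msg, turns both on.
*/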
/* passed in rmnet_usb_ctrl_init */
static int num_devs;
static int insts_per_dev;
/* dynamically allocated 2-D array of num_devs*insts_per_dev ctrl_devs */
static struct rmnet_ctrl_dev **ctrl_devs;
static struct class *ctrldev_classp[MAX_RMNET_DEVS];
static dev_t ctrldev_num[MAX_RMNET_DEVS];
struct ctrl_pkt {
size_t data_size;
void *data;
void *ctxt;
};
struct ctrl_pkt_list_elem {
struct list_head list;
struct ctrl_pkt cpkt;
};
static void resp_avail_cb(struct urb *);
static int rmnet_usb_ctrl_dmux(struct ctrl_pkt_list_elem *clist)
{
struct mux_hdr *hdr;
size_t pad_len;
size_t total_len;
unsigned int mux_id;
hdr = (struct mux_hdr *)clist->cpkt.data;
pad_len = hdr->padding_info >> MUX_PAD_SHIFT;
if (pad_len > MAX_PAD_BYTES(4)) {
pr_err_ratelimited("%s: Invalid pad len %d\n", __func__,
pad_len);
return -EINVAL;
}
mux_id = hdr->mux_id;
if (!mux_id || mux_id > insts_per_dev) {
pr_err_ratelimited("%s: Invalid mux id %d\n", __func__, mux_id);
return -EINVAL;
}
total_len = le16_to_cpu(hdr->pkt_len_w_padding);
if (!total_len || !(total_len - pad_len)) {
pr_err_ratelimited("%s: Invalid pkt length %d\n", __func__,
total_len);
return -EINVAL;
}
clist->cpkt.data_size = total_len - pad_len;
return mux_id - 1;
}
static void rmnet_usb_ctrl_mux(unsigned int id, struct ctrl_pkt *cpkt)
{
struct mux_hdr *hdr;
size_t len;
size_t pad_len = 0;
hdr = (struct mux_hdr *)cpkt->data;
hdr->mux_id = id + 1;
len = cpkt->data_size - sizeof(struct mux_hdr) - MAX_PAD_BYTES(4);
/*add padding if len is not 4 byte aligned*/
pad_len = ALIGN(len, 4) - len;
hdr->pkt_len_w_padding = cpu_to_le16(len + pad_len);
hdr->padding_info = (pad_len << MUX_PAD_SHIFT) | MUX_CTRL_MASK;
cpkt->data_size = sizeof(struct mux_hdr) + len + pad_len;
}
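/*
* Editor's sketch of the padding rule above (mux_pad_bytes is a
* hypothetical helper): payloads are padded to a 4-byte boundary, so a
* 5-byte payload gets ALIGN(5, 4) - 5 = 3 pad bytes and
* pkt_len_w_padding = 8.
*/
static inline size_t mux_pad_bytes(size_t len)
{
return ALIGN(len, 4) - len;
}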
static void get_encap_work(struct work_struct *w)
{
struct usb_device *udev;
struct rmnet_ctrl_dev *dev =
container_of(w, struct rmnet_ctrl_dev, get_encap_work);
int status;
if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status))
return;
udev = interface_to_usbdev(dev->intf);
status = usb_autopm_get_interface(dev->intf);
if (status < 0 && status != -EAGAIN && status != -EACCES) {
dev->get_encap_failure_cnt++;
return;
}
usb_fill_control_urb(dev->rcvurb, udev,
usb_rcvctrlpipe(udev, 0),
(unsigned char *)dev->in_ctlreq,
dev->rcvbuf,
DEFAULT_READ_URB_LENGTH,
resp_avail_cb, dev);
usb_anchor_urb(dev->rcvurb, &dev->rx_submitted);
status = usb_submit_urb(dev->rcvurb, GFP_KERNEL);
if (status) {
dev->get_encap_failure_cnt++;
usb_unanchor_urb(dev->rcvurb);
usb_autopm_put_interface(dev->intf);
if (status != -ENODEV)
dev_err(dev->devicep,
"%s: Error submitting Read URB %d\n",
__func__, status);
goto resubmit_int_urb;
}
return;
resubmit_int_urb:
/*check if it is already submitted in resume*/
if (!dev->inturb->anchor) {
usb_anchor_urb(dev->inturb, &dev->rx_submitted);
status = usb_submit_urb(dev->inturb, GFP_KERNEL);
if (status) {
usb_unanchor_urb(dev->inturb);
if (status != -ENODEV)
dev_err(dev->devicep,
"%s: Error re-submitting Int URB %d\n",
__func__, status);
}
}
}
static void notification_available_cb(struct urb *urb)
{
int status;
struct usb_cdc_notification *ctrl;
struct usb_device *udev;
struct rmnet_ctrl_dev *dev = urb->context;
udev = interface_to_usbdev(dev->intf);
switch (urb->status) {
case 0:
/* if a non-zero length of data was received during unlink */
case -ENOENT:
/*success*/
break;
/*do not resubmit*/
case -ESHUTDOWN:
case -ECONNRESET:
case -EPROTO:
return;
case -EPIPE:
pr_err_ratelimited("%s: Stall on int endpoint\n", __func__);
/* TBD : halt to be cleared in work */
return;
/*resubmit*/
case -EOVERFLOW:
pr_err_ratelimited("%s: Babble error happened\n", __func__);
default:
pr_debug_ratelimited("%s: Non zero urb status = %d\n",
__func__, urb->status);
goto resubmit_int_urb;
}
if (!urb->actual_length)
return;
ctrl = urb->transfer_buffer;
switch (ctrl->bNotificationType) {
case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
dev->resp_avail_cnt++;
/* If MUX is not enabled, wake up the open process
* upon first notify response available.
*/
if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status)) {
set_bit(RMNET_CTRL_DEV_READY, &dev->status);
wake_up(&dev->open_wait_queue);
}
usb_mark_last_busy(udev);
queue_work(dev->wq, &dev->get_encap_work);
return;
default:
dev_err(dev->devicep,
"%s:Command not implemented\n", __func__);
}
resubmit_int_urb:
usb_anchor_urb(urb, &dev->rx_submitted);
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
usb_unanchor_urb(urb);
if (status != -ENODEV)
dev_err(dev->devicep,
"%s: Error re-submitting Int URB %d\n",
__func__, status);
}
return;
}
static void resp_avail_cb(struct urb *urb)
{
struct usb_device *udev;
struct ctrl_pkt_list_elem *list_elem = NULL;
struct rmnet_ctrl_dev *rx_dev, *dev = urb->context;
void *cpkt;
int ch_id, status = 0;
size_t cpkt_size = 0;
udev = interface_to_usbdev(dev->intf);
usb_autopm_put_interface_async(dev->intf);
switch (urb->status) {
case 0:
/*success*/
break;
/*do not resubmit*/
case -ESHUTDOWN:
case -ENOENT:
case -ECONNRESET:
case -EPROTO:
return;
/*resubmit*/
case -EOVERFLOW:
pr_err_ratelimited("%s: Babble error happened\n", __func__);
default:
pr_debug_ratelimited("%s: Non zero urb status = %d\n",
__func__, urb->status);
goto resubmit_int_urb;
}
dev_dbg(dev->devicep, "Read %d bytes for %s\n",
urb->actual_length, dev->name);
cpkt = urb->transfer_buffer;
cpkt_size = urb->actual_length;
if (!cpkt_size) {
dev->zlp_cnt++;
dev_dbg(dev->devicep, "%s: zero length pkt received\n",
__func__);
goto resubmit_int_urb;
}
list_elem = kmalloc(sizeof(struct ctrl_pkt_list_elem), GFP_ATOMIC);
if (!list_elem) {
dev_err(dev->devicep, "%s: list_elem alloc failed\n", __func__);
return;
}
list_elem->cpkt.data = kmalloc(cpkt_size, GFP_ATOMIC);
if (!list_elem->cpkt.data) {
dev_err(dev->devicep, "%s: list_elem->data alloc failed\n",
__func__);
kfree(list_elem);
return;
}
memcpy(list_elem->cpkt.data, cpkt, cpkt_size);
list_elem->cpkt.data_size = cpkt_size;
rx_dev = dev;
if (test_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status)) {
ch_id = rmnet_usb_ctrl_dmux(list_elem);
if (ch_id < 0) {
kfree(list_elem->cpkt.data);
kfree(list_elem);
goto resubmit_int_urb;
}
rx_dev = &ctrl_devs[dev->id][ch_id];
}
rx_dev->get_encap_resp_cnt++;
spin_lock(&rx_dev->rx_lock);
list_add_tail(&list_elem->list, &rx_dev->rx_list);
spin_unlock(&rx_dev->rx_lock);
wake_up(&rx_dev->read_wait_queue);
resubmit_int_urb:
/*check if it is already submitted in resume*/
if (!dev->inturb->anchor) {
usb_mark_last_busy(udev);
usb_anchor_urb(dev->inturb, &dev->rx_submitted);
status = usb_submit_urb(dev->inturb, GFP_ATOMIC);
if (status) {
usb_unanchor_urb(dev->inturb);
if (status != -ENODEV)
dev_err(dev->devicep,
"%s: Error re-submitting Int URB %d\n",
__func__, status);
}
}
}
int rmnet_usb_ctrl_start_rx(struct rmnet_ctrl_dev *dev)
{
int retval = 0;
usb_anchor_urb(dev->inturb, &dev->rx_submitted);
retval = usb_submit_urb(dev->inturb, GFP_KERNEL);
if (retval < 0) {
usb_unanchor_urb(dev->inturb);
if (retval != -ENODEV)
dev_err(dev->devicep,
"%s Intr submit %d\n", __func__, retval);
}
return retval;
}
static int rmnet_usb_ctrl_alloc_rx(struct rmnet_ctrl_dev *dev)
{
dev->rcvurb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->rcvurb) {
pr_err("%s: Error allocating read urb\n", __func__);
goto nomem;
}
dev->rcvbuf = kmalloc(DEFAULT_READ_URB_LENGTH, GFP_KERNEL);
if (!dev->rcvbuf) {
pr_err("%s: Error allocating read buffer\n", __func__);
goto nomem;
}
dev->in_ctlreq = kmalloc(sizeof(*dev->in_ctlreq), GFP_KERNEL);
if (!dev->in_ctlreq) {
pr_err("%s: Error allocating setup packet buffer\n", __func__);
goto nomem;
}
return 0;
nomem:
usb_free_urb(dev->rcvurb);
kfree(dev->rcvbuf);
kfree(dev->in_ctlreq);
return -ENOMEM;
}
static int rmnet_usb_ctrl_write_cmd(struct rmnet_ctrl_dev *dev)
{
struct usb_device *udev;
if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status))
return -ENODEV;
udev = interface_to_usbdev(dev->intf);
dev->set_ctrl_line_state_cnt++;
return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_CDC_REQ_SET_CONTROL_LINE_STATE,
(USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE),
dev->cbits_tomdm,
dev->intf->cur_altsetting->desc.bInterfaceNumber,
NULL, 0, USB_CTRL_SET_TIMEOUT);
}
static void ctrl_write_callback(struct urb *urb)
{
struct ctrl_pkt *cpkt = urb->context;
struct rmnet_ctrl_dev *dev = cpkt->ctxt;
if (urb->status) {
dev->tx_ctrl_err_cnt++;
pr_debug_ratelimited("Write status/size %d/%d\n",
urb->status, urb->actual_length);
}
kfree(urb->setup_packet);
kfree(urb->transfer_buffer);
usb_free_urb(urb);
kfree(cpkt);
usb_autopm_put_interface_async(dev->intf);
}
static int rmnet_usb_ctrl_write(struct rmnet_ctrl_dev *dev,
struct ctrl_pkt *cpkt, size_t size)
{
int result;
struct urb *sndurb;
struct usb_ctrlrequest *out_ctlreq;
struct usb_device *udev;
if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status))
return -ENETRESET;
udev = interface_to_usbdev(dev->intf);
sndurb = usb_alloc_urb(0, GFP_KERNEL);
if (!sndurb) {
dev_err(dev->devicep, "Error allocating read urb\n");
return -ENOMEM;
}
out_ctlreq = kmalloc(sizeof(*out_ctlreq), GFP_KERNEL);
if (!out_ctlreq) {
usb_free_urb(sndurb);
dev_err(dev->devicep, "Error allocating setup packet buffer\n");
return -ENOMEM;
}
/* CDC Send Encapsulated Request packet */
out_ctlreq->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS |
USB_RECIP_INTERFACE);
out_ctlreq->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
out_ctlreq->wValue = 0;
out_ctlreq->wIndex = dev->intf->cur_altsetting->desc.bInterfaceNumber;
out_ctlreq->wLength = cpu_to_le16(cpkt->data_size);
usb_fill_control_urb(sndurb, udev,
usb_sndctrlpipe(udev, 0),
(unsigned char *)out_ctlreq, (void *)cpkt->data,
cpkt->data_size, ctrl_write_callback, cpkt);
result = usb_autopm_get_interface(dev->intf);
if (result < 0) {
dev_dbg(dev->devicep, "%s: Unable to resume interface: %d\n",
__func__, result);
/*
* Revisit: if (result == -EPERM)
* rmnet_usb_suspend(dev->intf, PMSG_SUSPEND);
*/
usb_free_urb(sndurb);
kfree(out_ctlreq);
return result;
}
usb_anchor_urb(sndurb, &dev->tx_submitted);
dev->snd_encap_cmd_cnt++;
result = usb_submit_urb(sndurb, GFP_KERNEL);
if (result < 0) {
if (result != -ENODEV)
dev_err(dev->devicep,
"%s: Submit URB error %d\n",
__func__, result);
dev->snd_encap_cmd_cnt--;
usb_autopm_put_interface(dev->intf);
usb_unanchor_urb(sndurb);
usb_free_urb(sndurb);
kfree(out_ctlreq);
return result;
}
return size;
}
static int rmnet_ctl_open(struct inode *inode, struct file *file)
{
int retval = 0;
struct rmnet_ctrl_dev *dev =
container_of(inode->i_cdev, struct rmnet_ctrl_dev, cdev);
if (!dev)
return -ENODEV;
if (test_bit(RMNET_CTRL_DEV_OPEN, &dev->status))
goto already_opened;
if (dev->mdm_wait_timeout &&
!test_bit(RMNET_CTRL_DEV_READY, &dev->status)) {
retval = wait_event_interruptible_timeout(
dev->open_wait_queue,
test_bit(RMNET_CTRL_DEV_READY, &dev->status),
msecs_to_jiffies(dev->mdm_wait_timeout * 1000));
if (retval == 0) {
dev_err(dev->devicep, "%s: Timeout opening %s\n",
__func__, dev->name);
return -ETIMEDOUT;
} else if (retval < 0) {
dev_err(dev->devicep, "%s: Error waiting for %s\n",
__func__, dev->name);
return retval;
}
}
if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status)) {
dev_dbg(dev->devicep, "%s: Connection timedout opening %s\n",
__func__, dev->name);
return -ETIMEDOUT;
}
set_bit(RMNET_CTRL_DEV_OPEN, &dev->status);
file->private_data = dev;
already_opened:
DBG("%s: Open called for %s\n", __func__, dev->name);
return 0;
}
static int rmnet_ctl_release(struct inode *inode, struct file *file)
{
struct ctrl_pkt_list_elem *list_elem = NULL;
struct rmnet_ctrl_dev *dev;
unsigned long flag;
int time;
dev = file->private_data;
if (!dev)
return -ENODEV;
DBG("%s Called on %s device\n", __func__, dev->name);
spin_lock_irqsave(&dev->rx_lock, flag);
while (!list_empty(&dev->rx_list)) {
list_elem = list_first_entry(
&dev->rx_list,
struct ctrl_pkt_list_elem,
list);
list_del(&list_elem->list);
kfree(list_elem->cpkt.data);
kfree(list_elem);
}
spin_unlock_irqrestore(&dev->rx_lock, flag);
clear_bit(RMNET_CTRL_DEV_OPEN, &dev->status);
time = usb_wait_anchor_empty_timeout(&dev->tx_submitted,
UNLINK_TIMEOUT_MS);
if (!time)
usb_kill_anchored_urbs(&dev->tx_submitted);
file->private_data = NULL;
return 0;
}
static unsigned int rmnet_ctl_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
struct rmnet_ctrl_dev *dev;
dev = file->private_data;
if (!dev)
return POLLERR;
poll_wait(file, &dev->read_wait_queue, wait);
if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status)) {
dev_dbg(dev->devicep, "%s: Device not connected\n",
__func__);
return POLLERR;
}
if (!list_empty(&dev->rx_list))
mask |= POLLIN | POLLRDNORM;
return mask;
}
static ssize_t rmnet_ctl_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
int retval = 0;
int bytes_to_read;
unsigned int hdr_len = 0;
struct rmnet_ctrl_dev *dev;
struct ctrl_pkt_list_elem *list_elem = NULL;
unsigned long flags;
dev = file->private_data;
if (!dev)
return -ENODEV;
DBG("%s: Read from %s\n", __func__, dev->name);
ctrl_read:
if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status)) {
dev_dbg(dev->devicep, "%s: Device not connected\n",
__func__);
return -ENETRESET;
}
spin_lock_irqsave(&dev->rx_lock, flags);
if (list_empty(&dev->rx_list)) {
spin_unlock_irqrestore(&dev->rx_lock, flags);
retval = wait_event_interruptible(dev->read_wait_queue,
!list_empty(&dev->rx_list) ||
!test_bit(RMNET_CTRL_DEV_READY, &dev->status));
if (retval < 0)
return retval;
goto ctrl_read;
}
list_elem = list_first_entry(&dev->rx_list,
struct ctrl_pkt_list_elem, list);
bytes_to_read = (uint32_t)(list_elem->cpkt.data_size);
if (bytes_to_read > count) {
spin_unlock_irqrestore(&dev->rx_lock, flags);
dev_err(dev->devicep, "%s: Packet size %d > buf size %d\n",
__func__, bytes_to_read, count);
return -ENOMEM;
}
spin_unlock_irqrestore(&dev->rx_lock, flags);
if (test_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status))
hdr_len = sizeof(struct mux_hdr);
if (copy_to_user(buf, list_elem->cpkt.data + hdr_len, bytes_to_read)) {
dev_err(dev->devicep,
"%s: copy_to_user failed for %s\n",
__func__, dev->name);
return -EFAULT;
}
spin_lock_irqsave(&dev->rx_lock, flags);
list_del(&list_elem->list);
spin_unlock_irqrestore(&dev->rx_lock, flags);
kfree(list_elem->cpkt.data);
kfree(list_elem);
DBG("%s: Returning %d bytes to %s\n", __func__, bytes_to_read,
dev->name);
DUMP_BUFFER("Read: ", bytes_to_read, buf);
return bytes_to_read;
}
static ssize_t rmnet_ctl_write(struct file *file, const char __user * buf,
size_t size, loff_t *pos)
{
int status;
size_t total_len;
void *wbuf;
void *actual_data;
struct ctrl_pkt *cpkt;
struct rmnet_ctrl_dev *dev = file->private_data;
if (!dev)
return -ENODEV;
if (size <= 0)
return -EINVAL;
if (!test_bit(RMNET_CTRL_DEV_READY, &dev->status))
return -ENETRESET;
DBG("%s: Writing %i bytes on %s\n", __func__, size, dev->name);
total_len = size;
if (test_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status))
total_len += sizeof(struct mux_hdr) + MAX_PAD_BYTES(4);
wbuf = kmalloc(total_len, GFP_KERNEL);
if (!wbuf)
return -ENOMEM;
cpkt = kmalloc(sizeof(struct ctrl_pkt), GFP_KERNEL);
if (!cpkt) {
kfree(wbuf);
return -ENOMEM;
}
actual_data = cpkt->data = wbuf;
cpkt->data_size = total_len;
cpkt->ctxt = dev;
if (test_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status)) {
actual_data = wbuf + sizeof(struct mux_hdr);
rmnet_usb_ctrl_mux(dev->ch_id, cpkt);
}
status = copy_from_user(actual_data, buf, size);
if (status) {
dev_err(dev->devicep,
"%s: Unable to copy data from userspace %d\n",
__func__, status);
kfree(wbuf);
kfree(cpkt);
return -EFAULT;
}
DUMP_BUFFER("Write: ", size, buf);
status = rmnet_usb_ctrl_write(dev, cpkt, size);
if (status == size)
return size;
return status;
}
static int rmnet_ctrl_tiocmset(struct rmnet_ctrl_dev *dev, unsigned int set,
unsigned int clear)
{
int retval;
mutex_lock(&dev->dev_lock);
if (set & TIOCM_DTR)
dev->cbits_tomdm |= ACM_CTRL_DTR;
/*
* TBD if (set & TIOCM_RTS)
* dev->cbits_tomdm |= ACM_CTRL_RTS;
*/
if (clear & TIOCM_DTR)
dev->cbits_tomdm &= ~ACM_CTRL_DTR;
/*
* (clear & TIOCM_RTS)
* dev->cbits_tomdm &= ~ACM_CTRL_RTS;
*/
mutex_unlock(&dev->dev_lock);
retval = usb_autopm_get_interface(dev->intf);
if (retval < 0) {
dev_dbg(dev->devicep, "%s: Unable to resume interface: %d\n",
__func__, retval);
return retval;
}
retval = rmnet_usb_ctrl_write_cmd(dev);
usb_autopm_put_interface(dev->intf);
return retval;
}
static int rmnet_ctrl_tiocmget(struct rmnet_ctrl_dev *dev)
{
int ret;
mutex_lock(&dev->dev_lock);
ret =
/*
* TBD(dev->cbits_tolocal & ACM_CTRL_DSR ? TIOCM_DSR : 0) |
* (dev->cbits_tolocal & ACM_CTRL_CTS ? TIOCM_CTS : 0) |
*/
(dev->cbits_tolocal & ACM_CTRL_CD ? TIOCM_CD : 0) |
/*
* TBD (dev->cbits_tolocal & ACM_CTRL_RI ? TIOCM_RI : 0) |
*(dev->cbits_tomdm & ACM_CTRL_RTS ? TIOCM_RTS : 0) |
*/
(dev->cbits_tomdm & ACM_CTRL_DTR ? TIOCM_DTR : 0);
mutex_unlock(&dev->dev_lock);
return ret;
}
static long rmnet_ctrl_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int ret;
struct rmnet_ctrl_dev *dev;
dev = file->private_data;
if (!dev)
return -ENODEV;
switch (cmd) {
case TIOCMGET:
ret = rmnet_ctrl_tiocmget(dev);
break;
case TIOCMSET:
ret = rmnet_ctrl_tiocmset(dev, arg, ~arg);
break;
default:
ret = -EINVAL;
}
return ret;
}
static const struct file_operations ctrldev_fops = {
.owner = THIS_MODULE,
.read = rmnet_ctl_read,
.write = rmnet_ctl_write,
.unlocked_ioctl = rmnet_ctrl_ioctl,
.open = rmnet_ctl_open,
.release = rmnet_ctl_release,
.poll = rmnet_ctl_poll,
};
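/*
 * Illustrative userspace sketch (not from this driver): one way a client
 * could drive a control node exposed by ctrldev_fops above. The device path
 * and request bytes are assumptions for illustration; note that this
 * driver's TIOCMSET consumes the modem-control bitmask directly in arg,
 * rather than through a pointer as tty drivers do.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int example_ctrl_session(const char *path)
{
	char resp[2048];
	const unsigned char req[] = { 0x01, 0x0f, 0x00 }; /* hypothetical message */
	struct pollfd pfd;
	ssize_t n = -1;
	int fd;

	fd = open(path, O_RDWR);		/* e.g. "/dev/hsicctl0" (assumed name) */
	if (fd < 0)
		return -1;

	ioctl(fd, TIOCMSET, TIOCM_DTR);		/* assert DTR toward the modem */
	write(fd, req, sizeof(req));		/* queued as an encapsulated command */

	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 5000) > 0)		/* wait for a response packet */
		n = read(fd, resp, sizeof(resp)); /* one whole packet per read() */

	close(fd);
	return n < 0 ? -1 : 0;
}
#endif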
int rmnet_usb_ctrl_probe(struct usb_interface *intf,
struct usb_host_endpoint *int_in,
unsigned long rmnet_devnum,
unsigned long *data)
{
struct rmnet_ctrl_dev *dev = NULL;
u16 wMaxPacketSize;
struct usb_endpoint_descriptor *ep;
struct usb_device *udev = interface_to_usbdev(intf);
int interval;
int ret = 0, n;
/* Find next available ctrl_dev */
for (n = 0; n < insts_per_dev; n++) {
dev = &ctrl_devs[rmnet_devnum][n];
if (!dev->claimed)
break;
}
if (!dev || n == insts_per_dev) {
pr_err("%s: No available ctrl devices for %lu\n", __func__,
rmnet_devnum);
return -ENODEV;
}
dev->int_pipe = usb_rcvintpipe(udev,
int_in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
dev->intf = intf;
dev->id = rmnet_devnum;
dev->snd_encap_cmd_cnt = 0;
dev->get_encap_resp_cnt = 0;
dev->resp_avail_cnt = 0;
dev->tx_ctrl_err_cnt = 0;
dev->set_ctrl_line_state_cnt = 0;
dev->inturb = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->inturb) {
dev_err(dev->devicep, "Error allocating int urb\n");
return -ENOMEM;
}
/*use max pkt size from ep desc*/
ep = &dev->intf->cur_altsetting->endpoint[0].desc;
wMaxPacketSize = le16_to_cpu(ep->wMaxPacketSize);
dev->intbuf = kmalloc(wMaxPacketSize, GFP_KERNEL);
if (!dev->intbuf) {
usb_free_urb(dev->inturb);
dev_err(dev->devicep, "Error allocating int buffer\n");
return -ENOMEM;
}
dev->in_ctlreq->bRequestType =
(USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
dev->in_ctlreq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
dev->in_ctlreq->wValue = 0;
dev->in_ctlreq->wIndex =
dev->intf->cur_altsetting->desc.bInterfaceNumber;
dev->in_ctlreq->wLength = cpu_to_le16(DEFAULT_READ_URB_LENGTH);
interval = max((int)int_in->desc.bInterval,
(udev->speed == USB_SPEED_HIGH) ? HS_INTERVAL
: FS_LS_INTERVAL);
usb_fill_int_urb(dev->inturb, udev,
dev->int_pipe,
dev->intbuf, wMaxPacketSize,
notification_available_cb, dev, interval);
usb_mark_last_busy(udev);
ret = rmnet_usb_ctrl_start_rx(dev);
if (ret) {
usb_free_urb(dev->inturb);
kfree(dev->intbuf);
return ret;
}
dev->claimed = true;
/*mux info is passed to data parameter*/
if (*data)
set_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status);
*data = (unsigned long)dev;
/* If MUX is enabled, wakeup the open process here */
if (test_bit(RMNET_CTRL_DEV_MUX_EN, &dev->status)) {
set_bit(RMNET_CTRL_DEV_READY, &dev->status);
wake_up(&dev->open_wait_queue);
}
return 0;
}
void rmnet_usb_ctrl_disconnect(struct rmnet_ctrl_dev *dev)
{
dev->claimed = false;
clear_bit(RMNET_CTRL_DEV_READY, &dev->status);
mutex_lock(&dev->dev_lock);
/*TBD: for now just update CD status*/
dev->cbits_tolocal = ~ACM_CTRL_CD;
dev->cbits_tomdm = ~ACM_CTRL_DTR;
mutex_unlock(&dev->dev_lock);
wake_up(&dev->read_wait_queue);
cancel_work_sync(&dev->get_encap_work);
usb_kill_anchored_urbs(&dev->tx_submitted);
usb_kill_anchored_urbs(&dev->rx_submitted);
usb_free_urb(dev->inturb);
dev->inturb = NULL;
kfree(dev->intbuf);
dev->intbuf = NULL;
}
#if defined(CONFIG_DEBUG_FS)
#define DEBUG_BUF_SIZE 4096
static ssize_t rmnet_usb_ctrl_read_stats(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
struct rmnet_ctrl_dev *dev;
char *buf;
int ret;
int i, n;
int temp = 0;
	buf = kzalloc(DEBUG_BUF_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
for (i = 0; i < num_devs; i++) {
for (n = 0; n < insts_per_dev; n++) {
dev = &ctrl_devs[i][n];
temp += scnprintf(buf + temp, DEBUG_BUF_SIZE - temp,
"\n#ctrl_dev: %p Name: %s#\n"
"snd encap cmd cnt %u\n"
"resp avail cnt: %u\n"
"get encap resp cnt: %u\n"
"set ctrl line state cnt: %u\n"
"tx_err_cnt: %u\n"
"cbits_tolocal: %d\n"
"cbits_tomdm: %d\n"
"mdm_wait_timeout: %u\n"
"zlp_cnt: %u\n"
"get_encap_failure_cnt %u\n"
"RMNET_CTRL_DEV_MUX_EN: %d\n"
"RMNET_CTRL_DEV_OPEN: %d\n"
"RMNET_CTRL_DEV_READY: %d\n",
dev, dev->name,
dev->snd_encap_cmd_cnt,
dev->resp_avail_cnt,
dev->get_encap_resp_cnt,
dev->set_ctrl_line_state_cnt,
dev->tx_ctrl_err_cnt,
dev->cbits_tolocal,
dev->cbits_tomdm,
dev->mdm_wait_timeout,
dev->zlp_cnt,
dev->get_encap_failure_cnt,
test_bit(RMNET_CTRL_DEV_MUX_EN,
&dev->status),
test_bit(RMNET_CTRL_DEV_OPEN,
&dev->status),
test_bit(RMNET_CTRL_DEV_READY,
&dev->status));
}
}
ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);
kfree(buf);
return ret;
}
static ssize_t rmnet_usb_ctrl_reset_stats(struct file *file, const char __user *
buf, size_t count, loff_t *ppos)
{
struct rmnet_ctrl_dev *dev;
int i, n;
for (i = 0; i < num_devs; i++) {
for (n = 0; n < insts_per_dev; n++) {
dev = &ctrl_devs[i][n];
dev->snd_encap_cmd_cnt = 0;
dev->resp_avail_cnt = 0;
dev->get_encap_resp_cnt = 0;
dev->set_ctrl_line_state_cnt = 0;
dev->tx_ctrl_err_cnt = 0;
dev->zlp_cnt = 0;
}
}
return count;
}
const struct file_operations rmnet_usb_ctrl_stats_ops = {
.read = rmnet_usb_ctrl_read_stats,
.write = rmnet_usb_ctrl_reset_stats,
};
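/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug):
 *	cat /sys/kernel/debug/rmnet_usb_ctrl/status	# dump per-device counters
 *	echo 1 > /sys/kernel/debug/rmnet_usb_ctrl/status	# any write resets them
 */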
struct dentry *usb_ctrl_dent;
struct dentry *usb_ctrl_dfile;
static void rmnet_usb_ctrl_debugfs_init(void)
{
	usb_ctrl_dent = debugfs_create_dir("rmnet_usb_ctrl", NULL);
	if (!usb_ctrl_dent || IS_ERR(usb_ctrl_dent))
		return;
	usb_ctrl_dfile = debugfs_create_file("status", 0644, usb_ctrl_dent,
				NULL, &rmnet_usb_ctrl_stats_ops);
if (!usb_ctrl_dfile || IS_ERR(usb_ctrl_dfile))
debugfs_remove(usb_ctrl_dent);
}
static void rmnet_usb_ctrl_debugfs_exit(void)
{
debugfs_remove(usb_ctrl_dfile);
debugfs_remove(usb_ctrl_dent);
}
#else
static void rmnet_usb_ctrl_debugfs_init(void) { }
static void rmnet_usb_ctrl_debugfs_exit(void) { }
#endif
int rmnet_usb_ctrl_init(int no_rmnet_devs, int no_rmnet_insts_per_dev)
{
struct rmnet_ctrl_dev *dev;
int i, n;
int status;
num_devs = no_rmnet_devs;
insts_per_dev = no_rmnet_insts_per_dev;
ctrl_devs = kzalloc(num_devs * sizeof(*ctrl_devs), GFP_KERNEL);
if (!ctrl_devs)
return -ENOMEM;
for (i = 0; i < num_devs; i++) {
ctrl_devs[i] = kzalloc(insts_per_dev * sizeof(*ctrl_devs[i]),
GFP_KERNEL);
if (!ctrl_devs[i])
return -ENOMEM;
status = alloc_chrdev_region(&ctrldev_num[i], 0, insts_per_dev,
rmnet_dev_names[i]);
		if (status < 0) {
pr_err("ERROR:%s: alloc_chrdev_region() ret %i.\n",
__func__, status);
return status;
}
ctrldev_classp[i] = class_create(THIS_MODULE,
rmnet_dev_names[i]);
if (IS_ERR(ctrldev_classp[i])) {
pr_err("ERROR:%s: class_create() ENOMEM\n", __func__);
status = PTR_ERR(ctrldev_classp[i]);
return status;
}
for (n = 0; n < insts_per_dev; n++) {
dev = &ctrl_devs[i][n];
/*for debug purpose*/
snprintf(dev->name, CTRL_DEV_MAX_LEN, "%s%d",
rmnet_dev_names[i], n);
dev->wq = create_singlethread_workqueue(dev->name);
if (!dev->wq) {
pr_err("unable to allocate workqueue");
kfree(dev);
return -ENOMEM;
}
dev->ch_id = n;
mutex_init(&dev->dev_lock);
spin_lock_init(&dev->rx_lock);
init_waitqueue_head(&dev->read_wait_queue);
init_waitqueue_head(&dev->open_wait_queue);
INIT_LIST_HEAD(&dev->rx_list);
init_usb_anchor(&dev->tx_submitted);
init_usb_anchor(&dev->rx_submitted);
INIT_WORK(&dev->get_encap_work, get_encap_work);
cdev_init(&dev->cdev, &ctrldev_fops);
dev->cdev.owner = THIS_MODULE;
status = cdev_add(&dev->cdev, (ctrldev_num[i] + n), 1);
if (status) {
pr_err("%s: cdev_add() ret %i\n", __func__,
status);
destroy_workqueue(dev->wq);
return status;
}
dev->devicep = device_create(ctrldev_classp[i], NULL,
(ctrldev_num[i] + n), NULL,
"%s%d", rmnet_dev_names[i],
n);
if (IS_ERR(dev->devicep)) {
pr_err("%s: device_create() returned %ld\n",
__func__, PTR_ERR(dev->devicep));
cdev_del(&dev->cdev);
destroy_workqueue(dev->wq);
return PTR_ERR(dev->devicep);
}
/*create /sys/class/hsicctl/hsicctlx/modem_wait*/
status = device_create_file(dev->devicep,
&dev_attr_modem_wait);
if (status) {
device_destroy(dev->devicep->class,
dev->devicep->devt);
cdev_del(&dev->cdev);
destroy_workqueue(dev->wq);
return status;
}
dev_set_drvdata(dev->devicep, dev);
status = rmnet_usb_ctrl_alloc_rx(dev);
if (status) {
device_remove_file(dev->devicep,
&dev_attr_modem_wait);
device_destroy(dev->devicep->class,
dev->devicep->devt);
cdev_del(&dev->cdev);
destroy_workqueue(dev->wq);
return status;
}
}
}
rmnet_usb_ctrl_debugfs_init();
pr_info("rmnet usb ctrl Initialized.\n");
return 0;
}
static void free_rmnet_ctrl_dev(struct rmnet_ctrl_dev *dev)
{
kfree(dev->in_ctlreq);
kfree(dev->rcvbuf);
kfree(dev->intbuf);
usb_free_urb(dev->rcvurb);
usb_free_urb(dev->inturb);
device_remove_file(dev->devicep, &dev_attr_modem_wait);
cdev_del(&dev->cdev);
destroy_workqueue(dev->wq);
device_destroy(dev->devicep->class,
dev->devicep->devt);
}
void rmnet_usb_ctrl_exit(int no_rmnet_devs, int no_rmnet_insts_per_dev)
{
int i, n;
for (i = 0; i < no_rmnet_devs; i++) {
for (n = 0; n < no_rmnet_insts_per_dev; n++)
free_rmnet_ctrl_dev(&ctrl_devs[i][n]);
kfree(ctrl_devs[i]);
class_destroy(ctrldev_classp[i]);
if (ctrldev_num[i])
unregister_chrdev_region(ctrldev_num[i], insts_per_dev);
}
kfree(ctrl_devs);
rmnet_usb_ctrl_debugfs_exit();
}
| gpl-2.0 |
omega-roms/G900F_Omega_Kernel_LL_5.0 | kernel/power/qos.c | 1991 | 15087 | /*
* This module exposes the interface to kernel space for specifying
* QoS dependencies. It provides infrastructure for registration of:
*
* Dependents on a QoS value : register requests
* Watchers of QoS value : get notified when target QoS value changes
*
* This QoS design is best effort based. Dependents register their QoS needs.
* Watchers register to keep track of the current QoS needs of the system.
*
 * There are 3 basic classes of QoS parameter: latency, timeout, throughput,
 * each of which has defined units:
* latency: usec
* timeout: usec <-- currently not used.
* throughput: kbs (kilo byte / sec)
*
* There are lists of pm_qos_objects each one wrapping requests, notifiers
*
 * User mode requests on a QOS parameter register themselves to the
 * subsystem by opening the device node /dev/... and writing their request to
 * the node. As long as the process holds a file handle open to the node the
 * client continues to be accounted for. Upon file release the usermode
 * request is removed and a new qos target is computed. This way the request
 * an application holds is cleaned up when it closes the file pointer or
 * exits, and the pm_qos_object gets an opportunity to recompute the target.
*
* Mark Gross <mgross@linux.intel.com>
*/
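/*
 * Illustrative userspace sketch of the /dev interface described above (a
 * hedged example, not part of this file): write a binary s32 to request a
 * latency target; the request lives until the fd is closed.
 */
#if 0
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int example_hold_cpu_dma_latency(unsigned int seconds)
{
	int32_t target_us = 20;	/* request <= 20 usec CPU DMA latency */
	int fd = open("/dev/cpu_dma_latency", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, &target_us, sizeof(target_us)) != sizeof(target_us)) {
		close(fd);
		return -1;
	}
	sleep(seconds);	/* constraint is accounted for while the fd stays open */
	close(fd);	/* release: request removed, target recomputed */
	return 0;
}
#endif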
/*#define DEBUG*/
#include <linux/pm_qos.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/export.h>
/*
* locking rule: all changes to constraints or notifiers lists
* or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
* held, taken with _irqsave. One lock to rule them all
*/
struct pm_qos_object {
struct pm_qos_constraints *constraints;
struct miscdevice pm_qos_power_miscdev;
char *name;
};
static DEFINE_SPINLOCK(pm_qos_lock);
static struct pm_qos_object null_pm_qos;
static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_constraints cpu_dma_constraints = {
.list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN,
.notifiers = &cpu_dma_lat_notifier,
};
static struct pm_qos_object cpu_dma_pm_qos = {
.constraints = &cpu_dma_constraints,
.name = "cpu_dma_latency",
};
static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_constraints network_lat_constraints = {
.list = PLIST_HEAD_INIT(network_lat_constraints.list),
.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
.type = PM_QOS_MIN,
.notifiers = &network_lat_notifier,
};
static struct pm_qos_object network_lat_pm_qos = {
.constraints = &network_lat_constraints,
.name = "network_latency",
};
static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_constraints network_tput_constraints = {
.list = PLIST_HEAD_INIT(network_tput_constraints.list),
.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
.type = PM_QOS_MAX,
.notifiers = &network_throughput_notifier,
};
static struct pm_qos_object network_throughput_pm_qos = {
.constraints = &network_tput_constraints,
.name = "network_throughput",
};
static struct pm_qos_object *pm_qos_array[] = {
&null_pm_qos,
&cpu_dma_pm_qos,
&network_lat_pm_qos,
&network_throughput_pm_qos
};
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos);
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos);
static int pm_qos_power_open(struct inode *inode, struct file *filp);
static int pm_qos_power_release(struct inode *inode, struct file *filp);
static const struct file_operations pm_qos_power_fops = {
.write = pm_qos_power_write,
.read = pm_qos_power_read,
.open = pm_qos_power_open,
.release = pm_qos_power_release,
.llseek = noop_llseek,
};
/* unlocked internal variant */
static inline int pm_qos_get_value(struct pm_qos_constraints *c)
{
if (plist_head_empty(&c->list))
return c->default_value;
switch (c->type) {
case PM_QOS_MIN:
return plist_first(&c->list)->prio;
case PM_QOS_MAX:
return plist_last(&c->list)->prio;
default:
/* runtime check for not using enum */
BUG();
}
}
s32 pm_qos_read_value(struct pm_qos_constraints *c)
{
return c->target_value;
}
static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
{
c->target_value = value;
}
/**
* pm_qos_update_target - manages the constraints list and calls the notifiers
* if needed
* @c: constraints data struct
* @node: request to add to the list, to update or to remove
* @action: action to take on the constraints list
* @value: value of the request to add or update
*
* This function returns 1 if the aggregated constraint value has changed, 0
* otherwise.
*/
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
enum pm_qos_req_action action, int value)
{
unsigned long flags;
int prev_value, curr_value, new_value;
spin_lock_irqsave(&pm_qos_lock, flags);
prev_value = pm_qos_get_value(c);
if (value == PM_QOS_DEFAULT_VALUE)
new_value = c->default_value;
else
new_value = value;
switch (action) {
case PM_QOS_REMOVE_REQ:
plist_del(node, &c->list);
break;
case PM_QOS_UPDATE_REQ:
/*
* to change the list, we atomically remove, reinit
* with new value and add, then see if the extremal
* changed
*/
		plist_del(node, &c->list);
		/* fall through */
	case PM_QOS_ADD_REQ:
plist_node_init(node, new_value);
plist_add(node, &c->list);
break;
default:
/* no action */
;
}
curr_value = pm_qos_get_value(c);
pm_qos_set_value(c, curr_value);
spin_unlock_irqrestore(&pm_qos_lock, flags);
if (prev_value != curr_value) {
blocking_notifier_call_chain(c->notifiers,
(unsigned long)curr_value,
NULL);
return 1;
} else {
return 0;
}
}
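/*
 * Worked example of the aggregation above: on a PM_QOS_MIN class such as
 * cpu_dma_latency, outstanding requests of {100, 33, 2000} usec aggregate to
 * 33 (plist_first() of the ascending list); on the PM_QOS_MAX class
 * network_throughput the same set aggregates to 2000 via plist_last().
 * Notifiers only fire when the aggregate actually changes.
 */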
/**
* pm_qos_request - returns current system wide qos expectation
* @pm_qos_class: identification of which qos value is requested
*
* This function returns the current target value.
*/
int pm_qos_request(int pm_qos_class)
{
return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
}
EXPORT_SYMBOL_GPL(pm_qos_request);
int pm_qos_request_active(struct pm_qos_request *req)
{
return req->pm_qos_class != 0;
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
/**
* pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
* @work: work struct for the delayed work (timeout)
*
* This cancels the timeout request by falling back to the default at timeout.
*/
static void pm_qos_work_fn(struct work_struct *work)
{
s32 new_value = PM_QOS_DEFAULT_VALUE;
struct pm_qos_request *req = container_of(to_delayed_work(work),
struct pm_qos_request,
work);
if (!req || !pm_qos_request_active(req))
return;
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_UPDATE_REQ, new_value);
}
/**
* pm_qos_add_request - inserts new qos request into the list
* @req: pointer to a preallocated handle
* @pm_qos_class: identifies which list of qos request to use
* @value: defines the qos request
*
* This function inserts a new entry in the pm_qos_class list of requested qos
* performance characteristics. It recomputes the aggregate QoS expectations
* for the pm_qos_class of parameters and initializes the pm_qos_request
* handle. Caller needs to save this handle for later use in updates and
* removal.
*/
void pm_qos_add_request(struct pm_qos_request *req,
int pm_qos_class, s32 value)
{
if (!req) /*guard against callers passing in null */
return;
if (pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
return;
}
req->pm_qos_class = pm_qos_class;
INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
&req->node, PM_QOS_ADD_REQ, value);
}
EXPORT_SYMBOL_GPL(pm_qos_add_request);
/**
* pm_qos_update_request - modifies an existing qos request
* @req : handle to list element holding a pm_qos request to use
* @value: defines the qos request
*
* Updates an existing qos request for the pm_qos_class of parameters along
* with updating the target pm_qos_class value.
*
* Attempts are made to make this code callable on hot code paths.
*/
void pm_qos_update_request(struct pm_qos_request *req,
s32 new_value)
{
if (!req) /*guard against callers passing in null */
return;
if (!pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
return;
}
if (delayed_work_pending(&req->work))
cancel_delayed_work_sync(&req->work);
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_UPDATE_REQ, new_value);
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
/**
* pm_qos_update_request_timeout - modifies an existing qos request temporarily.
* @req : handle to list element holding a pm_qos request to use
 * @new_value: defines the temporary qos request
* @timeout_us: the effective duration of this qos request in usecs.
*
* After timeout_us, this qos request is cancelled automatically.
*/
void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
unsigned long timeout_us)
{
if (!req)
return;
if (WARN(!pm_qos_request_active(req),
"%s called for unknown object.", __func__))
return;
if (delayed_work_pending(&req->work))
cancel_delayed_work_sync(&req->work);
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_UPDATE_REQ, new_value);
schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
}
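/*
 * Example (a sketch; assumes "example_req" was previously added): hold a
 * 34 usec cap for 2 ms around a burst, then fall back to the class default
 * automatically via pm_qos_work_fn():
 *
 *	pm_qos_update_request_timeout(&example_req, 34, 2000);
 */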
/**
* pm_qos_remove_request - modifies an existing qos request
* @req: handle to request list element
*
* Will remove pm qos request from the list of constraints and
* recompute the current target value for the pm_qos_class. Call this
* on slow code paths.
*/
void pm_qos_remove_request(struct pm_qos_request *req)
{
if (!req) /*guard against callers passing in null */
return;
if (!pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
return;
}
if (delayed_work_pending(&req->work))
cancel_delayed_work_sync(&req->work);
pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_REMOVE_REQ,
PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
EXPORT_SYMBOL_GPL(pm_qos_remove_request);
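/*
 * Illustrative kernel-side usage of the request API above (a sketch assuming
 * a caller that includes <linux/pm_qos.h>; names prefixed "example_" are
 * invented for illustration).
 */
#if 0
static struct pm_qos_request example_req;

static void example_start_low_latency(void)
{
	/* cap CPU wakeup latency at 10 usec while the burst runs */
	pm_qos_add_request(&example_req, PM_QOS_CPU_DMA_LATENCY, 10);
}

static void example_stop_low_latency(void)
{
	/* back to the class default; aggregate target is recomputed */
	pm_qos_remove_request(&example_req);
}
#endif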
/**
* pm_qos_add_notifier - sets notification entry for changes to target value
* @pm_qos_class: identifies which qos target changes should be notified.
* @notifier: notifier block managed by caller.
*
* will register the notifier into a notification chain that gets called
* upon changes to the pm_qos_class target value.
*/
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
retval = blocking_notifier_chain_register(
pm_qos_array[pm_qos_class]->constraints->notifiers,
notifier);
return retval;
}
EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
/**
* pm_qos_remove_notifier - deletes notification entry from chain.
* @pm_qos_class: identifies which qos target changes are notified.
* @notifier: notifier block to be removed.
*
* will remove the notifier from the notification chain that gets called
* upon changes to the pm_qos_class target value.
*/
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
retval = blocking_notifier_chain_unregister(
pm_qos_array[pm_qos_class]->constraints->notifiers,
notifier);
return retval;
}
EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
/* User space interface to PM QoS classes via misc devices */
static int register_pm_qos_misc(struct pm_qos_object *qos)
{
qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
qos->pm_qos_power_miscdev.name = qos->name;
qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
return misc_register(&qos->pm_qos_power_miscdev);
}
static int find_pm_qos_object_by_minor(int minor)
{
int pm_qos_class;
for (pm_qos_class = 0;
pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
if (minor ==
pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
return pm_qos_class;
}
return -1;
}
static int pm_qos_power_open(struct inode *inode, struct file *filp)
{
long pm_qos_class;
pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
if (pm_qos_class >= 0) {
struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
filp->private_data = req;
return 0;
}
return -EPERM;
}
static int pm_qos_power_release(struct inode *inode, struct file *filp)
{
struct pm_qos_request *req;
req = filp->private_data;
pm_qos_remove_request(req);
kfree(req);
return 0;
}
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos)
{
s32 value;
unsigned long flags;
struct pm_qos_request *req = filp->private_data;
if (!req)
return -EINVAL;
if (!pm_qos_request_active(req))
return -EINVAL;
spin_lock_irqsave(&pm_qos_lock, flags);
value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
spin_unlock_irqrestore(&pm_qos_lock, flags);
return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
}
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
s32 value;
struct pm_qos_request *req;
if (count == sizeof(s32)) {
if (copy_from_user(&value, buf, sizeof(s32)))
return -EFAULT;
} else if (count <= 11) { /* ASCII perhaps? */
char ascii_value[11];
unsigned long int ulval;
int ret;
if (copy_from_user(ascii_value, buf, count))
return -EFAULT;
if (count > 10) {
if (ascii_value[10] == '\n')
ascii_value[10] = '\0';
else
return -EINVAL;
} else {
ascii_value[count] = '\0';
}
ret = strict_strtoul(ascii_value, 16, &ulval);
if (ret) {
pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
return -EINVAL;
}
value = (s32)lower_32_bits(ulval);
} else {
return -EINVAL;
}
req = filp->private_data;
pm_qos_update_request(req, value);
return count;
}
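/*
 * Note: the ASCII path above parses base 16, so "echo 10 > /dev/cpu_dma_latency"
 * requests 0x10 = 16 usec, not 10. Writing exactly sizeof(s32) binary bytes
 * avoids the surprise.
 */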
static int __init pm_qos_power_init(void)
{
int ret = 0;
int i;
BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
for (i = 1; i < PM_QOS_NUM_CLASSES; i++) {
ret = register_pm_qos_misc(pm_qos_array[i]);
if (ret < 0) {
printk(KERN_ERR "pm_qos_param: %s setup failed\n",
pm_qos_array[i]->name);
return ret;
}
}
return ret;
}
late_initcall(pm_qos_power_init);
| gpl-2.0 |
myjang0507/Polaris-a8elte | drivers/input/misc/pwm-beeper.c | 2247 | 4569 | /*
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
* PWM beeper driver
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/input.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
struct pwm_beeper {
struct input_dev *input;
struct pwm_device *pwm;
unsigned long period;
};
#define HZ_TO_NANOSECONDS(x) (1000000000UL/(x))
static int pwm_beeper_event(struct input_dev *input,
unsigned int type, unsigned int code, int value)
{
int ret = 0;
struct pwm_beeper *beeper = input_get_drvdata(input);
unsigned long period;
if (type != EV_SND || value < 0)
return -EINVAL;
switch (code) {
case SND_BELL:
value = value ? 1000 : 0;
break;
case SND_TONE:
break;
default:
return -EINVAL;
}
if (value == 0) {
pwm_config(beeper->pwm, 0, 0);
pwm_disable(beeper->pwm);
} else {
period = HZ_TO_NANOSECONDS(value);
ret = pwm_config(beeper->pwm, period / 2, period);
if (ret)
return ret;
ret = pwm_enable(beeper->pwm);
if (ret)
return ret;
beeper->period = period;
}
return 0;
}
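/*
 * Illustrative userspace sketch (not part of this driver): sounding the
 * beeper through its evdev node. The event node path is an assumption; it
 * is normally discovered by matching the input device named "pwm-beeper".
 */
#if 0
#include <fcntl.h>
#include <linux/input.h>
#include <string.h>
#include <unistd.h>

static int example_beep(const char *evdev_path, int freq_hz, unsigned int ms)
{
	struct input_event ev;
	int fd = open(evdev_path, O_WRONLY);	/* e.g. "/dev/input/event3" */

	if (fd < 0)
		return -1;

	memset(&ev, 0, sizeof(ev));
	ev.type = EV_SND;
	ev.code = SND_TONE;
	ev.value = freq_hz;		/* tone frequency in Hz */
	write(fd, &ev, sizeof(ev));

	usleep(ms * 1000);

	ev.value = 0;			/* 0 -> pwm_config(0, 0) + pwm_disable() */
	write(fd, &ev, sizeof(ev));
	close(fd);
	return 0;
}
#endif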
static int pwm_beeper_probe(struct platform_device *pdev)
{
unsigned long pwm_id = (unsigned long)pdev->dev.platform_data;
struct pwm_beeper *beeper;
int error;
beeper = kzalloc(sizeof(*beeper), GFP_KERNEL);
if (!beeper)
return -ENOMEM;
beeper->pwm = pwm_get(&pdev->dev, NULL);
if (IS_ERR(beeper->pwm)) {
dev_dbg(&pdev->dev, "unable to request PWM, trying legacy API\n");
beeper->pwm = pwm_request(pwm_id, "pwm beeper");
}
if (IS_ERR(beeper->pwm)) {
error = PTR_ERR(beeper->pwm);
dev_err(&pdev->dev, "Failed to request pwm device: %d\n", error);
goto err_free;
}
beeper->input = input_allocate_device();
if (!beeper->input) {
dev_err(&pdev->dev, "Failed to allocate input device\n");
error = -ENOMEM;
goto err_pwm_free;
}
beeper->input->dev.parent = &pdev->dev;
beeper->input->name = "pwm-beeper";
beeper->input->phys = "pwm/input0";
beeper->input->id.bustype = BUS_HOST;
beeper->input->id.vendor = 0x001f;
beeper->input->id.product = 0x0001;
beeper->input->id.version = 0x0100;
beeper->input->evbit[0] = BIT(EV_SND);
beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL);
beeper->input->event = pwm_beeper_event;
input_set_drvdata(beeper->input, beeper);
error = input_register_device(beeper->input);
if (error) {
dev_err(&pdev->dev, "Failed to register input device: %d\n", error);
goto err_input_free;
}
platform_set_drvdata(pdev, beeper);
return 0;
err_input_free:
input_free_device(beeper->input);
err_pwm_free:
pwm_free(beeper->pwm);
err_free:
kfree(beeper);
return error;
}
static int pwm_beeper_remove(struct platform_device *pdev)
{
struct pwm_beeper *beeper = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
input_unregister_device(beeper->input);
pwm_disable(beeper->pwm);
pwm_free(beeper->pwm);
kfree(beeper);
return 0;
}
#ifdef CONFIG_PM
static int pwm_beeper_suspend(struct device *dev)
{
struct pwm_beeper *beeper = dev_get_drvdata(dev);
if (beeper->period)
pwm_disable(beeper->pwm);
return 0;
}
static int pwm_beeper_resume(struct device *dev)
{
struct pwm_beeper *beeper = dev_get_drvdata(dev);
if (beeper->period) {
pwm_config(beeper->pwm, beeper->period / 2, beeper->period);
pwm_enable(beeper->pwm);
}
return 0;
}
static SIMPLE_DEV_PM_OPS(pwm_beeper_pm_ops,
pwm_beeper_suspend, pwm_beeper_resume);
#define PWM_BEEPER_PM_OPS (&pwm_beeper_pm_ops)
#else
#define PWM_BEEPER_PM_OPS NULL
#endif
#ifdef CONFIG_OF
static const struct of_device_id pwm_beeper_match[] = {
{ .compatible = "pwm-beeper", },
{ },
};
#endif
static struct platform_driver pwm_beeper_driver = {
.probe = pwm_beeper_probe,
.remove = pwm_beeper_remove,
.driver = {
.name = "pwm-beeper",
.owner = THIS_MODULE,
.pm = PWM_BEEPER_PM_OPS,
.of_match_table = of_match_ptr(pwm_beeper_match),
},
};
module_platform_driver(pwm_beeper_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("PWM beeper driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pwm-beeper");
| gpl-2.0 |