repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
hackeran/linux-netmap | mm/vmstat.c | 77 | 34109 | /*
* linux/mm/vmstat.c
*
* Manages VM statistics
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* zoned VM statistics
* Copyright (C) 2006 Silicon Graphics, Inc.,
* Christoph Lameter <christoph@lameter.com>
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/vmstat.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);
static void sum_vm_events(unsigned long *ret)
{
int cpu;
int i;
memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
for_each_online_cpu(cpu) {
struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
ret[i] += this->event[i];
}
}
/*
* Accumulate the vm event counters across all CPUs.
* The result is unavoidably approximate - it can change
* during and after execution of this function.
*/
void all_vm_events(unsigned long *ret)
{
get_online_cpus();
sum_vm_events(ret);
put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);
#ifdef CONFIG_HOTPLUG
/*
* Fold the foreign cpu events into our own.
*
* This is adding to the events on one processor
* but keeps the global counts constant.
*/
void vm_events_fold_cpu(int cpu)
{
struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
int i;
for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
count_vm_events(i, fold_state->event[i]);
fold_state->event[i] = 0;
}
}
#endif /* CONFIG_HOTPLUG */
#endif /* CONFIG_VM_EVENT_COUNTERS */
/*
* Manage combined zone based / global counters
*
* vm_stat contains the global counters
*/
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_stat);
#ifdef CONFIG_SMP
int calculate_pressure_threshold(struct zone *zone)
{
int threshold;
int watermark_distance;
/*
* As vmstats are not up to date, there is drift between the estimated
* and real values. For high thresholds and a high number of CPUs, it
* is possible for the min watermark to be breached while the estimated
* value looks fine. The pressure threshold is a reduced value such
* that even the maximum amount of drift will not accidentally breach
* the min watermark
*/
watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
threshold = max(1, (int)(watermark_distance / num_online_cpus()));
/*
* Maximum threshold is 125
*/
threshold = min(125, threshold);
return threshold;
}
int calculate_normal_threshold(struct zone *zone)
{
int threshold;
int mem; /* memory in 128 MB units */
/*
* The threshold scales with the number of processors and the amount
* of memory per zone. More memory means that we can defer updates for
* longer, more processors could lead to more contention.
* fls() is used to have a cheap way of logarithmic scaling.
*
* Some sample thresholds:
*
* Threshold Processors (fls) Zonesize fls(mem+1)
* ------------------------------------------------------------------
* 8 1 1 0.9-1 GB 4
* 16 2 2 0.9-1 GB 4
* 20 2 2 1-2 GB 5
* 24 2 2 2-4 GB 6
* 28 2 2 4-8 GB 7
* 32 2 2 8-16 GB 8
* 4 2 2 <128M 1
* 30 4 3 2-4 GB 5
* 48 4 3 8-16 GB 8
* 32 8 4 1-2 GB 4
* 32 8 4 0.9-1GB 4
* 10 16 5 <128M 1
* 40 16 5 900M 4
* 70 64 7 2-4 GB 5
* 84 64 7 4-8 GB 6
* 108 512 9 4-8 GB 6
* 125 1024 10 8-16 GB 8
* 125 1024 10 16-32 GB 9
*/
mem = zone->present_pages >> (27 - PAGE_SHIFT);
threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
/*
* Maximum threshold is 125
*/
threshold = min(125, threshold);
return threshold;
}
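/*
* Worked example (editorial illustration, not from the original source):
* on a 2-CPU machine with a 2 GB zone, mem = 2 GB / 128 MB = 16, so
* threshold = 2 * fls(2) * (1 + fls(16)) = 2 * 2 * (1 + 5) = 24, matching
* the "24 / 2 CPUs / 2-4 GB" row of the table above and staying well
* below the 125 cap.
*/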
/*
* Refresh the thresholds for each zone.
*/
void refresh_zone_stat_thresholds(void)
{
struct zone *zone;
int cpu;
int threshold;
for_each_populated_zone(zone) {
unsigned long max_drift, tolerate_drift;
threshold = calculate_normal_threshold(zone);
for_each_online_cpu(cpu)
per_cpu_ptr(zone->pageset, cpu)->stat_threshold
= threshold;
/*
* Only set percpu_drift_mark if there is a danger that
* NR_FREE_PAGES reports the low watermark is ok when in fact
* the min watermark could be breached by an allocation
*/
tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
max_drift = num_online_cpus() * threshold;
if (max_drift > tolerate_drift)
zone->percpu_drift_mark = high_wmark_pages(zone) +
max_drift;
}
}
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
int (*calculate_pressure)(struct zone *))
{
struct zone *zone;
int cpu;
int threshold;
int i;
for (i = 0; i < pgdat->nr_zones; i++) {
zone = &pgdat->node_zones[i];
if (!zone->percpu_drift_mark)
continue;
threshold = (*calculate_pressure)(zone);
for_each_possible_cpu(cpu)
per_cpu_ptr(zone->pageset, cpu)->stat_threshold
= threshold;
}
}
/*
* For use when we know that interrupts are disabled.
*/
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
int delta)
{
struct per_cpu_pageset __percpu *pcp = zone->pageset;
s8 __percpu *p = pcp->vm_stat_diff + item;
long x;
long t;
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(x > t || x < -t)) {
zone_page_state_add(x, zone, item);
x = 0;
}
__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);
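/*
* Illustrative call site (an assumption for exposition, not taken from
* this file): a buddy-allocator path that already runs with interrupts
* disabled can update the free-page counter without an irq-save variant:
*
*	__mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
*/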
/*
* Optimized increment and decrement functions.
*
* These are only for a single page and therefore can take a struct page *
* argument instead of struct zone *. This allows the inclusion of the code
* generated for page_zone(page) into the optimized functions.
*
* No overflow check is necessary and therefore the differential can be
* incremented or decremented in place which may allow the compilers to
* generate better code.
* The increment or decrement is known and therefore one boundary check can
* be omitted.
*
* NOTE: These functions are very performance sensitive. Change only
* with care.
*
* Some processors have inc/dec instructions that are atomic vs an interrupt.
* However, the code must first determine the differential location in a zone
* based on the processor number and then inc/dec the counter. There is no
* guarantee without disabling preemption that the processor will not change
* in between and therefore the atomicity vs. interrupt cannot be exploited
* in a useful way here.
*/
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
struct per_cpu_pageset __percpu *pcp = zone->pageset;
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
s8 overstep = t >> 1;
zone_page_state_add(v + overstep, zone, item);
__this_cpu_write(*p, -overstep);
}
}
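/*
* Illustrative trace (editorial, not from the original source): with a
* stat_threshold of t = 8, an increment that pushes the per-cpu diff to
* v = 9 folds v + (t >> 1) = 13 into the zone counter and rewinds the
* diff to -4, so roughly 1.5 * t further increments occur before the
* next global update.
*/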
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
struct per_cpu_pageset __percpu *pcp = zone->pageset;
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < -t)) {
s8 overstep = t >> 1;
zone_page_state_add(v - overstep, zone, item);
__this_cpu_write(*p, overstep);
}
}
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);
#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
* If we have cmpxchg_local support then we do not need to incur the overhead
* that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
*
* mod_state() modifies the zone counter state through atomic per cpu
* operations.
*
* Overstep mode specifies how overstep should be handled:
* 0 No overstepping
* 1 Overstepping half of threshold
* -1 Overstepping minus half of threshold
*/
static inline void mod_state(struct zone *zone,
enum zone_stat_item item, int delta, int overstep_mode)
{
struct per_cpu_pageset __percpu *pcp = zone->pageset;
s8 __percpu *p = pcp->vm_stat_diff + item;
long o, n, t, z;
do {
z = 0; /* overflow to zone counters */
/*
* The fetching of the stat_threshold is racy. We may apply
* a counter threshold to the wrong cpu if we get
* rescheduled while executing here. However, the next
* counter update will apply the threshold again and
* therefore bring the counter under the threshold again.
*
* Most of the time the thresholds are the same anyway
* for all cpus in a zone.
*/
t = this_cpu_read(pcp->stat_threshold);
o = this_cpu_read(*p);
n = delta + o;
if (n > t || n < -t) {
int os = overstep_mode * (t >> 1);
/* Overflow must be added to zone counters */
z = n + os;
n = -os;
}
} while (this_cpu_cmpxchg(*p, o, n) != o);
if (z)
zone_page_state_add(z, zone, item);
}
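/*
* Illustrative trace (editorial, not from the original source): with
* t = 8, o = 7, delta = +3 and overstep_mode = 1, n = 10 exceeds t, so
* os = 4, z = 14 is folded into the zone counter and the per-cpu diff
* restarts at -4. If another update wins the cmpxchg first, the loop
* simply retries with fresh o and t values.
*/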
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
int delta)
{
mod_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);
void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
mod_state(zone, item, 1, 1);
}
void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
mod_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);
void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
mod_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);
#else
/*
* Use interrupt disable to serialize counter updates
*/
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
int delta)
{
unsigned long flags;
local_irq_save(flags);
__mod_zone_page_state(zone, item, delta);
local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
unsigned long flags;
local_irq_save(flags);
__inc_zone_state(zone, item);
local_irq_restore(flags);
}
void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
unsigned long flags;
struct zone *zone;
zone = page_zone(page);
local_irq_save(flags);
__inc_zone_state(zone, item);
local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);
void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
unsigned long flags;
local_irq_save(flags);
__dec_zone_page_state(page, item);
local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
#endif
/*
* Update the zone counters for one cpu.
*
* The cpu specified must be either the current cpu or a processor that
* is not online. If it is the current cpu then the execution thread must
* be pinned to the current cpu.
*
* Note that refresh_cpu_vm_stats strives to only access
* node local memory. The per cpu pagesets on remote zones are placed
* in the memory local to the processor using that pageset. So the
* loop over all zones will access a series of cachelines local to
* the processor.
*
* The call to zone_page_state_add updates the cachelines with the
* statistics in the remote zone struct as well as the global cachelines
* with the global counters. These could cause remote node cache line
* bouncing and will have to be only done when necessary.
*/
void refresh_cpu_vm_stats(int cpu)
{
struct zone *zone;
int i;
int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
for_each_populated_zone(zone) {
struct per_cpu_pageset *p;
p = per_cpu_ptr(zone->pageset, cpu);
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
if (p->vm_stat_diff[i]) {
unsigned long flags;
int v;
local_irq_save(flags);
v = p->vm_stat_diff[i];
p->vm_stat_diff[i] = 0;
local_irq_restore(flags);
atomic_long_add(v, &zone->vm_stat[i]);
global_diff[i] += v;
#ifdef CONFIG_NUMA
/* 3 seconds idle till flush */
p->expire = 3;
#endif
}
cond_resched();
#ifdef CONFIG_NUMA
/*
* Deal with draining the remote pageset of this
* processor
*
* Check if there are pages remaining in this pageset;
* if not, there is nothing to expire.
*/
if (!p->expire || !p->pcp.count)
continue;
/*
* We never drain zones local to this processor.
*/
if (zone_to_nid(zone) == numa_node_id()) {
p->expire = 0;
continue;
}
p->expire--;
if (p->expire)
continue;
if (p->pcp.count)
drain_zone_pages(zone, &p->pcp);
#endif
}
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
if (global_diff[i])
atomic_long_add(global_diff[i], &vm_stat[i]);
}
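/*
* Illustrative timing (an assumption based on the sysctl_stat_interval
* default of HZ later in this file): vmstat_update() runs this roughly
* once per second per cpu, so a remote pageset with expire = 3 survives
* about three idle passes before drain_zone_pages() returns its pages.
*/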
#endif
#ifdef CONFIG_NUMA
/*
* preferred_zone = the zone preferred by the allocator for this request
* z = the zone from which the allocation actually occurred.
*
* Must be called with interrupts disabled.
*
* When __GFP_OTHER_NODE is set assume the node of the preferred
* zone is the local node. This is useful for daemons who allocate
* memory on behalf of other processes.
*/
void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
{
if (z->zone_pgdat == preferred_zone->zone_pgdat) {
__inc_zone_state(z, NUMA_HIT);
} else {
__inc_zone_state(z, NUMA_MISS);
__inc_zone_state(preferred_zone, NUMA_FOREIGN);
}
if (z->node == ((flags & __GFP_OTHER_NODE) ?
preferred_zone->node : numa_node_id()))
__inc_zone_state(z, NUMA_LOCAL);
else
__inc_zone_state(z, NUMA_OTHER);
}
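/*
* Worked example (editorial illustration): a task running on node 0
* prefers a node-0 zone but the page comes from node 1. The allocating
* zone on node 1 gets NUMA_MISS, the preferred zone on node 0 gets
* NUMA_FOREIGN, and, absent __GFP_OTHER_NODE, node 1 differs from
* numa_node_id() so the allocating zone also gets NUMA_OTHER.
*/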
#endif
#ifdef CONFIG_COMPACTION
struct contig_page_info {
unsigned long free_pages;
unsigned long free_blocks_total;
unsigned long free_blocks_suitable;
};
/*
* Calculate the number of free pages in a zone, how many contiguous
* pages are free and how many are large enough to satisfy an allocation of
* the target size. Note that this function makes no attempt to estimate
* how many suitable free blocks there *might* be if MOVABLE pages were
* migrated. Calculating that is possible, but expensive and can be
* figured out from userspace
*/
static void fill_contig_page_info(struct zone *zone,
unsigned int suitable_order,
struct contig_page_info *info)
{
unsigned int order;
info->free_pages = 0;
info->free_blocks_total = 0;
info->free_blocks_suitable = 0;
for (order = 0; order < MAX_ORDER; order++) {
unsigned long blocks;
/* Count number of free blocks */
blocks = zone->free_area[order].nr_free;
info->free_blocks_total += blocks;
/* Count free base pages */
info->free_pages += blocks << order;
/* Count the suitable free blocks */
if (order >= suitable_order)
info->free_blocks_suitable += blocks <<
(order - suitable_order);
}
}
/*
* A fragmentation index only makes sense if an allocation of a requested
* size would fail. If that is true, the fragmentation index indicates
* whether external fragmentation or a lack of memory was the problem.
* The value can be used to determine if page reclaim or compaction
* should be used
*/
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
unsigned long requested = 1UL << order;
if (!info->free_blocks_total)
return 0;
/* Fragmentation index only makes sense when a request would fail */
if (info->free_blocks_suitable)
return -1000;
/*
* Index is between 0 and 1 so return within 3 decimal places
*
* 0 => allocation would fail due to lack of memory
* 1 => allocation would fail due to fragmentation
*/
return 1000 - div_u64(1000 + div_u64(info->free_pages * 1000ULL, requested), info->free_blocks_total);
}
/* Same as __fragmentation index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
struct contig_page_info info;
fill_contig_page_info(zone, order, &info);
return __fragmentation_index(order, &info);
}
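/*
* Worked example (editorial illustration): 4000 free pages, all held as
* order-0 blocks, with an order-3 request (requested = 8). No block is
* suitable, so the index is
*	1000 - (1000 + 4000 * 1000 / 8) / 4000 = 1000 - 125 = 875,
* i.e. 0.875: memory is plentiful but fragmented, so compaction rather
* than reclaim is the appropriate response.
*/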
#endif
#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
static char * const migratetype_names[MIGRATE_TYPES] = {
"Unmovable",
"Reclaimable",
"Movable",
"Reserve",
#ifdef CONFIG_CMA
"CMA",
#endif
"Isolate",
};
static void *frag_start(struct seq_file *m, loff_t *pos)
{
pg_data_t *pgdat;
loff_t node = *pos;
for (pgdat = first_online_pgdat();
pgdat && node;
pgdat = next_online_pgdat(pgdat))
--node;
return pgdat;
}
static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
pg_data_t *pgdat = (pg_data_t *)arg;
(*pos)++;
return next_online_pgdat(pgdat);
}
static void frag_stop(struct seq_file *m, void *arg)
{
}
/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
struct zone *zone;
struct zone *node_zones = pgdat->node_zones;
unsigned long flags;
for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
if (!populated_zone(zone))
continue;
spin_lock_irqsave(&zone->lock, flags);
print(m, pgdat, zone);
spin_unlock_irqrestore(&zone->lock, flags);
}
}
#endif
#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif
#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif
#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif
#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
TEXT_FOR_HIGHMEM(xx) xx "_movable",
const char * const vmstat_text[] = {
/* Zoned VM counters */
"nr_free_pages",
"nr_inactive_anon",
"nr_active_anon",
"nr_inactive_file",
"nr_active_file",
"nr_unevictable",
"nr_mlock",
"nr_anon_pages",
"nr_mapped",
"nr_file_pages",
"nr_dirty",
"nr_writeback",
"nr_slab_reclaimable",
"nr_slab_unreclaimable",
"nr_page_table_pages",
"nr_kernel_stack",
"nr_unstable",
"nr_bounce",
"nr_vmscan_write",
"nr_vmscan_immediate_reclaim",
"nr_writeback_temp",
"nr_isolated_anon",
"nr_isolated_file",
"nr_shmem",
"nr_dirtied",
"nr_written",
#ifdef CONFIG_NUMA
"numa_hit",
"numa_miss",
"numa_foreign",
"numa_interleave",
"numa_local",
"numa_other",
#endif
"nr_anon_transparent_hugepages",
"nr_dirty_threshold",
"nr_dirty_background_threshold",
#ifdef CONFIG_VM_EVENT_COUNTERS
"pgpgin",
"pgpgout",
"pswpin",
"pswpout",
TEXTS_FOR_ZONES("pgalloc")
"pgfree",
"pgactivate",
"pgdeactivate",
"pgfault",
"pgmajfault",
TEXTS_FOR_ZONES("pgrefill")
TEXTS_FOR_ZONES("pgsteal_kswapd")
TEXTS_FOR_ZONES("pgsteal_direct")
TEXTS_FOR_ZONES("pgscan_kswapd")
TEXTS_FOR_ZONES("pgscan_direct")
"pgscan_direct_throttle",
#ifdef CONFIG_NUMA
"zone_reclaim_failed",
#endif
"pginodesteal",
"slabs_scanned",
"kswapd_inodesteal",
"kswapd_low_wmark_hit_quickly",
"kswapd_high_wmark_hit_quickly",
"kswapd_skip_congestion_wait",
"pageoutrun",
"allocstall",
"pgrotated",
#ifdef CONFIG_COMPACTION
"compact_blocks_moved",
"compact_pages_moved",
"compact_pagemigrate_failed",
"compact_stall",
"compact_fail",
"compact_success",
#endif
#ifdef CONFIG_HUGETLB_PAGE
"htlb_buddy_alloc_success",
"htlb_buddy_alloc_fail",
#endif
"unevictable_pgs_culled",
"unevictable_pgs_scanned",
"unevictable_pgs_rescued",
"unevictable_pgs_mlocked",
"unevictable_pgs_munlocked",
"unevictable_pgs_cleared",
"unevictable_pgs_stranded",
"unevictable_pgs_mlockfreed",
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
"thp_fault_alloc",
"thp_fault_fallback",
"thp_collapse_alloc",
"thp_collapse_alloc_failed",
"thp_split",
#endif
#endif /* CONFIG_VM_EVENT_COUNTERS */
};
#endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
struct zone *zone)
{
int order;
seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
for (order = 0; order < MAX_ORDER; ++order)
seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
seq_putc(m, '\n');
}
/*
* This walks the free areas for each zone.
*/
static int frag_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
walk_zones_in_node(m, pgdat, frag_show_print);
return 0;
}
static void pagetypeinfo_showfree_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone)
{
int order, mtype;
for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
seq_printf(m, "Node %4d, zone %8s, type %12s ",
pgdat->node_id,
zone->name,
migratetype_names[mtype]);
for (order = 0; order < MAX_ORDER; ++order) {
unsigned long freecount = 0;
struct free_area *area;
struct list_head *curr;
area = &(zone->free_area[order]);
list_for_each(curr, &area->free_list[mtype])
freecount++;
seq_printf(m, "%6lu ", freecount);
}
seq_putc(m, '\n');
}
}
/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
int order;
pg_data_t *pgdat = (pg_data_t *)arg;
/* Print header */
seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
for (order = 0; order < MAX_ORDER; ++order)
seq_printf(m, "%6d ", order);
seq_putc(m, '\n');
walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
return 0;
}
static void pagetypeinfo_showblockcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone)
{
int mtype;
unsigned long pfn;
unsigned long start_pfn = zone->zone_start_pfn;
unsigned long end_pfn = start_pfn + zone->spanned_pages;
unsigned long count[MIGRATE_TYPES] = { 0, };
for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
struct page *page;
if (!pfn_valid(pfn))
continue;
page = pfn_to_page(pfn);
/* Watch for unexpected holes punched in the memmap */
if (!memmap_valid_within(pfn, page, zone))
continue;
mtype = get_pageblock_migratetype(page);
if (mtype < MIGRATE_TYPES)
count[mtype]++;
}
/* Print counts */
seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
seq_printf(m, "%12lu ", count[mtype]);
seq_putc(m, '\n');
}
/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
int mtype;
pg_data_t *pgdat = (pg_data_t *)arg;
seq_printf(m, "\n%-23s", "Number of blocks type ");
for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
seq_printf(m, "%12s ", migratetype_names[mtype]);
seq_putc(m, '\n');
walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
return 0;
}
/*
* This prints out statistics in relation to grouping pages by mobility.
* It is expensive to collect so do not constantly read the file.
*/
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
/* check memoryless node */
if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
return 0;
seq_printf(m, "Page block order: %d\n", pageblock_order);
seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages);
seq_putc(m, '\n');
pagetypeinfo_showfree(m, pgdat);
pagetypeinfo_showblockcount(m, pgdat);
return 0;
}
static const struct seq_operations fragmentation_op = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = frag_show,
};
static int fragmentation_open(struct inode *inode, struct file *file)
{
return seq_open(file, &fragmentation_op);
}
static const struct file_operations fragmentation_file_operations = {
.open = fragmentation_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static const struct seq_operations pagetypeinfo_op = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = pagetypeinfo_show,
};
static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
return seq_open(file, &pagetypeinfo_op);
}
static const struct file_operations pagetypeinfo_file_ops = {
.open = pagetypeinfo_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
struct zone *zone)
{
int i;
seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
seq_printf(m,
"\n pages free %lu"
"\n min %lu"
"\n low %lu"
"\n high %lu"
"\n scanned %lu"
"\n spanned %lu"
"\n present %lu",
zone_page_state(zone, NR_FREE_PAGES),
min_wmark_pages(zone),
low_wmark_pages(zone),
high_wmark_pages(zone),
zone->pages_scanned,
zone->spanned_pages,
zone->present_pages);
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
seq_printf(m, "\n %-12s %lu", vmstat_text[i],
zone_page_state(zone, i));
seq_printf(m,
"\n protection: (%lu",
zone->lowmem_reserve[0]);
for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
seq_printf(m,
")"
"\n pagesets");
for_each_online_cpu(i) {
struct per_cpu_pageset *pageset;
pageset = per_cpu_ptr(zone->pageset, i);
seq_printf(m,
"\n cpu: %i"
"\n count: %i"
"\n high: %i"
"\n batch: %i",
i,
pageset->pcp.count,
pageset->pcp.high,
pageset->pcp.batch);
#ifdef CONFIG_SMP
seq_printf(m, "\n vm stats threshold: %d",
pageset->stat_threshold);
#endif
}
seq_printf(m,
"\n all_unreclaimable: %u"
"\n start_pfn: %lu"
"\n inactive_ratio: %u",
zone->all_unreclaimable,
zone->zone_start_pfn,
zone->inactive_ratio);
seq_putc(m, '\n');
}
/*
* Output information about zones in @pgdat.
*/
static int zoneinfo_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
walk_zones_in_node(m, pgdat, zoneinfo_show_print);
return 0;
}
static const struct seq_operations zoneinfo_op = {
.start = frag_start, /* iterate over all zones. The same as in
* fragmentation. */
.next = frag_next,
.stop = frag_stop,
.show = zoneinfo_show,
};
static int zoneinfo_open(struct inode *inode, struct file *file)
{
return seq_open(file, &zoneinfo_op);
}
static const struct file_operations proc_zoneinfo_file_operations = {
.open = zoneinfo_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
enum writeback_stat_item {
NR_DIRTY_THRESHOLD,
NR_DIRTY_BG_THRESHOLD,
NR_VM_WRITEBACK_STAT_ITEMS,
};
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
unsigned long *v;
int i, stat_items_size;
if (*pos >= ARRAY_SIZE(vmstat_text))
return NULL;
stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
#ifdef CONFIG_VM_EVENT_COUNTERS
stat_items_size += sizeof(struct vm_event_state);
#endif
v = kmalloc(stat_items_size, GFP_KERNEL);
m->private = v;
if (!v)
return ERR_PTR(-ENOMEM);
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
v[i] = global_page_state(i);
v += NR_VM_ZONE_STAT_ITEMS;
global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
v + NR_DIRTY_THRESHOLD);
v += NR_VM_WRITEBACK_STAT_ITEMS;
#ifdef CONFIG_VM_EVENT_COUNTERS
all_vm_events(v);
v[PGPGIN] /= 2; /* sectors -> kbytes */
v[PGPGOUT] /= 2;
#endif
return (unsigned long *)m->private + *pos;
}
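/*
* Layout sketch (editorial illustration): the buffer built above is
*
*	[0 .. NR_VM_ZONE_STAT_ITEMS)		zone counters
*	[.. + NR_VM_WRITEBACK_STAT_ITEMS)	dirty thresholds
*	[.. + NR_VM_EVENT_ITEMS)		event counters (if enabled)
*
* in the same order as vmstat_text[], so vmstat_show() can index the
* value array and the name array with one offset.
*/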
static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
(*pos)++;
if (*pos >= ARRAY_SIZE(vmstat_text))
return NULL;
return (unsigned long *)m->private + *pos;
}
static int vmstat_show(struct seq_file *m, void *arg)
{
unsigned long *l = arg;
unsigned long off = l - (unsigned long *)m->private;
seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
return 0;
}
static void vmstat_stop(struct seq_file *m, void *arg)
{
kfree(m->private);
m->private = NULL;
}
static const struct seq_operations vmstat_op = {
.start = vmstat_start,
.next = vmstat_next,
.stop = vmstat_stop,
.show = vmstat_show,
};
static int vmstat_open(struct inode *inode, struct file *file)
{
return seq_open(file, &vmstat_op);
}
static const struct file_operations proc_vmstat_file_operations = {
.open = vmstat_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;
static void vmstat_update(struct work_struct *w)
{
refresh_cpu_vm_stats(smp_processor_id());
schedule_delayed_work(&__get_cpu_var(vmstat_work),
round_jiffies_relative(sysctl_stat_interval));
}
static void __cpuinit start_cpu_timer(int cpu)
{
struct delayed_work *work = &per_cpu(vmstat_work, cpu);
INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}
/*
* Use the cpu notifier to ensure that the thresholds are recalculated
* when necessary.
*/
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
long cpu = (long)hcpu;
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
refresh_zone_stat_thresholds();
start_cpu_timer(cpu);
node_set_state(cpu_to_node(cpu), N_CPU);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
per_cpu(vmstat_work, cpu).work.func = NULL;
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
start_cpu_timer(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
refresh_zone_stat_thresholds();
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata vmstat_notifier =
{ &vmstat_cpuup_callback, NULL, 0 };
#endif
static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
int cpu;
register_cpu_notifier(&vmstat_notifier);
for_each_online_cpu(cpu)
start_cpu_timer(cpu);
#endif
#ifdef CONFIG_PROC_FS
proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
return 0;
}
module_init(setup_vmstat)
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
#include <linux/debugfs.h>
/*
* Return an index indicating how much of the available free memory is
* unusable for an allocation of the requested size.
*/
static int unusable_free_index(unsigned int order,
struct contig_page_info *info)
{
/* No free memory is interpreted as all free memory is unusable */
if (info->free_pages == 0)
return 1000;
/*
* Index should be a value between 0 and 1. Return a value to 3
* decimal places.
*
* 0 => no fragmentation
* 1 => high fragmentation
*/
return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
}
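/*
* Worked example (editorial illustration): 100 free pages of which 10
* blocks are of order >= 2 (suitable for an order-2 request), so
* 10 << 2 = 40 pages are usable and the index is
*	(100 - 40) * 1000 / 100 = 600,
* i.e. 0.600 of free memory is unusable for that request size.
*/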
static void unusable_show_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone)
{
unsigned int order;
int index;
struct contig_page_info info;
seq_printf(m, "Node %d, zone %8s ",
pgdat->node_id,
zone->name);
for (order = 0; order < MAX_ORDER; ++order) {
fill_contig_page_info(zone, order, &info);
index = unusable_free_index(order, &info);
seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
}
seq_putc(m, '\n');
}
/*
* Display unusable free space index
*
* The unusable free space index measures how much of the available free
* memory cannot be used to satisfy an allocation of a given size and is a
* value between 0 and 1. The higher the value, the more of free memory is
* unusable and by implication, the worse the external fragmentation is. This
* can be expressed as a percentage by multiplying by 100.
*/
static int unusable_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
/* check memoryless node */
if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
return 0;
walk_zones_in_node(m, pgdat, unusable_show_print);
return 0;
}
static const struct seq_operations unusable_op = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = unusable_show,
};
static int unusable_open(struct inode *inode, struct file *file)
{
return seq_open(file, &unusable_op);
}
static const struct file_operations unusable_file_ops = {
.open = unusable_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static void extfrag_show_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone)
{
unsigned int order;
int index;
/* Alloc on stack as interrupts are disabled for zone walk */
struct contig_page_info info;
seq_printf(m, "Node %d, zone %8s ",
pgdat->node_id,
zone->name);
for (order = 0; order < MAX_ORDER; ++order) {
fill_contig_page_info(zone, order, &info);
index = __fragmentation_index(order, &info);
seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
}
seq_putc(m, '\n');
}
/*
* Display fragmentation index for orders that allocations would fail for
*/
static int extfrag_show(struct seq_file *m, void *arg)
{
pg_data_t *pgdat = (pg_data_t *)arg;
walk_zones_in_node(m, pgdat, extfrag_show_print);
return 0;
}
static const struct seq_operations extfrag_op = {
.start = frag_start,
.next = frag_next,
.stop = frag_stop,
.show = extfrag_show,
};
static int extfrag_open(struct inode *inode, struct file *file)
{
return seq_open(file, &extfrag_op);
}
static const struct file_operations extfrag_file_ops = {
.open = extfrag_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int __init extfrag_debug_init(void)
{
struct dentry *extfrag_debug_root;
extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
if (!extfrag_debug_root)
return -ENOMEM;
if (!debugfs_create_file("unusable_index", 0444,
extfrag_debug_root, NULL, &unusable_file_ops))
goto fail;
if (!debugfs_create_file("extfrag_index", 0444,
extfrag_debug_root, NULL, &extfrag_file_ops))
goto fail;
return 0;
fail:
debugfs_remove_recursive(extfrag_debug_root);
return -ENOMEM;
}
module_init(extfrag_debug_init);
#endif
| gpl-2.0 |
AmauryEsparza/linux | arch/hexagon/mm/vm_fault.c | 589 | 4677 | /*
* Memory fault handling for Hexagon
*
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
/*
* Page fault handling for the Hexagon Virtual Machine.
* Can also be called by a native port emulating the HVM
* exceptions.
*/
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/hardirq.h>
/*
* Decode of hardware exception sends us to one of several
* entry points. At each, we generate canonical arguments
* for handling by the abstract memory management code.
*/
#define FLT_IFETCH -1
#define FLT_LOAD 0
#define FLT_STORE 1
/*
* Canonical page fault handler
*/
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
siginfo_t info;
int si_code = SEGV_MAPERR;
int fault;
const struct exception_table_entry *fixup;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(cause > 0 ? FAULT_FLAG_WRITE : 0);
/*
* If we're in an interrupt or have no user context,
* we must not take the fault.
*/
if (unlikely(in_interrupt() || !mm))
goto no_context;
local_irq_enable();
retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
good_area:
/* Address space is OK. Now check access rights. */
si_code = SEGV_ACCERR;
switch (cause) {
case FLT_IFETCH:
if (!(vma->vm_flags & VM_EXEC))
goto bad_area;
break;
case FLT_LOAD:
if (!(vma->vm_flags & VM_READ))
goto bad_area;
break;
case FLT_STORE:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
break;
}
fault = handle_mm_fault(mm, vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
/* The most common case -- we are done. */
if (likely(!(fault & VM_FAULT_ERROR))) {
if (flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR)
current->maj_flt++;
else
current->min_flt++;
if (fault & VM_FAULT_RETRY) {
flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
up_read(&mm->mmap_sem);
return;
}
up_read(&mm->mmap_sem);
/* Handle copyin/out exception cases */
if (!user_mode(regs))
goto no_context;
if (fault & VM_FAULT_OOM) {
pagefault_out_of_memory();
return;
}
/* User-mode address is in the memory map, but we are
* unable to fix up the page fault.
*/
if (fault & VM_FAULT_SIGBUS) {
info.si_signo = SIGBUS;
info.si_code = BUS_ADRERR;
}
/* Address is not in the memory map */
else {
info.si_signo = SIGSEGV;
info.si_code = SEGV_ACCERR;
}
info.si_errno = 0;
info.si_addr = (void __user *)address;
force_sig_info(info.si_signo, &info, current);
return;
bad_area:
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void *)address;
force_sig_info(info.si_signo, &info, current);
return;
}
/* Kernel-mode fault falls through */
no_context:
fixup = search_exception_tables(pt_elr(regs));
if (fixup) {
pt_set_elr(regs, fixup->fixup);
return;
}
/* Things are looking very, very bad now */
bust_spinlocks(1);
printk(KERN_EMERG "Unable to handle kernel paging request at "
"virtual address 0x%08lx, regs %p\n", address, regs);
die("Bad Kernel VA", regs, SIGKILL);
}
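/*
* Illustrative sequence (editorial, not from the original source): on the
* first trip handle_mm_fault() may return VM_FAULT_RETRY after dropping
* mmap_sem, e.g. while waiting on page I/O. The code above then clears
* FAULT_FLAG_ALLOW_RETRY, sets FAULT_FLAG_TRIED and retries once with the
* semaphore re-taken, so a second VM_FAULT_RETRY cannot loop forever.
*/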
void read_protection_fault(struct pt_regs *regs)
{
unsigned long badvadr = pt_badva(regs);
do_page_fault(badvadr, FLT_LOAD, regs);
}
void write_protection_fault(struct pt_regs *regs)
{
unsigned long badvadr = pt_badva(regs);
do_page_fault(badvadr, FLT_STORE, regs);
}
void execute_protection_fault(struct pt_regs *regs)
{
unsigned long badvadr = pt_badva(regs);
do_page_fault(badvadr, FLT_IFETCH, regs);
}
| gpl-2.0 |
jogger0703/linux | drivers/mfd/palmas.c | 589 | 18233 | /*
* TI Palmas MFD Driver
*
* Copyright 2011-2012 Texas Instruments Inc.
*
* Author: Graeme Gregory <gg@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/palmas.h>
#include <linux/of_device.h>
static const struct regmap_config palmas_regmap_config[PALMAS_NUM_CLIENTS] = {
{
.reg_bits = 8,
.val_bits = 8,
.max_register = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE,
PALMAS_PRIMARY_SECONDARY_PAD3),
},
{
.reg_bits = 8,
.val_bits = 8,
.max_register = PALMAS_BASE_TO_REG(PALMAS_GPADC_BASE,
PALMAS_GPADC_SMPS_VSEL_MONITORING),
},
{
.reg_bits = 8,
.val_bits = 8,
.max_register = PALMAS_BASE_TO_REG(PALMAS_TRIM_GPADC_BASE,
PALMAS_GPADC_TRIM16),
},
};
static const struct regmap_irq tps65917_irqs[] = {
/* INT1 IRQs */
[TPS65917_RESERVED1] = {
.mask = TPS65917_RESERVED,
},
[TPS65917_PWRON_IRQ] = {
.mask = TPS65917_INT1_STATUS_PWRON,
},
[TPS65917_LONG_PRESS_KEY_IRQ] = {
.mask = TPS65917_INT1_STATUS_LONG_PRESS_KEY,
},
[TPS65917_RESERVED2] = {
.mask = TPS65917_RESERVED,
},
[TPS65917_PWRDOWN_IRQ] = {
.mask = TPS65917_INT1_STATUS_PWRDOWN,
},
[TPS65917_HOTDIE_IRQ] = {
.mask = TPS65917_INT1_STATUS_HOTDIE,
},
[TPS65917_VSYS_MON_IRQ] = {
.mask = TPS65917_INT1_STATUS_VSYS_MON,
},
[TPS65917_RESERVED3] = {
.mask = TPS65917_RESERVED,
},
/* INT2 IRQs*/
[TPS65917_RESERVED4] = {
.mask = TPS65917_RESERVED,
.reg_offset = 1,
},
[TPS65917_OTP_ERROR_IRQ] = {
.mask = TPS65917_INT2_STATUS_OTP_ERROR,
.reg_offset = 1,
},
[TPS65917_WDT_IRQ] = {
.mask = TPS65917_INT2_STATUS_WDT,
.reg_offset = 1,
},
[TPS65917_RESERVED5] = {
.mask = TPS65917_RESERVED,
.reg_offset = 1,
},
[TPS65917_RESET_IN_IRQ] = {
.mask = TPS65917_INT2_STATUS_RESET_IN,
.reg_offset = 1,
},
[TPS65917_FSD_IRQ] = {
.mask = TPS65917_INT2_STATUS_FSD,
.reg_offset = 1,
},
[TPS65917_SHORT_IRQ] = {
.mask = TPS65917_INT2_STATUS_SHORT,
.reg_offset = 1,
},
[TPS65917_RESERVED6] = {
.mask = TPS65917_RESERVED,
.reg_offset = 1,
},
/* INT3 IRQs */
[TPS65917_GPADC_AUTO_0_IRQ] = {
.mask = TPS65917_INT3_STATUS_GPADC_AUTO_0,
.reg_offset = 2,
},
[TPS65917_GPADC_AUTO_1_IRQ] = {
.mask = TPS65917_INT3_STATUS_GPADC_AUTO_1,
.reg_offset = 2,
},
[TPS65917_GPADC_EOC_SW_IRQ] = {
.mask = TPS65917_INT3_STATUS_GPADC_EOC_SW,
.reg_offset = 2,
},
[TPS65917_RESREVED6] = {
.mask = TPS65917_RESERVED6,
.reg_offset = 2,
},
[TPS65917_RESERVED7] = {
.mask = TPS65917_RESERVED,
.reg_offset = 2,
},
[TPS65917_RESERVED8] = {
.mask = TPS65917_RESERVED,
.reg_offset = 2,
},
[TPS65917_RESERVED9] = {
.mask = TPS65917_RESERVED,
.reg_offset = 2,
},
[TPS65917_VBUS_IRQ] = {
.mask = TPS65917_INT3_STATUS_VBUS,
.reg_offset = 2,
},
/* INT4 IRQs */
[TPS65917_GPIO_0_IRQ] = {
.mask = TPS65917_INT4_STATUS_GPIO_0,
.reg_offset = 3,
},
[TPS65917_GPIO_1_IRQ] = {
.mask = TPS65917_INT4_STATUS_GPIO_1,
.reg_offset = 3,
},
[TPS65917_GPIO_2_IRQ] = {
.mask = TPS65917_INT4_STATUS_GPIO_2,
.reg_offset = 3,
},
[TPS65917_GPIO_3_IRQ] = {
.mask = TPS65917_INT4_STATUS_GPIO_3,
.reg_offset = 3,
},
[TPS65917_GPIO_4_IRQ] = {
.mask = TPS65917_INT4_STATUS_GPIO_4,
.reg_offset = 3,
},
[TPS65917_GPIO_5_IRQ] = {
.mask = TPS65917_INT4_STATUS_GPIO_5,
.reg_offset = 3,
},
[TPS65917_GPIO_6_IRQ] = {
.mask = TPS65917_INT4_STATUS_GPIO_6,
.reg_offset = 3,
},
[TPS65917_RESERVED10] = {
.mask = TPS65917_RESERVED10,
.reg_offset = 3,
},
};
static const struct regmap_irq palmas_irqs[] = {
/* INT1 IRQs */
[PALMAS_CHARG_DET_N_VBUS_OVV_IRQ] = {
.mask = PALMAS_INT1_STATUS_CHARG_DET_N_VBUS_OVV,
},
[PALMAS_PWRON_IRQ] = {
.mask = PALMAS_INT1_STATUS_PWRON,
},
[PALMAS_LONG_PRESS_KEY_IRQ] = {
.mask = PALMAS_INT1_STATUS_LONG_PRESS_KEY,
},
[PALMAS_RPWRON_IRQ] = {
.mask = PALMAS_INT1_STATUS_RPWRON,
},
[PALMAS_PWRDOWN_IRQ] = {
.mask = PALMAS_INT1_STATUS_PWRDOWN,
},
[PALMAS_HOTDIE_IRQ] = {
.mask = PALMAS_INT1_STATUS_HOTDIE,
},
[PALMAS_VSYS_MON_IRQ] = {
.mask = PALMAS_INT1_STATUS_VSYS_MON,
},
[PALMAS_VBAT_MON_IRQ] = {
.mask = PALMAS_INT1_STATUS_VBAT_MON,
},
/* INT2 IRQs*/
[PALMAS_RTC_ALARM_IRQ] = {
.mask = PALMAS_INT2_STATUS_RTC_ALARM,
.reg_offset = 1,
},
[PALMAS_RTC_TIMER_IRQ] = {
.mask = PALMAS_INT2_STATUS_RTC_TIMER,
.reg_offset = 1,
},
[PALMAS_WDT_IRQ] = {
.mask = PALMAS_INT2_STATUS_WDT,
.reg_offset = 1,
},
[PALMAS_BATREMOVAL_IRQ] = {
.mask = PALMAS_INT2_STATUS_BATREMOVAL,
.reg_offset = 1,
},
[PALMAS_RESET_IN_IRQ] = {
.mask = PALMAS_INT2_STATUS_RESET_IN,
.reg_offset = 1,
},
[PALMAS_FBI_BB_IRQ] = {
.mask = PALMAS_INT2_STATUS_FBI_BB,
.reg_offset = 1,
},
[PALMAS_SHORT_IRQ] = {
.mask = PALMAS_INT2_STATUS_SHORT,
.reg_offset = 1,
},
[PALMAS_VAC_ACOK_IRQ] = {
.mask = PALMAS_INT2_STATUS_VAC_ACOK,
.reg_offset = 1,
},
/* INT3 IRQs */
[PALMAS_GPADC_AUTO_0_IRQ] = {
.mask = PALMAS_INT3_STATUS_GPADC_AUTO_0,
.reg_offset = 2,
},
[PALMAS_GPADC_AUTO_1_IRQ] = {
.mask = PALMAS_INT3_STATUS_GPADC_AUTO_1,
.reg_offset = 2,
},
[PALMAS_GPADC_EOC_SW_IRQ] = {
.mask = PALMAS_INT3_STATUS_GPADC_EOC_SW,
.reg_offset = 2,
},
[PALMAS_GPADC_EOC_RT_IRQ] = {
.mask = PALMAS_INT3_STATUS_GPADC_EOC_RT,
.reg_offset = 2,
},
[PALMAS_ID_OTG_IRQ] = {
.mask = PALMAS_INT3_STATUS_ID_OTG,
.reg_offset = 2,
},
[PALMAS_ID_IRQ] = {
.mask = PALMAS_INT3_STATUS_ID,
.reg_offset = 2,
},
[PALMAS_VBUS_OTG_IRQ] = {
.mask = PALMAS_INT3_STATUS_VBUS_OTG,
.reg_offset = 2,
},
[PALMAS_VBUS_IRQ] = {
.mask = PALMAS_INT3_STATUS_VBUS,
.reg_offset = 2,
},
/* INT4 IRQs */
[PALMAS_GPIO_0_IRQ] = {
.mask = PALMAS_INT4_STATUS_GPIO_0,
.reg_offset = 3,
},
[PALMAS_GPIO_1_IRQ] = {
.mask = PALMAS_INT4_STATUS_GPIO_1,
.reg_offset = 3,
},
[PALMAS_GPIO_2_IRQ] = {
.mask = PALMAS_INT4_STATUS_GPIO_2,
.reg_offset = 3,
},
[PALMAS_GPIO_3_IRQ] = {
.mask = PALMAS_INT4_STATUS_GPIO_3,
.reg_offset = 3,
},
[PALMAS_GPIO_4_IRQ] = {
.mask = PALMAS_INT4_STATUS_GPIO_4,
.reg_offset = 3,
},
[PALMAS_GPIO_5_IRQ] = {
.mask = PALMAS_INT4_STATUS_GPIO_5,
.reg_offset = 3,
},
[PALMAS_GPIO_6_IRQ] = {
.mask = PALMAS_INT4_STATUS_GPIO_6,
.reg_offset = 3,
},
[PALMAS_GPIO_7_IRQ] = {
.mask = PALMAS_INT4_STATUS_GPIO_7,
.reg_offset = 3,
},
};
static struct regmap_irq_chip palmas_irq_chip = {
.name = "palmas",
.irqs = palmas_irqs,
.num_irqs = ARRAY_SIZE(palmas_irqs),
.num_regs = 4,
.irq_reg_stride = 5,
.status_base = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE,
PALMAS_INT1_STATUS),
.mask_base = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE,
PALMAS_INT1_MASK),
};
static struct regmap_irq_chip tps65917_irq_chip = {
.name = "tps65917",
.irqs = tps65917_irqs,
.num_irqs = ARRAY_SIZE(tps65917_irqs),
.num_regs = 4,
.irq_reg_stride = 5,
.status_base = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE,
PALMAS_INT1_STATUS),
.mask_base = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE,
PALMAS_INT1_MASK),
};
int palmas_ext_control_req_config(struct palmas *palmas,
enum palmas_external_requestor_id id, int ext_ctrl, bool enable)
{
struct palmas_pmic_driver_data *pmic_ddata = palmas->pmic_ddata;
int preq_mask_bit = 0;
int reg_add = 0;
int bit_pos, ret;
if (!(ext_ctrl & PALMAS_EXT_REQ))
return 0;
if (id >= PALMAS_EXTERNAL_REQSTR_ID_MAX)
return 0;
if (ext_ctrl & PALMAS_EXT_CONTROL_NSLEEP) {
reg_add = PALMAS_NSLEEP_RES_ASSIGN;
preq_mask_bit = 0;
} else if (ext_ctrl & PALMAS_EXT_CONTROL_ENABLE1) {
reg_add = PALMAS_ENABLE1_RES_ASSIGN;
preq_mask_bit = 1;
} else if (ext_ctrl & PALMAS_EXT_CONTROL_ENABLE2) {
reg_add = PALMAS_ENABLE2_RES_ASSIGN;
preq_mask_bit = 2;
}
bit_pos = pmic_ddata->sleep_req_info[id].bit_pos;
reg_add += pmic_ddata->sleep_req_info[id].reg_offset;
if (enable)
ret = palmas_update_bits(palmas, PALMAS_RESOURCE_BASE,
reg_add, BIT(bit_pos), BIT(bit_pos));
else
ret = palmas_update_bits(palmas, PALMAS_RESOURCE_BASE,
reg_add, BIT(bit_pos), 0);
if (ret < 0) {
dev_err(palmas->dev, "Resource reg 0x%02x update failed %d\n",
reg_add, ret);
return ret;
}
/* Unmask the PREQ */
ret = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
PALMAS_POWER_CTRL, BIT(preq_mask_bit), 0);
if (ret < 0) {
dev_err(palmas->dev, "POWER_CTRL register update failed %d\n",
ret);
return ret;
}
return ret;
}
EXPORT_SYMBOL_GPL(palmas_ext_control_req_config);
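/*
* Usage sketch (editorial illustration; the requestor id is an
* assumption, any id from enum palmas_external_requestor_id works the
* same way): a client that wants a resource gated by the NSLEEP line
* would call
*
*	ret = palmas_ext_control_req_config(palmas,
*			PALMAS_EXTERNAL_REQSTR_ID_REGEN1,
*			PALMAS_EXT_CONTROL_NSLEEP, true);
*
* which sets that requestor's bit in NSLEEP_RES_ASSIGN and unmasks the
* matching PREQ bit in POWER_CTRL.
*/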
static int palmas_set_pdata_irq_flag(struct i2c_client *i2c,
struct palmas_platform_data *pdata)
{
struct irq_data *irq_data = irq_get_irq_data(i2c->irq);
if (!irq_data) {
dev_err(&i2c->dev, "Invalid IRQ: %d\n", i2c->irq);
return -EINVAL;
}
pdata->irq_flags = irqd_get_trigger_type(irq_data);
dev_info(&i2c->dev, "Irq flag is 0x%08x\n", pdata->irq_flags);
return 0;
}
static void palmas_dt_to_pdata(struct i2c_client *i2c,
struct palmas_platform_data *pdata)
{
struct device_node *node = i2c->dev.of_node;
int ret;
u32 prop;
ret = of_property_read_u32(node, "ti,mux-pad1", &prop);
if (!ret) {
pdata->mux_from_pdata = 1;
pdata->pad1 = prop;
}
ret = of_property_read_u32(node, "ti,mux-pad2", &prop);
if (!ret) {
pdata->mux_from_pdata = 1;
pdata->pad2 = prop;
}
/* The default for this register is all masked */
ret = of_property_read_u32(node, "ti,power-ctrl", &prop);
if (!ret)
pdata->power_ctrl = prop;
else
pdata->power_ctrl = PALMAS_POWER_CTRL_NSLEEP_MASK |
PALMAS_POWER_CTRL_ENABLE1_MASK |
PALMAS_POWER_CTRL_ENABLE2_MASK;
if (i2c->irq)
palmas_set_pdata_irq_flag(i2c, pdata);
pdata->pm_off = of_property_read_bool(node,
"ti,system-power-controller");
}
static struct palmas *palmas_dev;
static void palmas_power_off(void)
{
unsigned int addr;
int ret, slave;
if (!palmas_dev)
return;
slave = PALMAS_BASE_TO_SLAVE(PALMAS_PMU_CONTROL_BASE);
addr = PALMAS_BASE_TO_REG(PALMAS_PMU_CONTROL_BASE, PALMAS_DEV_CTRL);
ret = regmap_update_bits(
palmas_dev->regmap[slave],
addr,
PALMAS_DEV_CTRL_DEV_ON,
0);
if (ret)
pr_err("%s: Unable to write to DEV_CTRL_DEV_ON: %d\n",
__func__, ret);
}
static unsigned int palmas_features = PALMAS_PMIC_FEATURE_SMPS10_BOOST;
static unsigned int tps659038_features;
struct palmas_driver_data {
unsigned int *features;
struct regmap_irq_chip *irq_chip;
};
static struct palmas_driver_data palmas_data = {
.features = &palmas_features,
.irq_chip = &palmas_irq_chip,
};
static struct palmas_driver_data tps659038_data = {
.features = &tps659038_features,
.irq_chip = &palmas_irq_chip,
};
static struct palmas_driver_data tps65917_data = {
.features = &tps659038_features,
.irq_chip = &tps65917_irq_chip,
};
static const struct of_device_id of_palmas_match_tbl[] = {
{
.compatible = "ti,palmas",
.data = &palmas_data,
},
{
.compatible = "ti,tps659038",
.data = &tps659038_data,
},
{
.compatible = "ti,tps65917",
.data = &tps65917_data,
},
{ },
};
MODULE_DEVICE_TABLE(of, of_palmas_match_tbl);
static int palmas_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct palmas *palmas;
struct palmas_platform_data *pdata;
struct palmas_driver_data *driver_data;
struct device_node *node = i2c->dev.of_node;
int ret = 0, i;
unsigned int reg, addr;
int slave;
const struct of_device_id *match;
pdata = dev_get_platdata(&i2c->dev);
if (node && !pdata) {
pdata = devm_kzalloc(&i2c->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
palmas_dt_to_pdata(i2c, pdata);
}
if (!pdata)
return -EINVAL;
palmas = devm_kzalloc(&i2c->dev, sizeof(struct palmas), GFP_KERNEL);
if (palmas == NULL)
return -ENOMEM;
i2c_set_clientdata(i2c, palmas);
palmas->dev = &i2c->dev;
palmas->irq = i2c->irq;
match = of_match_device(of_palmas_match_tbl, &i2c->dev);
if (!match)
return -ENODATA;
driver_data = (struct palmas_driver_data *)match->data;
palmas->features = *driver_data->features;
for (i = 0; i < PALMAS_NUM_CLIENTS; i++) {
if (i == 0)
palmas->i2c_clients[i] = i2c;
else {
palmas->i2c_clients[i] =
i2c_new_dummy(i2c->adapter,
i2c->addr + i);
if (!palmas->i2c_clients[i]) {
dev_err(palmas->dev,
"can't attach client %d\n", i);
ret = -ENOMEM;
goto err_i2c;
}
palmas->i2c_clients[i]->dev.of_node = of_node_get(node);
}
palmas->regmap[i] = devm_regmap_init_i2c(palmas->i2c_clients[i],
&palmas_regmap_config[i]);
if (IS_ERR(palmas->regmap[i])) {
ret = PTR_ERR(palmas->regmap[i]);
dev_err(palmas->dev,
"Failed to allocate regmap %d, err: %d\n",
i, ret);
goto err_i2c;
}
}
if (!palmas->irq) {
dev_warn(palmas->dev, "IRQ missing: skipping irq request\n");
goto no_irq;
}
/* Change interrupt line output polarity */
if (pdata->irq_flags & IRQ_TYPE_LEVEL_HIGH)
reg = PALMAS_POLARITY_CTRL_INT_POLARITY;
else
reg = 0;
ret = palmas_update_bits(palmas, PALMAS_PU_PD_OD_BASE,
PALMAS_POLARITY_CTRL, PALMAS_POLARITY_CTRL_INT_POLARITY,
reg);
if (ret < 0) {
dev_err(palmas->dev, "POLARITY_CTRL updat failed: %d\n", ret);
goto err_i2c;
}
/* Change IRQ into clear on read mode for efficiency */
slave = PALMAS_BASE_TO_SLAVE(PALMAS_INTERRUPT_BASE);
addr = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE, PALMAS_INT_CTRL);
reg = PALMAS_INT_CTRL_INT_CLEAR;
regmap_write(palmas->regmap[slave], addr, reg);
ret = regmap_add_irq_chip(palmas->regmap[slave], palmas->irq,
IRQF_ONESHOT | pdata->irq_flags, 0,
driver_data->irq_chip, &palmas->irq_data);
if (ret < 0)
goto err_i2c;
no_irq:
slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE,
PALMAS_PRIMARY_SECONDARY_PAD1);
if (pdata->mux_from_pdata) {
reg = pdata->pad1;
ret = regmap_write(palmas->regmap[slave], addr, reg);
if (ret)
goto err_irq;
} else {
ret = regmap_read(palmas->regmap[slave], addr, &reg);
if (ret)
goto err_irq;
}
if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_0))
palmas->gpio_muxed |= PALMAS_GPIO_0_MUXED;
if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK))
palmas->gpio_muxed |= PALMAS_GPIO_1_MUXED;
else if ((reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK) ==
(2 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT))
palmas->led_muxed |= PALMAS_LED1_MUXED;
else if ((reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_MASK) ==
(3 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_1_SHIFT))
palmas->pwm_muxed |= PALMAS_PWM1_MUXED;
if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_MASK))
palmas->gpio_muxed |= PALMAS_GPIO_2_MUXED;
else if ((reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_MASK) ==
(2 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_SHIFT))
palmas->led_muxed |= PALMAS_LED2_MUXED;
else if ((reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_MASK) ==
(3 << PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_2_SHIFT))
palmas->pwm_muxed |= PALMAS_PWM2_MUXED;
if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD1_GPIO_3))
palmas->gpio_muxed |= PALMAS_GPIO_3_MUXED;
addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE,
PALMAS_PRIMARY_SECONDARY_PAD2);
if (pdata->mux_from_pdata) {
reg = pdata->pad2;
ret = regmap_write(palmas->regmap[slave], addr, reg);
if (ret)
goto err_irq;
} else {
ret = regmap_read(palmas->regmap[slave], addr, &reg);
if (ret)
goto err_irq;
}
if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_4))
palmas->gpio_muxed |= PALMAS_GPIO_4_MUXED;
if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK))
palmas->gpio_muxed |= PALMAS_GPIO_5_MUXED;
if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_6))
palmas->gpio_muxed |= PALMAS_GPIO_6_MUXED;
if (!(reg & PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK))
palmas->gpio_muxed |= PALMAS_GPIO_7_MUXED;
dev_info(palmas->dev, "Muxing GPIO %x, PWM %x, LED %x\n",
palmas->gpio_muxed, palmas->pwm_muxed,
palmas->led_muxed);
reg = pdata->power_ctrl;
slave = PALMAS_BASE_TO_SLAVE(PALMAS_PMU_CONTROL_BASE);
addr = PALMAS_BASE_TO_REG(PALMAS_PMU_CONTROL_BASE, PALMAS_POWER_CTRL);
ret = regmap_write(palmas->regmap[slave], addr, reg);
if (ret)
goto err_irq;
/*
* If we are probing with DT, do this the DT way and return here;
* otherwise continue and add devices using mfd helpers.
*/
if (node) {
ret = of_platform_populate(node, NULL, NULL, &i2c->dev);
if (ret < 0) {
goto err_irq;
} else if (pdata->pm_off && !pm_power_off) {
palmas_dev = palmas;
pm_power_off = palmas_power_off;
}
}
return ret;
err_irq:
regmap_del_irq_chip(palmas->irq, palmas->irq_data);
err_i2c:
for (i = 1; i < PALMAS_NUM_CLIENTS; i++) {
if (palmas->i2c_clients[i])
i2c_unregister_device(palmas->i2c_clients[i]);
}
return ret;
}
static int palmas_i2c_remove(struct i2c_client *i2c)
{
struct palmas *palmas = i2c_get_clientdata(i2c);
int i;
regmap_del_irq_chip(palmas->irq, palmas->irq_data);
for (i = 1; i < PALMAS_NUM_CLIENTS; i++) {
if (palmas->i2c_clients[i])
i2c_unregister_device(palmas->i2c_clients[i]);
}
if (palmas == palmas_dev) {
pm_power_off = NULL;
palmas_dev = NULL;
}
return 0;
}
static const struct i2c_device_id palmas_i2c_id[] = {
{ "palmas", },
{ "twl6035", },
{ "twl6037", },
{ "tps65913", },
{ /* end */ }
};
MODULE_DEVICE_TABLE(i2c, palmas_i2c_id);
static struct i2c_driver palmas_i2c_driver = {
.driver = {
.name = "palmas",
.of_match_table = of_palmas_match_tbl,
},
.probe = palmas_i2c_probe,
.remove = palmas_i2c_remove,
.id_table = palmas_i2c_id,
};
static int __init palmas_i2c_init(void)
{
return i2c_add_driver(&palmas_i2c_driver);
}
/* init early so consumer devices can complete system boot */
subsys_initcall(palmas_i2c_init);
static void __exit palmas_i2c_exit(void)
{
i2c_del_driver(&palmas_i2c_driver);
}
module_exit(palmas_i2c_exit);
MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
MODULE_DESCRIPTION("Palmas chip family multi-function driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
HostZero/android_kernel_zuk_msm8996 | drivers/video/fbdev/platinumfb.c | 845 | 20095 | /*
* platinumfb.c -- frame buffer device for the PowerMac 'platinum' display
*
* Copyright (C) 1998 Franz Sirl
*
* Frame buffer structure from:
* drivers/video/controlfb.c -- frame buffer device for
* Apple 'control' display chip.
* Copyright (C) 1998 Dan Jacobowitz
*
* Hardware information from:
* platinum.c: Console support for PowerMac "platinum" display adaptor.
* Copyright (C) 1996 Paul Mackerras and Mark Abene
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#undef DEBUG
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/nvram.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pgtable.h>
#include "macmodes.h"
#include "platinumfb.h"
static int default_vmode = VMODE_NVRAM;
static int default_cmode = CMODE_NVRAM;
struct fb_info_platinum {
struct fb_info *info;
int vmode, cmode;
int xres, yres;
int vxres, vyres;
int xoffset, yoffset;
struct {
__u8 red, green, blue;
} palette[256];
u32 pseudo_palette[16];
volatile struct cmap_regs __iomem *cmap_regs;
unsigned long cmap_regs_phys;
volatile struct platinum_regs __iomem *platinum_regs;
unsigned long platinum_regs_phys;
__u8 __iomem *frame_buffer;
volatile __u8 __iomem *base_frame_buffer;
unsigned long frame_buffer_phys;
unsigned long total_vram;
int clktype;
int dactype;
struct resource rsrc_fb, rsrc_reg;
};
/*
* Frame buffer device API
*/
static int platinumfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info);
static int platinumfb_blank(int blank_mode, struct fb_info *info);
static int platinumfb_set_par (struct fb_info *info);
static int platinumfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info);
/*
* internal functions
*/
static inline int platinum_vram_reqd(int video_mode, int color_mode);
static int read_platinum_sense(struct fb_info_platinum *pinfo);
static void set_platinum_clock(struct fb_info_platinum *pinfo);
static void platinum_set_hardware(struct fb_info_platinum *pinfo);
static int platinum_var_to_par(struct fb_var_screeninfo *var,
struct fb_info_platinum *pinfo,
int check_only);
/*
* Interface used by the world
*/
static struct fb_ops platinumfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = platinumfb_check_var,
.fb_set_par = platinumfb_set_par,
.fb_setcolreg = platinumfb_setcolreg,
.fb_blank = platinumfb_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
/*
* Checks a var structure
*/
static int platinumfb_check_var (struct fb_var_screeninfo *var, struct fb_info *info)
{
return platinum_var_to_par(var, info->par, 1);
}
/*
* Applies current var to display
*/
static int platinumfb_set_par (struct fb_info *info)
{
struct fb_info_platinum *pinfo = info->par;
struct platinum_regvals *init;
int err, offset = 0x20;
if((err = platinum_var_to_par(&info->var, pinfo, 0))) {
printk (KERN_ERR "platinumfb_set_par: error calling"
" platinum_var_to_par: %d.\n", err);
return err;
}
platinum_set_hardware(pinfo);
init = platinum_reg_init[pinfo->vmode-1];
if ((pinfo->vmode == VMODE_832_624_75) && (pinfo->cmode > CMODE_8))
offset = 0x10;
info->screen_base = pinfo->frame_buffer + init->fb_offset + offset;
mutex_lock(&info->mm_lock);
info->fix.smem_start = (pinfo->frame_buffer_phys) + init->fb_offset + offset;
mutex_unlock(&info->mm_lock);
info->fix.visual = (pinfo->cmode == CMODE_8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
info->fix.line_length = vmode_attrs[pinfo->vmode-1].hres * (1<<pinfo->cmode)
+ offset;
printk("line_length: %x\n", info->fix.line_length);
return 0;
}
static int platinumfb_blank(int blank, struct fb_info *fb)
{
/*
* Blank the screen if blank != 0, else unblank. (When a driver
* provides no fb_blank hook, the core instead blanks by setting the
* CLUT (Color Look Up Table) to all black.) Return 0 if blanking
* succeeded, != 0 if un-/blanking failed due to e.g. a video mode
* which doesn't support it. Implements VESA suspend and powerdown
* modes on hardware that supports disabling hsync/vsync:
* blank == 2: suspend vsync
* blank == 3: suspend hsync
* blank == 4: powerdown
*/
/* [danj] I think there's something fishy about those constants... */
/*
struct fb_info_platinum *info = (struct fb_info_platinum *) fb;
int ctrl;
ctrl = ld_le32(&info->platinum_regs->ctrl.r) | 0x33;
if (blank)
--blank_mode;
if (blank & VESA_VSYNC_SUSPEND)
ctrl &= ~3;
if (blank & VESA_HSYNC_SUSPEND)
ctrl &= ~0x30;
out_le32(&info->platinum_regs->ctrl.r, ctrl);
*/
/* TODO: Figure out how the heck to powerdown this thing! */
return 0;
}
static int platinumfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
struct fb_info_platinum *pinfo = info->par;
volatile struct cmap_regs __iomem *cmap_regs = pinfo->cmap_regs;
if (regno > 255)
return 1;
red >>= 8;
green >>= 8;
blue >>= 8;
pinfo->palette[regno].red = red;
pinfo->palette[regno].green = green;
pinfo->palette[regno].blue = blue;
out_8(&cmap_regs->addr, regno); /* tell clut what addr to fill */
out_8(&cmap_regs->lut, red); /* send one color channel at */
out_8(&cmap_regs->lut, green); /* a time... */
out_8(&cmap_regs->lut, blue);
if (regno < 16) {
int i;
u32 *pal = info->pseudo_palette;
switch (pinfo->cmode) {
case CMODE_16:
pal[regno] = (regno << 10) | (regno << 5) | regno;
break;
case CMODE_32:
i = (regno << 8) | regno;
pal[regno] = (i << 16) | i;
break;
}
}
return 0;
}
static inline int platinum_vram_reqd(int video_mode, int color_mode)
{
int baseval = vmode_attrs[video_mode-1].hres * (1<<color_mode);
if ((video_mode == VMODE_832_624_75) && (color_mode > CMODE_8))
baseval += 0x10;
else
baseval += 0x20;
return vmode_attrs[video_mode-1].vres * baseval + 0x1000;
}
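/*
* Worked example (assuming, per macmodes, that VMODE_832_624_75 has
* hres 832 / vres 624 and that CMODE_16 means 2 bytes per pixel):
* one row needs 832 * 2 + 0x10 = 1680 bytes, so the whole mode needs
* 1680 * 624 + 0x1000 = 1052416 bytes, i.e. just over 1MB of VRAM.
*/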
#define STORE_D2(a, d) { \
out_8(&cmap_regs->addr, (a+32)); \
out_8(&cmap_regs->d2, (d)); \
}
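/*
* STORE_D2 writes DACula register 'a': the register index (offset by
* 32) goes out on the CLUT address port, the value on the d2 data
* port.
*/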
static void set_platinum_clock(struct fb_info_platinum *pinfo)
{
volatile struct cmap_regs __iomem *cmap_regs = pinfo->cmap_regs;
struct platinum_regvals *init;
init = platinum_reg_init[pinfo->vmode-1];
STORE_D2(6, 0xc6);
out_8(&cmap_regs->addr,3+32);
if (in_8(&cmap_regs->d2) == 2) {
STORE_D2(7, init->clock_params[pinfo->clktype][0]);
STORE_D2(8, init->clock_params[pinfo->clktype][1]);
STORE_D2(3, 3);
} else {
STORE_D2(4, init->clock_params[pinfo->clktype][0]);
STORE_D2(5, init->clock_params[pinfo->clktype][1]);
STORE_D2(3, 2);
}
__delay(5000);
STORE_D2(9, 0xa6);
}
/* Now how about actually saying, Make it so! */
/* Some things in here probably don't need to be done each time. */
static void platinum_set_hardware(struct fb_info_platinum *pinfo)
{
volatile struct platinum_regs __iomem *platinum_regs = pinfo->platinum_regs;
volatile struct cmap_regs __iomem *cmap_regs = pinfo->cmap_regs;
struct platinum_regvals *init;
int i;
int vmode, cmode;
vmode = pinfo->vmode;
cmode = pinfo->cmode;
init = platinum_reg_init[vmode - 1];
/* Initialize display timing registers */
out_be32(&platinum_regs->reg[24].r, 7); /* turn display off */
for (i = 0; i < 26; ++i)
out_be32(&platinum_regs->reg[i+32].r, init->regs[i]);
out_be32(&platinum_regs->reg[26+32].r, (pinfo->total_vram == 0x100000 ?
init->offset[cmode] + 4 - cmode :
init->offset[cmode]));
out_be32(&platinum_regs->reg[16].r, (unsigned) pinfo->frame_buffer_phys+init->fb_offset+0x10);
out_be32(&platinum_regs->reg[18].r, init->pitch[cmode]);
out_be32(&platinum_regs->reg[19].r, (pinfo->total_vram == 0x100000 ?
init->mode[cmode+1] :
init->mode[cmode]));
out_be32(&platinum_regs->reg[20].r, (pinfo->total_vram == 0x100000 ? 0x11 : 0x1011));
out_be32(&platinum_regs->reg[21].r, 0x100);
out_be32(&platinum_regs->reg[22].r, 1);
out_be32(&platinum_regs->reg[23].r, 1);
out_be32(&platinum_regs->reg[26].r, 0xc00);
out_be32(&platinum_regs->reg[27].r, 0x235);
/* out_be32(&platinum_regs->reg[27].r, 0x2aa); */
STORE_D2(0, (pinfo->total_vram == 0x100000 ?
init->dacula_ctrl[cmode] & 0xf :
init->dacula_ctrl[cmode]));
STORE_D2(1, 4);
STORE_D2(2, 0);
set_platinum_clock(pinfo);
out_be32(&platinum_regs->reg[24].r, 0); /* turn display on */
}
/*
* Set misc info vars for this driver
*/
static void platinum_init_info(struct fb_info *info,
struct fb_info_platinum *pinfo)
{
/* Fill fb_info */
info->fbops = &platinumfb_ops;
info->pseudo_palette = pinfo->pseudo_palette;
info->flags = FBINFO_DEFAULT;
info->screen_base = pinfo->frame_buffer + 0x20;
fb_alloc_cmap(&info->cmap, 256, 0);
/* Fill fix common fields */
strcpy(info->fix.id, "platinum");
info->fix.mmio_start = pinfo->platinum_regs_phys;
info->fix.mmio_len = 0x1000;
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.smem_start = pinfo->frame_buffer_phys + 0x20; /* will be updated later */
info->fix.smem_len = pinfo->total_vram - 0x20;
info->fix.ywrapstep = 0;
info->fix.xpanstep = 0;
info->fix.ypanstep = 0;
info->fix.type_aux = 0;
info->fix.accel = FB_ACCEL_NONE;
}
static int platinum_init_fb(struct fb_info *info)
{
struct fb_info_platinum *pinfo = info->par;
struct fb_var_screeninfo var;
int sense, rc;
sense = read_platinum_sense(pinfo);
printk(KERN_INFO "platinumfb: Monitor sense value = 0x%x, ", sense);
if (default_vmode == VMODE_NVRAM) {
#ifdef CONFIG_NVRAM
default_vmode = nvram_read_byte(NV_VMODE);
if (default_vmode <= 0 || default_vmode > VMODE_MAX ||
!platinum_reg_init[default_vmode-1])
#endif
default_vmode = VMODE_CHOOSE;
}
if (default_vmode == VMODE_CHOOSE) {
default_vmode = mac_map_monitor_sense(sense);
}
if (default_vmode <= 0 || default_vmode > VMODE_MAX)
default_vmode = VMODE_640_480_60;
#ifdef CONFIG_NVRAM
if (default_cmode == CMODE_NVRAM)
default_cmode = nvram_read_byte(NV_CMODE);
#endif
if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
default_cmode = CMODE_8;
/*
* Reduce the pixel size if we don't have enough VRAM.
*/
while(default_cmode > CMODE_8 &&
platinum_vram_reqd(default_vmode, default_cmode) > pinfo->total_vram)
default_cmode--;
printk("platinumfb: Using video mode %d and color mode %d.\n", default_vmode, default_cmode);
/* Setup default var */
if (mac_vmode_to_var(default_vmode, default_cmode, &var) < 0) {
/* This shouldn't happen! */
printk("mac_vmode_to_var(%d, %d,) failed\n", default_vmode, default_cmode);
try_again:
default_vmode = VMODE_640_480_60;
default_cmode = CMODE_8;
if (mac_vmode_to_var(default_vmode, default_cmode, &var) < 0) {
printk(KERN_ERR "platinumfb: mac_vmode_to_var() failed\n");
return -ENXIO;
}
}
/* Initialize info structure */
platinum_init_info(info, pinfo);
/* Apply default var */
info->var = var;
var.activate = FB_ACTIVATE_NOW;
rc = fb_set_var(info, &var);
if (rc && (default_vmode != VMODE_640_480_60 || default_cmode != CMODE_8))
goto try_again;
/* Register with fbdev layer */
rc = register_framebuffer(info);
if (rc < 0)
return rc;
fb_info(info, "Apple Platinum frame buffer device\n");
return 0;
}
/*
* Get the monitor sense value.
* Note that this can be called before calibrate_delay,
* so we can't use udelay.
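* The result packs the undriven (raw) sense value into bits 10..8 and
* the extended readings into bits 5..4 (A driven low), 3..2 (B low)
* and 1..0 (C low).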
*/
static int read_platinum_sense(struct fb_info_platinum *info)
{
volatile struct platinum_regs __iomem *platinum_regs = info->platinum_regs;
int sense;
out_be32(&platinum_regs->reg[23].r, 7); /* turn off drivers */
__delay(2000);
sense = (~in_be32(&platinum_regs->reg[23].r) & 7) << 8;
/* drive each sense line low in turn and collect the other 2 */
out_be32(&platinum_regs->reg[23].r, 3); /* drive A low */
__delay(2000);
sense |= (~in_be32(&platinum_regs->reg[23].r) & 3) << 4;
out_be32(&platinum_regs->reg[23].r, 5); /* drive B low */
__delay(2000);
sense |= (~in_be32(&platinum_regs->reg[23].r) & 4) << 1;
sense |= (~in_be32(&platinum_regs->reg[23].r) & 1) << 2;
out_be32(&platinum_regs->reg[23].r, 6); /* drive C low */
__delay(2000);
sense |= (~in_be32(&platinum_regs->reg[23].r) & 6) >> 1;
out_be32(&platinum_regs->reg[23].r, 7); /* turn off drivers */
return sense;
}
/*
* This routine takes a user-supplied var, and picks the best vmode/cmode from it.
* It also updates the var structure to the actual mode data obtained
*/
static int platinum_var_to_par(struct fb_var_screeninfo *var,
struct fb_info_platinum *pinfo,
int check_only)
{
int vmode, cmode;
if (mac_var_to_vmode(var, &vmode, &cmode) != 0) {
printk(KERN_ERR "platinum_var_to_par: mac_var_to_vmode unsuccessful.\n");
printk(KERN_ERR "platinum_var_to_par: var->xres = %d\n", var->xres);
printk(KERN_ERR "platinum_var_to_par: var->yres = %d\n", var->yres);
printk(KERN_ERR "platinum_var_to_par: var->xres_virtual = %d\n", var->xres_virtual);
printk(KERN_ERR "platinum_var_to_par: var->yres_virtual = %d\n", var->yres_virtual);
printk(KERN_ERR "platinum_var_to_par: var->bits_per_pixel = %d\n", var->bits_per_pixel);
printk(KERN_ERR "platinum_var_to_par: var->pixclock = %d\n", var->pixclock);
printk(KERN_ERR "platinum_var_to_par: var->vmode = %d\n", var->vmode);
return -EINVAL;
}
if (!platinum_reg_init[vmode-1]) {
printk(KERN_ERR "platinum_var_to_par, vmode %d not valid.\n", vmode);
return -EINVAL;
}
if (platinum_vram_reqd(vmode, cmode) > pinfo->total_vram) {
printk(KERN_ERR "platinum_var_to_par, not enough ram for vmode %d, cmode %d.\n", vmode, cmode);
return -EINVAL;
}
if (mac_vmode_to_var(vmode, cmode, var))
return -EINVAL;
if (check_only)
return 0;
pinfo->vmode = vmode;
pinfo->cmode = cmode;
pinfo->xres = vmode_attrs[vmode-1].hres;
pinfo->yres = vmode_attrs[vmode-1].vres;
pinfo->xoffset = 0;
pinfo->yoffset = 0;
pinfo->vxres = pinfo->xres;
pinfo->vyres = pinfo->yres;
return 0;
}
/*
* Parse user specified options (`video=platinumfb:')
*/
static int __init platinumfb_setup(char *options)
{
char *this_opt;
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "vmode:", 6)) {
int vmode = simple_strtoul(this_opt+6, NULL, 0);
if (vmode > 0 && vmode <= VMODE_MAX)
default_vmode = vmode;
} else if (!strncmp(this_opt, "cmode:", 6)) {
int depth = simple_strtoul(this_opt+6, NULL, 0);
switch (depth) {
case 0:
case 8:
default_cmode = CMODE_8;
break;
case 15:
case 16:
default_cmode = CMODE_16;
break;
case 24:
case 32:
default_cmode = CMODE_32;
break;
}
}
}
return 0;
}
#ifdef __powerpc__
#define invalidate_cache(addr) \
asm volatile("eieio; dcbf 0,%1" \
: "=m" (*(addr)) : "r" (addr) : "memory");
#else
#define invalidate_cache(addr)
#endif
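/*
* On PowerPC the eieio/dcbf pair orders the preceding store and then
* flushes its cache block, so the VRAM bank probe below reads back
* what actually reached the framebuffer rather than a cached copy.
*/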
static int platinumfb_probe(struct platform_device* odev)
{
struct device_node *dp = odev->dev.of_node;
struct fb_info *info;
struct fb_info_platinum *pinfo;
volatile __u8 *fbuffer;
int bank0, bank1, bank2, bank3, rc;
dev_info(&odev->dev, "Found Apple Platinum video hardware\n");
info = framebuffer_alloc(sizeof(*pinfo), &odev->dev);
if (info == NULL) {
dev_err(&odev->dev, "Failed to allocate fbdev !\n");
return -ENOMEM;
}
pinfo = info->par;
if (of_address_to_resource(dp, 0, &pinfo->rsrc_reg) ||
of_address_to_resource(dp, 1, &pinfo->rsrc_fb)) {
dev_err(&odev->dev, "Can't get resources\n");
framebuffer_release(info);
return -ENXIO;
}
dev_dbg(&odev->dev, " registers : 0x%llx...0x%llx\n",
(unsigned long long)pinfo->rsrc_reg.start,
(unsigned long long)pinfo->rsrc_reg.end);
dev_dbg(&odev->dev, " framebuffer: 0x%llx...0x%llx\n",
(unsigned long long)pinfo->rsrc_fb.start,
(unsigned long long)pinfo->rsrc_fb.end);
/* Do not try to request the register space: it overlaps with the
* northbridge and the request can fail. Only request the framebuffer.
*/
if (!request_mem_region(pinfo->rsrc_fb.start,
resource_size(&pinfo->rsrc_fb),
"platinumfb framebuffer")) {
printk(KERN_ERR "platinumfb: Can't request framebuffer !\n");
framebuffer_release(info);
return -ENXIO;
}
/* frame buffer - map only 4MB */
pinfo->frame_buffer_phys = pinfo->rsrc_fb.start;
pinfo->frame_buffer = __ioremap(pinfo->rsrc_fb.start, 0x400000,
_PAGE_WRITETHRU);
pinfo->base_frame_buffer = pinfo->frame_buffer;
/* registers */
pinfo->platinum_regs_phys = pinfo->rsrc_reg.start;
pinfo->platinum_regs = ioremap(pinfo->rsrc_reg.start, 0x1000);
pinfo->cmap_regs_phys = 0xf301b000; /* XXX not in prom? */
request_mem_region(pinfo->cmap_regs_phys, 0x1000, "platinumfb cmap");
pinfo->cmap_regs = ioremap(pinfo->cmap_regs_phys, 0x1000);
/* Grok total video ram */
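/*
* Bank probing: with the maximum-VRAM configuration selected, write a
* distinct signature byte at the start of each potential 1MB bank and
* flush the cache; a bank is populated iff its signature reads back
* intact. (The zero written 8 bytes in presumably keeps an absent
* bank's floating bus from echoing the signature back.)
*/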
out_be32(&pinfo->platinum_regs->reg[16].r, (unsigned)pinfo->frame_buffer_phys);
out_be32(&pinfo->platinum_regs->reg[20].r, 0x1011); /* select max vram */
out_be32(&pinfo->platinum_regs->reg[24].r, 0); /* switch in vram */
fbuffer = pinfo->base_frame_buffer;
fbuffer[0x100000] = 0x34;
fbuffer[0x100008] = 0x0;
invalidate_cache(&fbuffer[0x100000]);
fbuffer[0x200000] = 0x56;
fbuffer[0x200008] = 0x0;
invalidate_cache(&fbuffer[0x200000]);
fbuffer[0x300000] = 0x78;
fbuffer[0x300008] = 0x0;
invalidate_cache(&fbuffer[0x300000]);
bank0 = 1; /* builtin 1MB vram, always there */
bank1 = fbuffer[0x100000] == 0x34;
bank2 = fbuffer[0x200000] == 0x56;
bank3 = fbuffer[0x300000] == 0x78;
pinfo->total_vram = (bank0 + bank1 + bank2 + bank3) * 0x100000;
printk(KERN_INFO "platinumfb: Total VRAM = %dMB (%d%d%d%d)\n",
(unsigned int) (pinfo->total_vram / 1024 / 1024),
bank3, bank2, bank1, bank0);
/*
* Try to determine whether we have an old or a new DACula.
*/
out_8(&pinfo->cmap_regs->addr, 0x40);
pinfo->dactype = in_8(&pinfo->cmap_regs->d2);
switch (pinfo->dactype) {
case 0x3c:
pinfo->clktype = 1;
printk(KERN_INFO "platinumfb: DACula type 0x3c\n");
break;
case 0x84:
pinfo->clktype = 0;
printk(KERN_INFO "platinumfb: DACula type 0x84\n");
break;
default:
pinfo->clktype = 0;
printk(KERN_INFO "platinumfb: Unknown DACula type: %x\n", pinfo->dactype);
break;
}
dev_set_drvdata(&odev->dev, info);
rc = platinum_init_fb(info);
if (rc != 0) {
iounmap(pinfo->frame_buffer);
iounmap(pinfo->platinum_regs);
iounmap(pinfo->cmap_regs);
framebuffer_release(info);
}
return rc;
}
static int platinumfb_remove(struct platform_device* odev)
{
struct fb_info *info = dev_get_drvdata(&odev->dev);
struct fb_info_platinum *pinfo = info->par;
unregister_framebuffer (info);
/* Unmap frame buffer and registers */
iounmap(pinfo->frame_buffer);
iounmap(pinfo->platinum_regs);
iounmap(pinfo->cmap_regs);
release_mem_region(pinfo->rsrc_fb.start,
resource_size(&pinfo->rsrc_fb));
release_mem_region(pinfo->cmap_regs_phys, 0x1000);
framebuffer_release(info);
return 0;
}
static struct of_device_id platinumfb_match[] =
{
{
.name = "platinum",
},
{},
};
static struct platform_driver platinum_driver =
{
.driver = {
.name = "platinumfb",
.owner = THIS_MODULE,
.of_match_table = platinumfb_match,
},
.probe = platinumfb_probe,
.remove = platinumfb_remove,
};
static int __init platinumfb_init(void)
{
#ifndef MODULE
char *option = NULL;
if (fb_get_options("platinumfb", &option))
return -ENODEV;
platinumfb_setup(option);
#endif
return platform_driver_register(&platinum_driver);
}
static void __exit platinumfb_exit(void)
{
platform_driver_unregister(&platinum_driver);
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("framebuffer driver for Apple Platinum video");
module_init(platinumfb_init);
#ifdef MODULE
module_exit(platinumfb_exit);
#endif
| gpl-2.0 |
rutvik95/android_kernel_ms013g | arch/arm/mach-msm/qdsp6/audiov2/qcelp_in.c | 1101 | 5995 | /*
* Copyright (C) 2009 Google, Inc.
* Copyright (C) 2009 HTC Corporation
* Copyright (c) 2009, The Linux Foundation. All rights reserved.
*
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <linux/msm_audio_qcp.h>
#include <mach/msm_qdsp6_audiov2.h>
#include "dal_audio.h"
#include "dal_audio_format.h"
#include <mach/debug_mm.h>
struct qcelp {
struct mutex lock;
struct msm_audio_qcelp_enc_config cfg;
struct msm_audio_stream_config str_cfg;
struct audio_client *audio_client;
};
static long q6_qcelp_in_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct qcelp *qcelp = file->private_data;
struct adsp_open_command rpc;
int rc = 0;
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
memset(&stats, 0, sizeof(stats));
if (copy_to_user((void *) arg, &stats, sizeof(stats)))
return -EFAULT;
return 0;
}
mutex_lock(&qcelp->lock);
switch (cmd) {
case AUDIO_START:
if (qcelp->audio_client) {
rc = -EBUSY;
break;
} else {
qcelp->audio_client = q6audio_open(AUDIO_FLAG_READ,
qcelp->str_cfg.buffer_size);
if (!qcelp->audio_client) {
/* qcelp is still reachable via file->private_data
* and is unlocked below, so do not free it here;
* q6_qcelp_in_release() frees it */
rc = -ENOMEM;
break;
}
}
tx_clk_freq = 8000;
memset(&rpc, 0, sizeof(rpc));
rpc.format_block.standard.format = ADSP_AUDIO_FORMAT_V13K_FS;
rpc.format_block.standard.channels = 1;
rpc.format_block.standard.bits_per_sample = 16;
rpc.format_block.standard.sampling_rate = 8000;
rpc.format_block.standard.is_signed = 1;
rpc.format_block.standard.is_interleaved = 0;
rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_OPEN_READ;
rpc.device = ADSP_AUDIO_DEVICE_ID_DEFAULT;
rpc.stream_context = ADSP_AUDIO_DEVICE_CONTEXT_RECORD;
rpc.buf_max_size = qcelp->str_cfg.buffer_size;
rpc.config.qcelp13k.min_rate = qcelp->cfg.min_bit_rate;
rpc.config.qcelp13k.max_rate = qcelp->cfg.max_bit_rate;
q6audio_start(qcelp->audio_client, &rpc, sizeof(rpc));
break;
case AUDIO_STOP:
break;
case AUDIO_FLUSH:
break;
case AUDIO_SET_VOLUME:
break;
case AUDIO_GET_STREAM_CONFIG:
if (copy_to_user((void *)arg, &qcelp->str_cfg,
sizeof(struct msm_audio_stream_config)))
rc = -EFAULT;
break;
case AUDIO_SET_STREAM_CONFIG:
if (copy_from_user(&qcelp->str_cfg, (void *)arg,
sizeof(struct msm_audio_stream_config))) {
rc = -EFAULT;
break;
}
if (qcelp->str_cfg.buffer_size < 35) {
pr_err("[%s:%s] Buffer size too small\n", __MM_FILE__,
__func__);
rc = -EINVAL;
break;
}
if (qcelp->str_cfg.buffer_count != 2) {
pr_info("[%s:%s] Buffer count set to 2\n", __MM_FILE__,
__func__);
qcelp->str_cfg.buffer_count = 2;
}
break;
case AUDIO_SET_QCELP_ENC_CONFIG:
if (copy_from_user(&qcelp->cfg, (void *) arg,
sizeof(struct msm_audio_qcelp_enc_config))) {
rc = -EFAULT;
break;
}
if (qcelp->cfg.min_bit_rate > 4 ||
qcelp->cfg.min_bit_rate < 1) {
pr_err("[%s:%s] invalid min bitrate\n", __MM_FILE__,
__func__);
rc = -EINVAL;
}
if (qcelp->cfg.max_bit_rate > 4 ||
qcelp->cfg.max_bit_rate < 1) {
pr_err("[%s:%s] invalid max bitrate\n", __MM_FILE__,
__func__);
rc = -EINVAL;
}
break;
case AUDIO_GET_QCELP_ENC_CONFIG:
if (copy_to_user((void *) arg, &qcelp->cfg,
sizeof(struct msm_audio_qcelp_enc_config)))
rc = -EFAULT;
break;
default:
rc = -EINVAL;
}
mutex_unlock(&qcelp->lock);
return rc;
}
static int q6_qcelp_in_open(struct inode *inode, struct file *file)
{
struct qcelp *qcelp;
qcelp = kmalloc(sizeof(struct qcelp), GFP_KERNEL);
if (qcelp == NULL) {
pr_err("[%s:%s] Could not allocate memory for qcelp driver\n",
__MM_FILE__, __func__);
return -ENOMEM;
}
mutex_init(&qcelp->lock);
file->private_data = qcelp;
qcelp->audio_client = NULL;
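/*
* Defaults: 35 bytes matches the minimum enforced in
* AUDIO_SET_STREAM_CONFIG above; it is assumed here to correspond to
* one full-rate QCELP-13K packet in QCP framing (1 rate octet + 34
* payload bytes).
*/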
qcelp->str_cfg.buffer_size = 35;
qcelp->str_cfg.buffer_count = 2;
qcelp->cfg.cdma_rate = CDMA_RATE_FULL;
qcelp->cfg.min_bit_rate = 1;
qcelp->cfg.max_bit_rate = 4;
return 0;
}
static ssize_t q6_qcelp_in_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct audio_client *ac;
struct audio_buffer *ab;
const char __user *start = buf;
struct qcelp *qcelp = file->private_data;
int xfer = 0;
int res;
mutex_lock(&qcelp->lock);
ac = qcelp->audio_client;
if (!ac) {
res = -ENODEV;
goto fail;
}
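/*
* Ping-pong between the client's two buffers: wait for the DSP to
* fill the current one, copy it out, hand it back with another read
* request, then flip cpu_buf. Note the loop copies whole frames of
* ab->actual_size and assumes the caller's buffer holds an integral
* number of them.
*/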
while (count > xfer) {
ab = ac->buf + ac->cpu_buf;
if (ab->used)
wait_event(ac->wait, (ab->used == 0));
xfer = ab->actual_size;
if (copy_to_user(buf, ab->data, xfer)) {
res = -EFAULT;
goto fail;
}
buf += xfer;
count -= xfer;
ab->used = 1;
q6audio_read(ac, ab);
ac->cpu_buf ^= 1;
}
res = buf - start;
fail:
mutex_unlock(&qcelp->lock);
return res;
}
static int q6_qcelp_in_release(struct inode *inode, struct file *file)
{
int rc = 0;
struct qcelp *qcelp = file->private_data;
mutex_lock(&qcelp->lock);
if (qcelp->audio_client)
rc = q6audio_close(qcelp->audio_client);
mutex_unlock(&qcelp->lock);
kfree(qcelp);
return rc;
}
static const struct file_operations q6_qcelp_in_fops = {
.owner = THIS_MODULE,
.open = q6_qcelp_in_open,
.read = q6_qcelp_in_read,
.release = q6_qcelp_in_release,
.unlocked_ioctl = q6_qcelp_in_ioctl,
};
struct miscdevice q6_qcelp_in_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "msm_qcelp_in",
.fops = &q6_qcelp_in_fops,
};
static int __init q6_qcelp_in_init(void)
{
return misc_register(&q6_qcelp_in_misc);
}
device_initcall(q6_qcelp_in_init);
| gpl-2.0 |
ChaOSChriS/android_kernel_asus_flo | arch/arm/mach-msm/qdsp6/audiov2/amrnb_in.c | 1101 | 5690 | /*
* Copyright (C) 2009 Google, Inc.
* Copyright (C) 2009 HTC Corporation
* Copyright (c) 2010, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <linux/msm_audio_amrnb.h>
#include <mach/msm_qdsp6_audiov2.h>
#include "dal_audio.h"
#include "dal_audio_format.h"
#include <mach/debug_mm.h>
struct amrnb {
struct mutex lock;
struct msm_audio_amrnb_enc_config_v2 cfg;
struct msm_audio_stream_config str_cfg;
struct audio_client *audio_client;
};
static long q6_amrnb_in_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct amrnb *amrnb = file->private_data;
struct adsp_open_command rpc;
int rc = 0;
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
memset(&stats, 0, sizeof(stats));
if (copy_to_user((void *) arg, &stats, sizeof(stats)))
return -EFAULT;
return 0;
}
mutex_lock(&amrnb->lock);
switch (cmd) {
case AUDIO_START:
if (amrnb->audio_client) {
rc = -EBUSY;
break;
} else {
amrnb->audio_client = q6audio_open(AUDIO_FLAG_READ,
amrnb->str_cfg.buffer_size);
if (!amrnb->audio_client) {
/* amrnb is still reachable via file->private_data
* and is unlocked below, so do not free it here;
* q6_amrnb_in_release() frees it */
rc = -ENOMEM;
break;
}
}
tx_clk_freq = 8000;
memset(&rpc, 0, sizeof(rpc));
rpc.format_block.standard.format = ADSP_AUDIO_FORMAT_AMRNB_FS;
rpc.format_block.standard.channels = 1;
rpc.format_block.standard.bits_per_sample = 16;
rpc.format_block.standard.sampling_rate = 8000;
rpc.format_block.standard.is_signed = 1;
rpc.format_block.standard.is_interleaved = 0;
rpc.hdr.opcode = ADSP_AUDIO_IOCTL_CMD_OPEN_READ;
rpc.device = ADSP_AUDIO_DEVICE_ID_DEFAULT;
rpc.stream_context = ADSP_AUDIO_DEVICE_CONTEXT_RECORD;
rpc.buf_max_size = amrnb->str_cfg.buffer_size;
rpc.config.amr.mode = amrnb->cfg.band_mode;
rpc.config.amr.dtx_mode = amrnb->cfg.dtx_enable;
rpc.config.amr.enable = 1;
q6audio_start(amrnb->audio_client, &rpc, sizeof(rpc));
break;
case AUDIO_STOP:
break;
case AUDIO_FLUSH:
break;
case AUDIO_SET_VOLUME:
break;
case AUDIO_GET_STREAM_CONFIG:
if (copy_to_user((void *)arg, &amrnb->str_cfg,
sizeof(struct msm_audio_stream_config)))
rc = -EFAULT;
break;
case AUDIO_SET_STREAM_CONFIG:
if (copy_from_user(&amrnb->str_cfg, (void *)arg,
sizeof(struct msm_audio_stream_config))) {
rc = -EFAULT;
break;
}
if (amrnb->str_cfg.buffer_size < 768) {
pr_err("[%s:%s] Buffer size too small\n", __MM_FILE__,
__func__);
rc = -EINVAL;
break;
}
if (amrnb->str_cfg.buffer_count != 2) {
pr_info("[%s:%s] Buffer count set to 2\n", __MM_FILE__,
__func__);
amrnb->str_cfg.buffer_count = 2;
}
break;
case AUDIO_SET_AMRNB_ENC_CONFIG:
if (copy_from_user(&amrnb->cfg, (void *) arg,
sizeof(struct msm_audio_amrnb_enc_config_v2)))
rc = -EFAULT;
break;
case AUDIO_GET_AMRNB_ENC_CONFIG:
if (copy_to_user((void *) arg, &amrnb->cfg,
sizeof(struct msm_audio_amrnb_enc_config_v2)))
rc = -EFAULT;
break;
default:
rc = -EINVAL;
}
mutex_unlock(&amrnb->lock);
return rc;
}
static int q6_amrnb_in_open(struct inode *inode, struct file *file)
{
struct amrnb *amrnb;
amrnb = kmalloc(sizeof(struct amrnb), GFP_KERNEL);
if (amrnb == NULL) {
pr_err("[%s:%s] Could not allocate memory for amrnb driver\n",
__MM_FILE__, __func__);
return -ENOMEM;
}
mutex_init(&amrnb->lock);
file->private_data = amrnb;
amrnb->audio_client = NULL;
amrnb->str_cfg.buffer_size = 768;
amrnb->str_cfg.buffer_count = 2;
amrnb->cfg.band_mode = ADSP_AUDIO_AMR_MR475;
amrnb->cfg.dtx_enable = ADSP_AUDIO_AMR_DTX_MODE_ON_AUTO;
amrnb->cfg.frame_format = ADSP_AUDIO_FORMAT_AMRNB_FS;
return 0;
}
static ssize_t q6_amrnb_in_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct audio_client *ac;
struct audio_buffer *ab;
const char __user *start = buf;
struct amrnb *amrnb = file->private_data;
int xfer = 0;
int res;
mutex_lock(&amrnb->lock);
ac = amrnb->audio_client;
if (!ac) {
res = -ENODEV;
goto fail;
}
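/* Same two-buffer ping-pong scheme as the QCELP reader above. */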
while (count > xfer) {
ab = ac->buf + ac->cpu_buf;
if (ab->used)
wait_event(ac->wait, (ab->used == 0));
xfer = ab->actual_size;
if (copy_to_user(buf, ab->data, xfer)) {
res = -EFAULT;
goto fail;
}
buf += xfer;
count -= xfer;
ab->used = 1;
q6audio_read(ac, ab);
ac->cpu_buf ^= 1;
}
res = buf - start;
fail:
mutex_unlock(&amrnb->lock);
return res;
}
static int q6_amrnb_in_release(struct inode *inode, struct file *file)
{
int rc = 0;
struct amrnb *amrnb = file->private_data;
mutex_lock(&amrnb->lock);
if (amrnb->audio_client)
rc = q6audio_close(amrnb->audio_client);
mutex_unlock(&amrnb->lock);
kfree(amrnb);
return rc;
}
static const struct file_operations q6_amrnb_in_fops = {
.owner = THIS_MODULE,
.open = q6_amrnb_in_open,
.read = q6_amrnb_in_read,
.release = q6_amrnb_in_release,
.unlocked_ioctl = q6_amrnb_in_ioctl,
};
struct miscdevice q6_amrnb_in_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "msm_amr_in",
.fops = &q6_amrnb_in_fops,
};
static int __init q6_amrnb_in_init(void)
{
return misc_register(&q6_amrnb_in_misc);
}
device_initcall(q6_amrnb_in_init);
| gpl-2.0 |
crseanpaul/kernel | drivers/iio/dac/ad5064.c | 1357 | 16307 | /*
* AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R,
* AD5648, AD5666, AD5668, AD5669R Digital to analog converters driver
*
* Copyright 2011 Analog Devices Inc.
*
* Licensed under the GPL-2.
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>
#include <asm/unaligned.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#define AD5064_MAX_DAC_CHANNELS 8
#define AD5064_MAX_VREFS 4
#define AD5064_ADDR(x) ((x) << 20)
#define AD5064_CMD(x) ((x) << 24)
#define AD5064_ADDR_ALL_DAC 0xF
#define AD5064_CMD_WRITE_INPUT_N 0x0
#define AD5064_CMD_UPDATE_DAC_N 0x1
#define AD5064_CMD_WRITE_INPUT_N_UPDATE_ALL 0x2
#define AD5064_CMD_WRITE_INPUT_N_UPDATE_N 0x3
#define AD5064_CMD_POWERDOWN_DAC 0x4
#define AD5064_CMD_CLEAR 0x5
#define AD5064_CMD_LDAC_MASK 0x6
#define AD5064_CMD_RESET 0x7
#define AD5064_CMD_CONFIG 0x8
#define AD5064_CONFIG_DAISY_CHAIN_ENABLE BIT(1)
#define AD5064_CONFIG_INT_VREF_ENABLE BIT(0)
#define AD5064_LDAC_PWRDN_NONE 0x0
#define AD5064_LDAC_PWRDN_1K 0x1
#define AD5064_LDAC_PWRDN_100K 0x2
#define AD5064_LDAC_PWRDN_3STATE 0x3
/**
* struct ad5064_chip_info - chip specific information
* @shared_vref: whether the vref supply is shared between channels
* @internal_vref: internal reference voltage. 0 if the chip has no internal
* vref.
* @channels: channel specification
* @num_channels: number of channels
*/
struct ad5064_chip_info {
bool shared_vref;
unsigned long internal_vref;
const struct iio_chan_spec *channels;
unsigned int num_channels;
};
struct ad5064_state;
typedef int (*ad5064_write_func)(struct ad5064_state *st, unsigned int cmd,
unsigned int addr, unsigned int val);
/**
* struct ad5064_state - driver instance specific data
* @dev: the device for this driver instance
* @chip_info: chip model specific constants, available modes etc
* @vref_reg: vref supply regulators
* @pwr_down: whether channel is powered down
* @pwr_down_mode: channel's current power down mode
* @dac_cache: current DAC raw value (chip does not support readback)
* @use_internal_vref: set to true if the internal reference voltage should be
* used.
* @write: register write callback
* @data: i2c/spi transfer buffers
*/
struct ad5064_state {
struct device *dev;
const struct ad5064_chip_info *chip_info;
struct regulator_bulk_data vref_reg[AD5064_MAX_VREFS];
bool pwr_down[AD5064_MAX_DAC_CHANNELS];
u8 pwr_down_mode[AD5064_MAX_DAC_CHANNELS];
unsigned int dac_cache[AD5064_MAX_DAC_CHANNELS];
bool use_internal_vref;
ad5064_write_func write;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
*/
union {
u8 i2c[3];
__be32 spi;
} data ____cacheline_aligned;
};
enum ad5064_type {
ID_AD5024,
ID_AD5025,
ID_AD5044,
ID_AD5045,
ID_AD5064,
ID_AD5064_1,
ID_AD5065,
ID_AD5628_1,
ID_AD5628_2,
ID_AD5648_1,
ID_AD5648_2,
ID_AD5666_1,
ID_AD5666_2,
ID_AD5668_1,
ID_AD5668_2,
};
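/*
* The per-channel shift (20 - realbits, see AD5064_CHANNEL below)
* left-aligns the raw value within the 20-bit data field that sits
* below the address nibble at bit 20.
*/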
static int ad5064_write(struct ad5064_state *st, unsigned int cmd,
unsigned int addr, unsigned int val, unsigned int shift)
{
val <<= shift;
return st->write(st, cmd, addr, val);
}
static int ad5064_sync_powerdown_mode(struct ad5064_state *st,
const struct iio_chan_spec *chan)
{
unsigned int val;
int ret;
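/* Bit (1 << chan->address) selects the DAC; the power-down mode
* (AD5064_LDAC_PWRDN_*) lands in bits 9..8. */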
val = (0x1 << chan->address);
if (st->pwr_down[chan->channel])
val |= st->pwr_down_mode[chan->channel] << 8;
ret = ad5064_write(st, AD5064_CMD_POWERDOWN_DAC, 0, val, 0);
return ret;
}
static const char * const ad5064_powerdown_modes[] = {
"1kohm_to_gnd",
"100kohm_to_gnd",
"three_state",
};
static int ad5064_get_powerdown_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
struct ad5064_state *st = iio_priv(indio_dev);
return st->pwr_down_mode[chan->channel] - 1;
}
static int ad5064_set_powerdown_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int mode)
{
struct ad5064_state *st = iio_priv(indio_dev);
int ret;
mutex_lock(&indio_dev->mlock);
st->pwr_down_mode[chan->channel] = mode + 1;
ret = ad5064_sync_powerdown_mode(st, chan);
mutex_unlock(&indio_dev->mlock);
return ret;
}
static const struct iio_enum ad5064_powerdown_mode_enum = {
.items = ad5064_powerdown_modes,
.num_items = ARRAY_SIZE(ad5064_powerdown_modes),
.get = ad5064_get_powerdown_mode,
.set = ad5064_set_powerdown_mode,
};
static ssize_t ad5064_read_dac_powerdown(struct iio_dev *indio_dev,
uintptr_t private, const struct iio_chan_spec *chan, char *buf)
{
struct ad5064_state *st = iio_priv(indio_dev);
return sprintf(buf, "%d\n", st->pwr_down[chan->channel]);
}
static ssize_t ad5064_write_dac_powerdown(struct iio_dev *indio_dev,
uintptr_t private, const struct iio_chan_spec *chan, const char *buf,
size_t len)
{
struct ad5064_state *st = iio_priv(indio_dev);
bool pwr_down;
int ret;
ret = strtobool(buf, &pwr_down);
if (ret)
return ret;
mutex_lock(&indio_dev->mlock);
st->pwr_down[chan->channel] = pwr_down;
ret = ad5064_sync_powerdown_mode(st, chan);
mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
}
static int ad5064_get_vref(struct ad5064_state *st,
struct iio_chan_spec const *chan)
{
unsigned int i;
if (st->use_internal_vref)
return st->chip_info->internal_vref;
i = st->chip_info->shared_vref ? 0 : chan->channel;
return regulator_get_voltage(st->vref_reg[i].consumer);
}
static int ad5064_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
long m)
{
struct ad5064_state *st = iio_priv(indio_dev);
int scale_uv;
switch (m) {
case IIO_CHAN_INFO_RAW:
*val = st->dac_cache[chan->channel];
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
scale_uv = ad5064_get_vref(st, chan);
if (scale_uv < 0)
return scale_uv;
*val = scale_uv / 1000;
*val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
default:
break;
}
return -EINVAL;
}
static int ad5064_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long mask)
{
struct ad5064_state *st = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
if (val >= (1 << chan->scan_type.realbits) || val < 0)
return -EINVAL;
mutex_lock(&indio_dev->mlock);
ret = ad5064_write(st, AD5064_CMD_WRITE_INPUT_N_UPDATE_N,
chan->address, val, chan->scan_type.shift);
if (ret == 0)
st->dac_cache[chan->channel] = val;
mutex_unlock(&indio_dev->mlock);
break;
default:
ret = -EINVAL;
}
return ret;
}
static const struct iio_info ad5064_info = {
.read_raw = ad5064_read_raw,
.write_raw = ad5064_write_raw,
.driver_module = THIS_MODULE,
};
static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
{
.name = "powerdown",
.read = ad5064_read_dac_powerdown,
.write = ad5064_write_dac_powerdown,
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5064_powerdown_mode_enum),
IIO_ENUM_AVAILABLE("powerdown_mode", &ad5064_powerdown_mode_enum),
{ },
};
#define AD5064_CHANNEL(chan, addr, bits) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.output = 1, \
.channel = (chan), \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
.address = addr, \
.scan_type = { \
.sign = 'u', \
.realbits = (bits), \
.storagebits = 16, \
.shift = 20 - bits, \
}, \
.ext_info = ad5064_ext_info, \
}
#define DECLARE_AD5064_CHANNELS(name, bits) \
const struct iio_chan_spec name[] = { \
AD5064_CHANNEL(0, 0, bits), \
AD5064_CHANNEL(1, 1, bits), \
AD5064_CHANNEL(2, 2, bits), \
AD5064_CHANNEL(3, 3, bits), \
AD5064_CHANNEL(4, 4, bits), \
AD5064_CHANNEL(5, 5, bits), \
AD5064_CHANNEL(6, 6, bits), \
AD5064_CHANNEL(7, 7, bits), \
}
#define DECLARE_AD5065_CHANNELS(name, bits) \
const struct iio_chan_spec name[] = { \
AD5064_CHANNEL(0, 0, bits), \
AD5064_CHANNEL(1, 3, bits), \
}
static DECLARE_AD5064_CHANNELS(ad5024_channels, 12);
static DECLARE_AD5064_CHANNELS(ad5044_channels, 14);
static DECLARE_AD5064_CHANNELS(ad5064_channels, 16);
static DECLARE_AD5065_CHANNELS(ad5025_channels, 12);
static DECLARE_AD5065_CHANNELS(ad5045_channels, 14);
static DECLARE_AD5065_CHANNELS(ad5065_channels, 16);
static const struct ad5064_chip_info ad5064_chip_info_tbl[] = {
[ID_AD5024] = {
.shared_vref = false,
.channels = ad5024_channels,
.num_channels = 4,
},
[ID_AD5025] = {
.shared_vref = false,
.channels = ad5025_channels,
.num_channels = 2,
},
[ID_AD5044] = {
.shared_vref = false,
.channels = ad5044_channels,
.num_channels = 4,
},
[ID_AD5045] = {
.shared_vref = false,
.channels = ad5045_channels,
.num_channels = 2,
},
[ID_AD5064] = {
.shared_vref = false,
.channels = ad5064_channels,
.num_channels = 4,
},
[ID_AD5064_1] = {
.shared_vref = true,
.channels = ad5064_channels,
.num_channels = 4,
},
[ID_AD5065] = {
.shared_vref = false,
.channels = ad5065_channels,
.num_channels = 2,
},
[ID_AD5628_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5024_channels,
.num_channels = 8,
},
[ID_AD5628_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5024_channels,
.num_channels = 8,
},
[ID_AD5648_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5044_channels,
.num_channels = 8,
},
[ID_AD5648_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5044_channels,
.num_channels = 8,
},
[ID_AD5666_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5064_channels,
.num_channels = 4,
},
[ID_AD5666_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5064_channels,
.num_channels = 4,
},
[ID_AD5668_1] = {
.shared_vref = true,
.internal_vref = 2500000,
.channels = ad5064_channels,
.num_channels = 8,
},
[ID_AD5668_2] = {
.shared_vref = true,
.internal_vref = 5000000,
.channels = ad5064_channels,
.num_channels = 8,
},
};
static inline unsigned int ad5064_num_vref(struct ad5064_state *st)
{
return st->chip_info->shared_vref ? 1 : st->chip_info->num_channels;
}
static const char * const ad5064_vref_names[] = {
"vrefA",
"vrefB",
"vrefC",
"vrefD",
};
static const char *ad5064_vref_name(struct ad5064_state *st,
unsigned int vref)
{
return st->chip_info->shared_vref ? "vref" : ad5064_vref_names[vref];
}
static int ad5064_probe(struct device *dev, enum ad5064_type type,
const char *name, ad5064_write_func write)
{
struct iio_dev *indio_dev;
struct ad5064_state *st;
unsigned int midscale;
unsigned int i;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
dev_set_drvdata(dev, indio_dev);
st->chip_info = &ad5064_chip_info_tbl[type];
st->dev = dev;
st->write = write;
for (i = 0; i < ad5064_num_vref(st); ++i)
st->vref_reg[i].supply = ad5064_vref_name(st, i);
ret = devm_regulator_bulk_get(dev, ad5064_num_vref(st),
st->vref_reg);
if (ret) {
if (!st->chip_info->internal_vref)
return ret;
st->use_internal_vref = true;
ret = ad5064_write(st, AD5064_CMD_CONFIG, 0,
AD5064_CONFIG_INT_VREF_ENABLE, 0);
if (ret) {
dev_err(dev, "Failed to enable internal vref: %d\n",
ret);
return ret;
}
} else {
ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg);
if (ret)
return ret;
}
indio_dev->dev.parent = dev;
indio_dev->name = name;
indio_dev->info = &ad5064_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
midscale = (1 << indio_dev->channels[0].scan_type.realbits) / 2;
for (i = 0; i < st->chip_info->num_channels; ++i) {
st->pwr_down_mode[i] = AD5064_LDAC_PWRDN_1K;
st->dac_cache[i] = midscale;
}
ret = iio_device_register(indio_dev);
if (ret)
goto error_disable_reg;
return 0;
error_disable_reg:
if (!st->use_internal_vref)
regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
return ret;
}
static int ad5064_remove(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5064_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
if (!st->use_internal_vref)
regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
return 0;
}
#if IS_ENABLED(CONFIG_SPI_MASTER)
static int ad5064_spi_write(struct ad5064_state *st, unsigned int cmd,
unsigned int addr, unsigned int val)
{
struct spi_device *spi = to_spi_device(st->dev);
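/* One 32-bit big-endian frame: command in bits 27..24, address in
* bits 23..20, left-aligned data in bits 19..0. */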
st->data.spi = cpu_to_be32(AD5064_CMD(cmd) | AD5064_ADDR(addr) | val);
return spi_write(spi, &st->data.spi, sizeof(st->data.spi));
}
static int ad5064_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
return ad5064_probe(&spi->dev, id->driver_data, id->name,
ad5064_spi_write);
}
static int ad5064_spi_remove(struct spi_device *spi)
{
return ad5064_remove(&spi->dev);
}
static const struct spi_device_id ad5064_spi_ids[] = {
{"ad5024", ID_AD5024},
{"ad5025", ID_AD5025},
{"ad5044", ID_AD5044},
{"ad5045", ID_AD5045},
{"ad5064", ID_AD5064},
{"ad5064-1", ID_AD5064_1},
{"ad5065", ID_AD5065},
{"ad5628-1", ID_AD5628_1},
{"ad5628-2", ID_AD5628_2},
{"ad5648-1", ID_AD5648_1},
{"ad5648-2", ID_AD5648_2},
{"ad5666-1", ID_AD5666_1},
{"ad5666-2", ID_AD5666_2},
{"ad5668-1", ID_AD5668_1},
{"ad5668-2", ID_AD5668_2},
{"ad5668-3", ID_AD5668_2}, /* similar enough to ad5668-2 */
{}
};
MODULE_DEVICE_TABLE(spi, ad5064_spi_ids);
static struct spi_driver ad5064_spi_driver = {
.driver = {
.name = "ad5064",
.owner = THIS_MODULE,
},
.probe = ad5064_spi_probe,
.remove = ad5064_spi_remove,
.id_table = ad5064_spi_ids,
};
static int __init ad5064_spi_register_driver(void)
{
return spi_register_driver(&ad5064_spi_driver);
}
static void ad5064_spi_unregister_driver(void)
{
spi_unregister_driver(&ad5064_spi_driver);
}
#else
static inline int ad5064_spi_register_driver(void) { return 0; }
static inline void ad5064_spi_unregister_driver(void) { }
#endif
#if IS_ENABLED(CONFIG_I2C)
static int ad5064_i2c_write(struct ad5064_state *st, unsigned int cmd,
unsigned int addr, unsigned int val)
{
struct i2c_client *i2c = to_i2c_client(st->dev);
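/* 3-byte frame: command in the high nibble of byte 0, address in the
* low nibble, then the 16-bit value big-endian. */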
st->data.i2c[0] = (cmd << 4) | addr;
put_unaligned_be16(val, &st->data.i2c[1]);
return i2c_master_send(i2c, st->data.i2c, 3);
}
static int ad5064_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
return ad5064_probe(&i2c->dev, id->driver_data, id->name,
ad5064_i2c_write);
}
static int ad5064_i2c_remove(struct i2c_client *i2c)
{
return ad5064_remove(&i2c->dev);
}
static const struct i2c_device_id ad5064_i2c_ids[] = {
{"ad5629-1", ID_AD5628_1},
{"ad5629-2", ID_AD5628_2},
{"ad5629-3", ID_AD5628_2}, /* similar enough to ad5629-2 */
{"ad5669-1", ID_AD5668_1},
{"ad5669-2", ID_AD5668_2},
{"ad5669-3", ID_AD5668_2}, /* similar enough to ad5669-2 */
{}
};
MODULE_DEVICE_TABLE(i2c, ad5064_i2c_ids);
static struct i2c_driver ad5064_i2c_driver = {
.driver = {
.name = "ad5064",
.owner = THIS_MODULE,
},
.probe = ad5064_i2c_probe,
.remove = ad5064_i2c_remove,
.id_table = ad5064_i2c_ids,
};
static int __init ad5064_i2c_register_driver(void)
{
return i2c_add_driver(&ad5064_i2c_driver);
}
static void __exit ad5064_i2c_unregister_driver(void)
{
i2c_del_driver(&ad5064_i2c_driver);
}
#else
static inline int ad5064_i2c_register_driver(void) { return 0; }
static inline void ad5064_i2c_unregister_driver(void) { }
#endif
static int __init ad5064_init(void)
{
int ret;
ret = ad5064_spi_register_driver();
if (ret)
return ret;
ret = ad5064_i2c_register_driver();
if (ret) {
ad5064_spi_unregister_driver();
return ret;
}
return 0;
}
module_init(ad5064_init);
static void __exit ad5064_exit(void)
{
ad5064_i2c_unregister_driver();
ad5064_spi_unregister_driver();
}
module_exit(ad5064_exit);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices AD5024 and similar multi-channel DACs");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
LuckJC/cubie-linux | arch/arm/kernel/fiq.c | 1869 | 3187 | /*
* linux/arch/arm/kernel/fiq.c
*
* Copyright (C) 1998 Russell King
* Copyright (C) 1998, 1999 Phil Blundell
*
* FIQ support written by Philip Blundell <philb@gnu.org>, 1998.
*
* FIQ support re-written by Russell King to be more generic
*
* We now properly support a method by which the FIQ handlers can
* be stacked onto the vector. We still do not support sharing
* the FIQ vector itself.
*
* Operation is as follows:
* 1. Owner A claims FIQ:
* - default_fiq relinquishes control.
* 2. Owner A:
* - inserts code.
* - sets any registers,
* - enables FIQ.
* 3. Owner B claims FIQ:
* - if owner A has a relinquish function.
* - disable FIQs.
* - saves any registers.
* - returns zero.
* 4. Owner B:
* - inserts code.
* - sets any registers,
* - enables FIQ.
* 5. Owner B releases FIQ:
* - Owner A is asked to reacquire FIQ:
* - inserts code.
* - restores saved registers.
* - enables FIQ.
* 6. Goto 3
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/fiq.h>
#include <asm/irq.h>
#include <asm/traps.h>
static unsigned long no_fiq_insn;
/* Default reacquire function
* - we always relinquish FIQ control
* - we always reacquire FIQ control
*/
static int fiq_def_op(void *ref, int relinquish)
{
if (!relinquish)
set_fiq_handler(&no_fiq_insn, sizeof(no_fiq_insn));
return 0;
}
static struct fiq_handler default_owner = {
.name = "default",
.fiq_op = fiq_def_op,
};
static struct fiq_handler *current_fiq = &default_owner;
int show_fiq_list(struct seq_file *p, int prec)
{
if (current_fiq != &default_owner)
seq_printf(p, "%*s: %s\n", prec, "FIQ",
current_fiq->name);
return 0;
}
void set_fiq_handler(void *start, unsigned int length)
{
#if defined(CONFIG_CPU_USE_DOMAINS)
memcpy((void *)0xffff001c, start, length);
#else
memcpy(vectors_page + 0x1c, start, length);
#endif
flush_icache_range(0xffff001c, 0xffff001c + length);
if (!vectors_high())
flush_icache_range(0x1c, 0x1c + length);
}
int claim_fiq(struct fiq_handler *f)
{
int ret = 0;
if (current_fiq) {
ret = -EBUSY;
if (current_fiq->fiq_op != NULL)
ret = current_fiq->fiq_op(current_fiq->dev_id, 1);
}
if (!ret) {
f->next = current_fiq;
current_fiq = f;
}
return ret;
}
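/*
* Typical owner flow, as a sketch only (handler_start/handler_end,
* fiq_irq and the register values are the caller's own; set_fiq_regs
* is the asm/fiq.h wrapper around __set_fiq_regs):
*
* static struct fiq_handler fh = { .name = "mydev" };
* struct pt_regs regs = { ... };
*
* if (!claim_fiq(&fh)) {
* set_fiq_handler(&handler_start, &handler_end - &handler_start);
* set_fiq_regs(&regs);
* enable_fiq(fiq_irq);
* }
*/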
void release_fiq(struct fiq_handler *f)
{
if (current_fiq != f) {
printk(KERN_ERR "%s FIQ trying to release %s FIQ\n",
f->name, current_fiq->name);
dump_stack();
return;
}
do
current_fiq = current_fiq->next;
while (current_fiq->fiq_op(current_fiq->dev_id, 0));
}
void enable_fiq(int fiq)
{
enable_irq(fiq + FIQ_START);
}
void disable_fiq(int fiq)
{
disable_irq(fiq + FIQ_START);
}
EXPORT_SYMBOL(set_fiq_handler);
EXPORT_SYMBOL(__set_fiq_regs); /* defined in fiqasm.S */
EXPORT_SYMBOL(__get_fiq_regs); /* defined in fiqasm.S */
EXPORT_SYMBOL(claim_fiq);
EXPORT_SYMBOL(release_fiq);
EXPORT_SYMBOL(enable_fiq);
EXPORT_SYMBOL(disable_fiq);
void __init init_FIQ(void)
{
no_fiq_insn = *(unsigned long *)0xffff001c;
}
| gpl-2.0 |
yuzaipiaofei/android_kernel_xiaomi_msm8916 | net/batman-adv/hash.c | 2381 | 1941 | /* Copyright (C) 2006-2013 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*/
#include "main.h"
#include "hash.h"
/* clears the hash */
static void batadv_hash_init(struct batadv_hashtable *hash)
{
uint32_t i;
for (i = 0; i < hash->size; i++) {
INIT_HLIST_HEAD(&hash->table[i]);
spin_lock_init(&hash->list_locks[i]);
}
}
/* free only the hashtable and the hash itself. */
void batadv_hash_destroy(struct batadv_hashtable *hash)
{
kfree(hash->list_locks);
kfree(hash->table);
kfree(hash);
}
/* allocates and clears the hash */
struct batadv_hashtable *batadv_hash_new(uint32_t size)
{
struct batadv_hashtable *hash;
hash = kmalloc(sizeof(*hash), GFP_ATOMIC);
if (!hash)
return NULL;
hash->table = kmalloc(sizeof(*hash->table) * size, GFP_ATOMIC);
if (!hash->table)
goto free_hash;
hash->list_locks = kmalloc(sizeof(*hash->list_locks) * size,
GFP_ATOMIC);
if (!hash->list_locks)
goto free_table;
hash->size = size;
batadv_hash_init(hash);
return hash;
free_table:
kfree(hash->table);
free_hash:
kfree(hash);
return NULL;
}
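/*
* Minimal usage sketch (the size, 1024, is illustrative only):
*
* struct batadv_hashtable *hash = batadv_hash_new(1024);
* if (!hash)
* return -ENOMEM;
* ...
* batadv_hash_destroy(hash);
*/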
void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
struct lock_class_key *key)
{
uint32_t i;
for (i = 0; i < hash->size; i++)
lockdep_set_class(&hash->list_locks[i], key);
}
| gpl-2.0 |
AOSP-TEAM/android_kernel_google_tuna | fs/dlm/user.c | 2381 | 24873 | /*
* Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*/
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/dlm.h>
#include <linux/dlm_device.h>
#include <linux/slab.h>
#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
#include "lvb_table.h"
#include "user.h"
#include "ast.h"
static const char name_prefix[] = "dlm";
static const struct file_operations device_fops;
static atomic_t dlm_monitor_opened;
static int dlm_monitor_unused = 1;
#ifdef CONFIG_COMPAT
struct dlm_lock_params32 {
__u8 mode;
__u8 namelen;
__u16 unused;
__u32 flags;
__u32 lkid;
__u32 parent;
__u64 xid;
__u64 timeout;
__u32 castparam;
__u32 castaddr;
__u32 bastparam;
__u32 bastaddr;
__u32 lksb;
char lvb[DLM_USER_LVB_LEN];
char name[0];
};
struct dlm_write_request32 {
__u32 version[3];
__u8 cmd;
__u8 is64bit;
__u8 unused[2];
union {
struct dlm_lock_params32 lock;
struct dlm_lspace_params lspace;
struct dlm_purge_params purge;
} i;
};
struct dlm_lksb32 {
__u32 sb_status;
__u32 sb_lkid;
__u8 sb_flags;
__u32 sb_lvbptr;
};
struct dlm_lock_result32 {
__u32 version[3];
__u32 length;
__u32 user_astaddr;
__u32 user_astparam;
__u32 user_lksb;
struct dlm_lksb32 lksb;
__u8 bast_mode;
__u8 unused[3];
/* Offsets may be zero if no data is present */
__u32 lvb_offset;
};
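/*
* The helpers below translate between the 32-bit and native layouts:
* userspace pointers from a 32-bit process travel as __u32 handles
* and are widened/narrowed via (long) casts.
*/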
static void compat_input(struct dlm_write_request *kb,
struct dlm_write_request32 *kb32,
int namelen)
{
kb->version[0] = kb32->version[0];
kb->version[1] = kb32->version[1];
kb->version[2] = kb32->version[2];
kb->cmd = kb32->cmd;
kb->is64bit = kb32->is64bit;
if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
kb->i.lspace.flags = kb32->i.lspace.flags;
kb->i.lspace.minor = kb32->i.lspace.minor;
memcpy(kb->i.lspace.name, kb32->i.lspace.name, namelen);
} else if (kb->cmd == DLM_USER_PURGE) {
kb->i.purge.nodeid = kb32->i.purge.nodeid;
kb->i.purge.pid = kb32->i.purge.pid;
} else {
kb->i.lock.mode = kb32->i.lock.mode;
kb->i.lock.namelen = kb32->i.lock.namelen;
kb->i.lock.flags = kb32->i.lock.flags;
kb->i.lock.lkid = kb32->i.lock.lkid;
kb->i.lock.parent = kb32->i.lock.parent;
kb->i.lock.xid = kb32->i.lock.xid;
kb->i.lock.timeout = kb32->i.lock.timeout;
kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
}
}
static void compat_output(struct dlm_lock_result *res,
struct dlm_lock_result32 *res32)
{
res32->version[0] = res->version[0];
res32->version[1] = res->version[1];
res32->version[2] = res->version[2];
res32->user_astaddr = (__u32)(long)res->user_astaddr;
res32->user_astparam = (__u32)(long)res->user_astparam;
res32->user_lksb = (__u32)(long)res->user_lksb;
res32->bast_mode = res->bast_mode;
res32->lvb_offset = res->lvb_offset;
res32->length = res->length;
res32->lksb.sb_status = res->lksb.sb_status;
res32->lksb.sb_flags = res->lksb.sb_flags;
res32->lksb.sb_lkid = res->lksb.sb_lkid;
res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
}
#endif
/* Figure out if this lock is at the end of its life and no longer
available for the application to use. The lkb still exists until
the final ast is read. A lock becomes EOL in three situations:
1. a noqueue request fails with EAGAIN
2. an unlock completes with EUNLOCK
3. a cancel of a waiting request completes with ECANCEL/EDEADLK
An EOL lock needs to be removed from the process's list of locks.
And we can't allow any new operation on an EOL lock. This is
not related to the lifetime of the lkb struct which is managed
entirely by refcount. */
static int lkb_is_endoflife(int mode, int status)
{
switch (status) {
case -DLM_EUNLOCK:
return 1;
case -DLM_ECANCEL:
case -ETIMEDOUT:
case -EDEADLK:
case -EAGAIN:
if (mode == DLM_LOCK_IV)
return 1;
break;
}
return 0;
}
/* we could possibly check if the cancel of an orphan has resulted in the lkb
being removed and then remove that lkb from the orphans list and free it */
void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
int status, uint32_t sbflags, uint64_t seq)
{
struct dlm_ls *ls;
struct dlm_user_args *ua;
struct dlm_user_proc *proc;
int rv;
if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
return;
ls = lkb->lkb_resource->res_ls;
mutex_lock(&ls->ls_clear_proc_locks);
/* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed
lkb->ua so we can't try to use it. This second check is necessary
for cases where a completion ast is received for an operation that
began before clear_proc_locks did its cancel/unlock. */
if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD))
goto out;
DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb););
ua = lkb->lkb_ua;
proc = ua->proc;
if ((flags & DLM_CB_BAST) && ua->bastaddr == NULL)
goto out;
if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status))
lkb->lkb_flags |= DLM_IFL_ENDOFLIFE;
spin_lock(&proc->asts_spin);
rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
if (rv < 0) {
spin_unlock(&proc->asts_spin);
goto out;
}
if (list_empty(&lkb->lkb_astqueue)) {
kref_get(&lkb->lkb_ref);
list_add_tail(&lkb->lkb_astqueue, &proc->asts);
wake_up_interruptible(&proc->wait);
}
spin_unlock(&proc->asts_spin);
if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
/* N.B. spin_lock locks_spin, not asts_spin */
spin_lock(&proc->locks_spin);
if (!list_empty(&lkb->lkb_ownqueue)) {
list_del_init(&lkb->lkb_ownqueue);
dlm_put_lkb(lkb);
}
spin_unlock(&proc->locks_spin);
}
out:
mutex_unlock(&ls->ls_clear_proc_locks);
}
static int device_user_lock(struct dlm_user_proc *proc,
struct dlm_lock_params *params)
{
struct dlm_ls *ls;
struct dlm_user_args *ua;
int error = -ENOMEM;
ls = dlm_find_lockspace_local(proc->lockspace);
if (!ls)
return -ENOENT;
if (!params->castaddr || !params->lksb) {
error = -EINVAL;
goto out;
}
ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
if (!ua)
goto out;
ua->proc = proc;
ua->user_lksb = params->lksb;
ua->castparam = params->castparam;
ua->castaddr = params->castaddr;
ua->bastparam = params->bastparam;
ua->bastaddr = params->bastaddr;
ua->xid = params->xid;
if (params->flags & DLM_LKF_CONVERT)
error = dlm_user_convert(ls, ua,
params->mode, params->flags,
params->lkid, params->lvb,
(unsigned long) params->timeout);
else {
error = dlm_user_request(ls, ua,
params->mode, params->flags,
params->name, params->namelen,
(unsigned long) params->timeout);
if (!error)
error = ua->lksb.sb_lkid;
}
out:
dlm_put_lockspace(ls);
return error;
}
static int device_user_unlock(struct dlm_user_proc *proc,
struct dlm_lock_params *params)
{
struct dlm_ls *ls;
struct dlm_user_args *ua;
int error = -ENOMEM;
ls = dlm_find_lockspace_local(proc->lockspace);
if (!ls)
return -ENOENT;
ua = kzalloc(sizeof(struct dlm_user_args), GFP_NOFS);
if (!ua)
goto out;
ua->proc = proc;
ua->user_lksb = params->lksb;
ua->castparam = params->castparam;
ua->castaddr = params->castaddr;
if (params->flags & DLM_LKF_CANCEL)
error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
else
error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
params->lvb);
out:
dlm_put_lockspace(ls);
return error;
}
static int device_user_deadlock(struct dlm_user_proc *proc,
struct dlm_lock_params *params)
{
struct dlm_ls *ls;
int error;
ls = dlm_find_lockspace_local(proc->lockspace);
if (!ls)
return -ENOENT;
error = dlm_user_deadlock(ls, params->flags, params->lkid);
dlm_put_lockspace(ls);
return error;
}
static int dlm_device_register(struct dlm_ls *ls, char *name)
{
int error, len;
/* The device is already registered. This happens when the
lockspace is created multiple times from userspace. */
if (ls->ls_device.name)
return 0;
error = -ENOMEM;
len = strlen(name) + strlen(name_prefix) + 2;
ls->ls_device.name = kzalloc(len, GFP_NOFS);
if (!ls->ls_device.name)
goto fail;
snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
name);
ls->ls_device.fops = &device_fops;
ls->ls_device.minor = MISC_DYNAMIC_MINOR;
error = misc_register(&ls->ls_device);
if (error) {
kfree(ls->ls_device.name);
/* don't leave a dangling pointer: dlm_device_deregister()
checks ls_device.name */
ls->ls_device.name = NULL;
}
fail:
return error;
}
int dlm_device_deregister(struct dlm_ls *ls)
{
int error;
/* The device is not registered. This happens when the lockspace
was never used from userspace, or when device_create_lockspace()
calls dlm_release_lockspace() after the register fails. */
if (!ls->ls_device.name)
return 0;
error = misc_deregister(&ls->ls_device);
if (!error)
kfree(ls->ls_device.name);
return error;
}
static int device_user_purge(struct dlm_user_proc *proc,
struct dlm_purge_params *params)
{
struct dlm_ls *ls;
int error;
ls = dlm_find_lockspace_local(proc->lockspace);
if (!ls)
return -ENOENT;
error = dlm_user_purge(ls, proc, params->nodeid, params->pid);
dlm_put_lockspace(ls);
return error;
}
static int device_create_lockspace(struct dlm_lspace_params *params)
{
dlm_lockspace_t *lockspace;
struct dlm_ls *ls;
int error;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
error = dlm_new_lockspace(params->name, strlen(params->name),
&lockspace, params->flags, DLM_USER_LVB_LEN);
if (error)
return error;
ls = dlm_find_lockspace_local(lockspace);
if (!ls)
return -ENOENT;
error = dlm_device_register(ls, params->name);
dlm_put_lockspace(ls);
if (error)
dlm_release_lockspace(lockspace, 0);
else
error = ls->ls_device.minor;
return error;
}
static int device_remove_lockspace(struct dlm_lspace_params *params)
{
dlm_lockspace_t *lockspace;
struct dlm_ls *ls;
int error, force = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
ls = dlm_find_lockspace_device(params->minor);
if (!ls)
return -ENOENT;
if (params->flags & DLM_USER_LSFLG_FORCEFREE)
force = 2;
lockspace = ls->ls_local_handle;
dlm_put_lockspace(ls);
/* The final dlm_release_lockspace waits for references to go to
zero, so all processes will need to close their device for the
ls before the release will proceed. release also calls the
device_deregister above. Converting a positive return value
from release to zero means that userspace won't know when its
release was the final one, but it shouldn't need to know. */
error = dlm_release_lockspace(lockspace, force);
if (error > 0)
error = 0;
return error;
}
/* Check the user's version matches ours */
static int check_version(struct dlm_write_request *req)
{
if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
(req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
"user (%d.%d.%d) kernel (%d.%d.%d)\n",
current->comm,
task_pid_nr(current),
req->version[0],
req->version[1],
req->version[2],
DLM_DEVICE_VERSION_MAJOR,
DLM_DEVICE_VERSION_MINOR,
DLM_DEVICE_VERSION_PATCH);
return -EINVAL;
}
return 0;
}
/*
* device_write
*
* device_user_lock
* dlm_user_request -> request_lock
* dlm_user_convert -> convert_lock
*
* device_user_unlock
* dlm_user_unlock -> unlock_lock
* dlm_user_cancel -> cancel_lock
*
* device_create_lockspace
* dlm_new_lockspace
*
* device_remove_lockspace
* dlm_release_lockspace
*/
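/* Hypothetical userspace sketch, not part of this file: removing a
lockspace through the control device, assuming the
struct dlm_write_request layout from linux/dlm_device.h
(version[3], cmd, and the i.lspace union member used at the
dispatch sites below).

struct dlm_write_request req = {0};
req.version[0] = DLM_DEVICE_VERSION_MAJOR;
req.version[1] = DLM_DEVICE_VERSION_MINOR;
req.version[2] = DLM_DEVICE_VERSION_PATCH;
req.cmd = DLM_USER_REMOVE_LOCKSPACE;
req.i.lspace.minor = minor;
write(control_fd, &req, sizeof(req));

This lands in device_write() below and is dispatched to
device_remove_lockspace(). */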
/* a write to a lockspace device is a lock or unlock request, a write
to the control device is to create/remove a lockspace */
static ssize_t device_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct dlm_user_proc *proc = file->private_data;
struct dlm_write_request *kbuf;
sigset_t tmpsig, allsigs;
int error;
#ifdef CONFIG_COMPAT
if (count < sizeof(struct dlm_write_request32))
#else
if (count < sizeof(struct dlm_write_request))
#endif
return -EINVAL;
kbuf = kzalloc(count + 1, GFP_NOFS);
if (!kbuf)
return -ENOMEM;
if (copy_from_user(kbuf, buf, count)) {
error = -EFAULT;
goto out_free;
}
if (check_version(kbuf)) {
error = -EBADE;
goto out_free;
}
#ifdef CONFIG_COMPAT
if (!kbuf->is64bit) {
struct dlm_write_request32 *k32buf;
int namelen = 0;
if (count > sizeof(struct dlm_write_request32))
namelen = count - sizeof(struct dlm_write_request32);
k32buf = (struct dlm_write_request32 *)kbuf;
/* add 1 after namelen so that the name string is terminated */
kbuf = kzalloc(sizeof(struct dlm_write_request) + namelen + 1,
GFP_NOFS);
if (!kbuf) {
kfree(k32buf);
return -ENOMEM;
}
if (proc)
set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
compat_input(kbuf, k32buf, namelen);
kfree(k32buf);
}
#endif
/* do we really need this? can a write happen after a close? */
if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
(proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))) {
error = -EINVAL;
goto out_free;
}
sigfillset(&allsigs);
sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
error = -EINVAL;
switch (kbuf->cmd) {
case DLM_USER_LOCK:
if (!proc) {
log_print("no locking on control device");
goto out_sig;
}
error = device_user_lock(proc, &kbuf->i.lock);
break;
case DLM_USER_UNLOCK:
if (!proc) {
log_print("no locking on control device");
goto out_sig;
}
error = device_user_unlock(proc, &kbuf->i.lock);
break;
case DLM_USER_DEADLOCK:
if (!proc) {
log_print("no locking on control device");
goto out_sig;
}
error = device_user_deadlock(proc, &kbuf->i.lock);
break;
case DLM_USER_CREATE_LOCKSPACE:
if (proc) {
log_print("create/remove only on control device");
goto out_sig;
}
error = device_create_lockspace(&kbuf->i.lspace);
break;
case DLM_USER_REMOVE_LOCKSPACE:
if (proc) {
log_print("create/remove only on control device");
goto out_sig;
}
error = device_remove_lockspace(&kbuf->i.lspace);
break;
case DLM_USER_PURGE:
if (!proc) {
log_print("no locking on control device");
goto out_sig;
}
error = device_user_purge(proc, &kbuf->i.purge);
break;
default:
log_print("Unknown command passed to DLM device : %d\n",
kbuf->cmd);
}
out_sig:
sigprocmask(SIG_SETMASK, &tmpsig, NULL);
out_free:
kfree(kbuf);
return error;
}
/* Every process that opens the lockspace device has its own "proc" structure
hanging off the open file that's used to keep track of locks owned by the
process and asts that need to be delivered to the process. */
static int device_open(struct inode *inode, struct file *file)
{
struct dlm_user_proc *proc;
struct dlm_ls *ls;
ls = dlm_find_lockspace_device(iminor(inode));
if (!ls)
return -ENOENT;
proc = kzalloc(sizeof(struct dlm_user_proc), GFP_NOFS);
if (!proc) {
dlm_put_lockspace(ls);
return -ENOMEM;
}
proc->lockspace = ls->ls_local_handle;
INIT_LIST_HEAD(&proc->asts);
INIT_LIST_HEAD(&proc->locks);
INIT_LIST_HEAD(&proc->unlocking);
spin_lock_init(&proc->asts_spin);
spin_lock_init(&proc->locks_spin);
init_waitqueue_head(&proc->wait);
file->private_data = proc;
return 0;
}
static int device_close(struct inode *inode, struct file *file)
{
struct dlm_user_proc *proc = file->private_data;
struct dlm_ls *ls;
sigset_t tmpsig, allsigs;
ls = dlm_find_lockspace_local(proc->lockspace);
if (!ls)
return -ENOENT;
sigfillset(&allsigs);
sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
dlm_clear_proc_locks(ls, proc);
/* at this point no more lkb's should exist for this lockspace,
so there's no chance of dlm_user_add_ast() being called and
looking for lkb->ua->proc */
kfree(proc);
file->private_data = NULL;
dlm_put_lockspace(ls);
dlm_put_lockspace(ls); /* for the find in device_open() */
/* FIXME: AUTOFREE: if this ls is no longer used do
device_remove_lockspace() */
sigprocmask(SIG_SETMASK, &tmpsig, NULL);
recalc_sigpending();
return 0;
}
static int copy_result_to_user(struct dlm_user_args *ua, int compat,
uint32_t flags, int mode, int copy_lvb,
char __user *buf, size_t count)
{
#ifdef CONFIG_COMPAT
struct dlm_lock_result32 result32;
#endif
struct dlm_lock_result result;
void *resultptr;
int error = 0;
int len;
int struct_len;
memset(&result, 0, sizeof(struct dlm_lock_result));
result.version[0] = DLM_DEVICE_VERSION_MAJOR;
result.version[1] = DLM_DEVICE_VERSION_MINOR;
result.version[2] = DLM_DEVICE_VERSION_PATCH;
memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
result.user_lksb = ua->user_lksb;
/* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
in a conversion unless the conversion is successful. See code
in dlm_user_convert() for updating ua from ua_tmp. OpenVMS, though,
notes that a new blocking AST address and parameter are set even if
the conversion fails, so maybe we should just do that. */
if (flags & DLM_CB_BAST) {
result.user_astaddr = ua->bastaddr;
result.user_astparam = ua->bastparam;
result.bast_mode = mode;
} else {
result.user_astaddr = ua->castaddr;
result.user_astparam = ua->castparam;
}
#ifdef CONFIG_COMPAT
if (compat)
len = sizeof(struct dlm_lock_result32);
else
#endif
len = sizeof(struct dlm_lock_result);
struct_len = len;
/* copy lvb to userspace if there is one, it's been updated, and
the user buffer has space for it */
if (copy_lvb && ua->lksb.sb_lvbptr && count >= len + DLM_USER_LVB_LEN) {
if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
DLM_USER_LVB_LEN)) {
error = -EFAULT;
goto out;
}
result.lvb_offset = len;
len += DLM_USER_LVB_LEN;
}
result.length = len;
resultptr = &result;
#ifdef CONFIG_COMPAT
if (compat) {
compat_output(&result, &result32);
resultptr = &result32;
}
#endif
if (copy_to_user(buf, resultptr, struct_len))
error = -EFAULT;
else
error = len;
out:
return error;
}
static int copy_version_to_user(char __user *buf, size_t count)
{
struct dlm_device_version ver;
memset(&ver, 0, sizeof(struct dlm_device_version));
ver.version[0] = DLM_DEVICE_VERSION_MAJOR;
ver.version[1] = DLM_DEVICE_VERSION_MINOR;
ver.version[2] = DLM_DEVICE_VERSION_PATCH;
if (copy_to_user(buf, &ver, sizeof(struct dlm_device_version)))
return -EFAULT;
return sizeof(struct dlm_device_version);
}
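/* Hypothetical userspace sketch, not part of this file: a process can
probe the kernel's device version by reading exactly
sizeof(struct dlm_device_version) bytes; device_read() below
special-cases that size and answers with the version triple.

struct dlm_device_version ver;
if (read(fd, &ver, sizeof(ver)) == sizeof(ver))
printf("%u.%u.%u\n", ver.version[0], ver.version[1],
ver.version[2]); */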
/* a read returns a single ast described in a struct dlm_lock_result */
static ssize_t device_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
struct dlm_user_proc *proc = file->private_data;
struct dlm_lkb *lkb;
DECLARE_WAITQUEUE(wait, current);
struct dlm_callback cb;
int rv, resid, copy_lvb = 0;
if (count == sizeof(struct dlm_device_version)) {
rv = copy_version_to_user(buf, count);
return rv;
}
if (!proc) {
log_print("non-version read from control device %zu", count);
return -EINVAL;
}
#ifdef CONFIG_COMPAT
if (count < sizeof(struct dlm_lock_result32))
#else
if (count < sizeof(struct dlm_lock_result))
#endif
return -EINVAL;
try_another:
/* do we really need this? can a read happen after a close? */
if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
return -EINVAL;
spin_lock(&proc->asts_spin);
if (list_empty(&proc->asts)) {
if (file->f_flags & O_NONBLOCK) {
spin_unlock(&proc->asts_spin);
return -EAGAIN;
}
add_wait_queue(&proc->wait, &wait);
repeat:
set_current_state(TASK_INTERRUPTIBLE);
if (list_empty(&proc->asts) && !signal_pending(current)) {
spin_unlock(&proc->asts_spin);
schedule();
spin_lock(&proc->asts_spin);
goto repeat;
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&proc->wait, &wait);
if (signal_pending(current)) {
spin_unlock(&proc->asts_spin);
return -ERESTARTSYS;
}
}
/* if we empty lkb_callbacks, we don't want to unlock the spinlock
without removing lkb_astqueue; so empty lkb_astqueue is always
consistent with empty lkb_callbacks */
lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_astqueue);
rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
if (rv < 0) {
/* this shouldn't happen; lkb should have been removed from
list when resid was zero */
log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
list_del_init(&lkb->lkb_astqueue);
spin_unlock(&proc->asts_spin);
/* removes ref for proc->asts, may cause lkb to be freed */
dlm_put_lkb(lkb);
goto try_another;
}
if (!resid)
list_del_init(&lkb->lkb_astqueue);
spin_unlock(&proc->asts_spin);
if (cb.flags & DLM_CB_SKIP) {
/* removes ref for proc->asts, may cause lkb to be freed */
if (!resid)
dlm_put_lkb(lkb);
goto try_another;
}
if (cb.flags & DLM_CB_CAST) {
int old_mode, new_mode;
old_mode = lkb->lkb_last_cast.mode;
new_mode = cb.mode;
if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr &&
dlm_lvb_operations[old_mode + 1][new_mode + 1])
copy_lvb = 1;
lkb->lkb_lksb->sb_status = cb.sb_status;
lkb->lkb_lksb->sb_flags = cb.sb_flags;
}
rv = copy_result_to_user(lkb->lkb_ua,
test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
cb.flags, cb.mode, copy_lvb, buf, count);
/* removes ref for proc->asts, may cause lkb to be freed */
if (!resid)
dlm_put_lkb(lkb);
return rv;
}
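/* Note (illustrative): the repeat: loop above is an open-coded
interruptible wait; it re-checks list_empty(&proc->asts) under
asts_spin and drops the lock only around schedule(), which is why
it can't simply use wait_event_interruptible(). */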
static unsigned int device_poll(struct file *file, poll_table *wait)
{
struct dlm_user_proc *proc = file->private_data;
poll_wait(file, &proc->wait, wait);
spin_lock(&proc->asts_spin);
if (!list_empty(&proc->asts)) {
spin_unlock(&proc->asts_spin);
return POLLIN | POLLRDNORM;
}
spin_unlock(&proc->asts_spin);
return 0;
}
int dlm_user_daemon_available(void)
{
/* dlm_controld hasn't started (or has started but hasn't
properly populated configfs) */
if (!dlm_our_nodeid())
return 0;
/* This is to deal with versions of dlm_controld that don't
know about the monitor device. We assume that if
dlm_controld was started (above) but the monitor device
was never opened, it's an old version. dlm_controld
should open the monitor device before populating configfs. */
if (dlm_monitor_unused)
return 1;
return atomic_read(&dlm_monitor_opened) ? 1 : 0;
}
static int ctl_device_open(struct inode *inode, struct file *file)
{
file->private_data = NULL;
return 0;
}
static int ctl_device_close(struct inode *inode, struct file *file)
{
return 0;
}
static int monitor_device_open(struct inode *inode, struct file *file)
{
atomic_inc(&dlm_monitor_opened);
dlm_monitor_unused = 0;
return 0;
}
static int monitor_device_close(struct inode *inode, struct file *file)
{
if (atomic_dec_and_test(&dlm_monitor_opened))
dlm_stop_lockspaces();
return 0;
}
static const struct file_operations device_fops = {
.open = device_open,
.release = device_close,
.read = device_read,
.write = device_write,
.poll = device_poll,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
static const struct file_operations ctl_device_fops = {
.open = ctl_device_open,
.release = ctl_device_close,
.read = device_read,
.write = device_write,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
static struct miscdevice ctl_device = {
.name = "dlm-control",
.fops = &ctl_device_fops,
.minor = MISC_DYNAMIC_MINOR,
};
static const struct file_operations monitor_device_fops = {
.open = monitor_device_open,
.release = monitor_device_close,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
static struct miscdevice monitor_device = {
.name = "dlm-monitor",
.fops = &monitor_device_fops,
.minor = MISC_DYNAMIC_MINOR,
};
int __init dlm_user_init(void)
{
int error;
atomic_set(&dlm_monitor_opened, 0);
error = misc_register(&ctl_device);
if (error) {
log_print("misc_register failed for control device");
goto out;
}
error = misc_register(&monitor_device);
if (error) {
log_print("misc_register failed for monitor device");
misc_deregister(&ctl_device);
}
out:
return error;
}
void dlm_user_exit(void)
{
misc_deregister(&ctl_device);
misc_deregister(&monitor_device);
}
| gpl-2.0 |
dl12345/kernel_sony_kitakami | net/batman-adv/hash.c | 2381 | 1941 | /* Copyright (C) 2006-2013 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*/
#include "main.h"
#include "hash.h"
/* clears the hash */
static void batadv_hash_init(struct batadv_hashtable *hash)
{
uint32_t i;
for (i = 0; i < hash->size; i++) {
INIT_HLIST_HEAD(&hash->table[i]);
spin_lock_init(&hash->list_locks[i]);
}
}
/* free only the hashtable and the hash itself. */
void batadv_hash_destroy(struct batadv_hashtable *hash)
{
kfree(hash->list_locks);
kfree(hash->table);
kfree(hash);
}
/* allocates and clears the hash */
struct batadv_hashtable *batadv_hash_new(uint32_t size)
{
struct batadv_hashtable *hash;
hash = kmalloc(sizeof(*hash), GFP_ATOMIC);
if (!hash)
return NULL;
hash->table = kmalloc(sizeof(*hash->table) * size, GFP_ATOMIC);
if (!hash->table)
goto free_hash;
hash->list_locks = kmalloc(sizeof(*hash->list_locks) * size,
GFP_ATOMIC);
if (!hash->list_locks)
goto free_table;
hash->size = size;
batadv_hash_init(hash);
return hash;
free_table:
kfree(hash->table);
free_hash:
kfree(hash);
return NULL;
}
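/* Illustrative lifecycle sketch (not from this file): create, use and
 * tear down a table; note that batadv_hash_destroy() frees only the
 * table and locks, so callers must empty the buckets first.
 *
 *	struct batadv_hashtable *hash = batadv_hash_new(64);
 *	if (!hash)
 *		return -ENOMEM;
 *	...add/remove entries under hash->list_locks[index]...
 *	batadv_hash_destroy(hash);
 */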
void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
struct lock_class_key *key)
{
uint32_t i;
for (i = 0; i < hash->size; i++)
lockdep_set_class(&hash->list_locks[i], key);
}
| gpl-2.0 |
yatto/Android_Kernel_ME302KL_CM13 | arch/x86/tools/relocs.c | 2637 | 19228 | #include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <elf.h>
#include <byteswap.h>
#define USE_BSD
#include <endian.h>
#include <regex.h>
#include <tools/le_byteshift.h>
static void die(char *fmt, ...);
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
static Elf32_Ehdr ehdr;
static unsigned long reloc_count, reloc_idx;
static unsigned long *relocs;
static unsigned long reloc16_count, reloc16_idx;
static unsigned long *relocs16;
struct section {
Elf32_Shdr shdr;
struct section *link;
Elf32_Sym *symtab;
Elf32_Rel *reltab;
char *strtab;
};
static struct section *secs;
enum symtype {
S_ABS,
S_REL,
S_SEG,
S_LIN,
S_NSYMTYPES
};
static const char * const sym_regex_kernel[S_NSYMTYPES] = {
/*
* The following symbols have been audited. Their values are constant and
* do not change if bzImage is loaded at a different physical address than
* the address for which it has been compiled. Don't warn the user about
* absolute relocations present w.r.t. these symbols.
*/
[S_ABS] =
"^(xen_irq_disable_direct_reloc$|"
"xen_save_fl_direct_reloc$|"
"VDSO|"
"__crc_)",
/*
* These symbols are known to be relative, even if the linker marks them
* as absolute (typically defined outside any section in the linker script.)
*/
[S_REL] =
"^(__init_(begin|end)|"
"__x86_cpu_dev_(start|end)|"
"(__parainstructions|__alt_instructions)(|_end)|"
"(__iommu_table|__apicdrivers|__smp_locks)(|_end)|"
"_end)$"
};
static const char * const sym_regex_realmode[S_NSYMTYPES] = {
/*
* These are 16-bit segment symbols when compiling 16-bit code.
*/
[S_SEG] =
"^real_mode_seg$",
/*
* These are offsets belonging to segments, as opposed to linear addresses,
* when compiling 16-bit code.
*/
[S_LIN] =
"^pa_",
};
static const char * const *sym_regex;
static regex_t sym_regex_c[S_NSYMTYPES];
static int is_reloc(enum symtype type, const char *sym_name)
{
return sym_regex[type] &&
!regexec(&sym_regex_c[type], sym_name, 0, NULL, 0);
}
static void regex_init(int use_real_mode)
{
char errbuf[128];
int err;
int i;
if (use_real_mode)
sym_regex = sym_regex_realmode;
else
sym_regex = sym_regex_kernel;
for (i = 0; i < S_NSYMTYPES; i++) {
if (!sym_regex[i])
continue;
err = regcomp(&sym_regex_c[i], sym_regex[i],
REG_EXTENDED|REG_NOSUB);
if (err) {
regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf);
die("%s", errbuf);
}
}
}
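/* Example (illustrative, with hypothetical symbol names): using the
 * kernel-mode tables above, is_reloc(S_ABS, "VDSO32_sigreturn") is true
 * because the S_ABS pattern contains the "VDSO" alternative, and
 * is_reloc(S_REL, "_end") is true via the "_end)$" alternative; both
 * symbols therefore escape the absolute-relocation warning emitted by
 * print_absolute_relocs() below.
 */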
static void die(char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
va_end(ap);
exit(1);
}
static const char *sym_type(unsigned type)
{
static const char *type_name[] = {
#define SYM_TYPE(X) [X] = #X
SYM_TYPE(STT_NOTYPE),
SYM_TYPE(STT_OBJECT),
SYM_TYPE(STT_FUNC),
SYM_TYPE(STT_SECTION),
SYM_TYPE(STT_FILE),
SYM_TYPE(STT_COMMON),
SYM_TYPE(STT_TLS),
#undef SYM_TYPE
};
const char *name = "unknown sym type name";
if (type < ARRAY_SIZE(type_name)) {
name = type_name[type];
}
return name;
}
static const char *sym_bind(unsigned bind)
{
static const char *bind_name[] = {
#define SYM_BIND(X) [X] = #X
SYM_BIND(STB_LOCAL),
SYM_BIND(STB_GLOBAL),
SYM_BIND(STB_WEAK),
#undef SYM_BIND
};
const char *name = "unknown sym bind name";
if (bind < ARRAY_SIZE(bind_name)) {
name = bind_name[bind];
}
return name;
}
static const char *sym_visibility(unsigned visibility)
{
static const char *visibility_name[] = {
#define SYM_VISIBILITY(X) [X] = #X
SYM_VISIBILITY(STV_DEFAULT),
SYM_VISIBILITY(STV_INTERNAL),
SYM_VISIBILITY(STV_HIDDEN),
SYM_VISIBILITY(STV_PROTECTED),
#undef SYM_VISIBILITY
};
const char *name = "unknown sym visibility name";
if (visibility < ARRAY_SIZE(visibility_name)) {
name = visibility_name[visibility];
}
return name;
}
static const char *rel_type(unsigned type)
{
static const char *type_name[] = {
#define REL_TYPE(X) [X] = #X
REL_TYPE(R_386_NONE),
REL_TYPE(R_386_32),
REL_TYPE(R_386_PC32),
REL_TYPE(R_386_GOT32),
REL_TYPE(R_386_PLT32),
REL_TYPE(R_386_COPY),
REL_TYPE(R_386_GLOB_DAT),
REL_TYPE(R_386_JMP_SLOT),
REL_TYPE(R_386_RELATIVE),
REL_TYPE(R_386_GOTOFF),
REL_TYPE(R_386_GOTPC),
REL_TYPE(R_386_8),
REL_TYPE(R_386_PC8),
REL_TYPE(R_386_16),
REL_TYPE(R_386_PC16),
#undef REL_TYPE
};
const char *name = "unknown type rel type name";
if (type < ARRAY_SIZE(type_name) && type_name[type]) {
name = type_name[type];
}
return name;
}
static const char *sec_name(unsigned shndx)
{
const char *sec_strtab;
const char *name;
sec_strtab = secs[ehdr.e_shstrndx].strtab;
name = "<noname>";
if (shndx < ehdr.e_shnum) {
name = sec_strtab + secs[shndx].shdr.sh_name;
}
else if (shndx == SHN_ABS) {
name = "ABSOLUTE";
}
else if (shndx == SHN_COMMON) {
name = "COMMON";
}
return name;
}
static const char *sym_name(const char *sym_strtab, Elf32_Sym *sym)
{
const char *name;
name = "<noname>";
if (sym->st_name) {
name = sym_strtab + sym->st_name;
}
else {
name = sec_name(sym->st_shndx);
}
return name;
}
#if BYTE_ORDER == LITTLE_ENDIAN
#define le16_to_cpu(val) (val)
#define le32_to_cpu(val) (val)
#endif
#if BYTE_ORDER == BIG_ENDIAN
#define le16_to_cpu(val) bswap_16(val)
#define le32_to_cpu(val) bswap_32(val)
#endif
static uint16_t elf16_to_cpu(uint16_t val)
{
return le16_to_cpu(val);
}
static uint32_t elf32_to_cpu(uint32_t val)
{
return le32_to_cpu(val);
}
static void read_ehdr(FILE *fp)
{
if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) {
die("Cannot read ELF header: %s\n",
strerror(errno));
}
if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) {
die("No ELF magic\n");
}
if (ehdr.e_ident[EI_CLASS] != ELFCLASS32) {
die("Not a 32 bit executable\n");
}
if (ehdr.e_ident[EI_DATA] != ELFDATA2LSB) {
die("Not a LSB ELF executable\n");
}
if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) {
die("Unknown ELF version\n");
}
/* Convert the fields to native endian */
ehdr.e_type = elf16_to_cpu(ehdr.e_type);
ehdr.e_machine = elf16_to_cpu(ehdr.e_machine);
ehdr.e_version = elf32_to_cpu(ehdr.e_version);
ehdr.e_entry = elf32_to_cpu(ehdr.e_entry);
ehdr.e_phoff = elf32_to_cpu(ehdr.e_phoff);
ehdr.e_shoff = elf32_to_cpu(ehdr.e_shoff);
ehdr.e_flags = elf32_to_cpu(ehdr.e_flags);
ehdr.e_ehsize = elf16_to_cpu(ehdr.e_ehsize);
ehdr.e_phentsize = elf16_to_cpu(ehdr.e_phentsize);
ehdr.e_phnum = elf16_to_cpu(ehdr.e_phnum);
ehdr.e_shentsize = elf16_to_cpu(ehdr.e_shentsize);
ehdr.e_shnum = elf16_to_cpu(ehdr.e_shnum);
ehdr.e_shstrndx = elf16_to_cpu(ehdr.e_shstrndx);
if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) {
die("Unsupported ELF header type\n");
}
if (ehdr.e_machine != EM_386) {
die("Not for x86\n");
}
if (ehdr.e_version != EV_CURRENT) {
die("Unknown ELF version\n");
}
if (ehdr.e_ehsize != sizeof(Elf32_Ehdr)) {
die("Bad Elf header size\n");
}
if (ehdr.e_phentsize != sizeof(Elf32_Phdr)) {
die("Bad program header entry\n");
}
if (ehdr.e_shentsize != sizeof(Elf32_Shdr)) {
die("Bad section header entry\n");
}
if (ehdr.e_shstrndx >= ehdr.e_shnum) {
die("String table index out of bounds\n");
}
}
static void read_shdrs(FILE *fp)
{
int i;
Elf32_Shdr shdr;
secs = calloc(ehdr.e_shnum, sizeof(struct section));
if (!secs) {
die("Unable to allocate %d section headers\n",
ehdr.e_shnum);
}
if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) {
die("Seek to %d failed: %s\n",
ehdr.e_shoff, strerror(errno));
}
for (i = 0; i < ehdr.e_shnum; i++) {
struct section *sec = &secs[i];
if (fread(&shdr, sizeof shdr, 1, fp) != 1)
die("Cannot read ELF section headers %d/%d: %s\n",
i, ehdr.e_shnum, strerror(errno));
sec->shdr.sh_name = elf32_to_cpu(shdr.sh_name);
sec->shdr.sh_type = elf32_to_cpu(shdr.sh_type);
sec->shdr.sh_flags = elf32_to_cpu(shdr.sh_flags);
sec->shdr.sh_addr = elf32_to_cpu(shdr.sh_addr);
sec->shdr.sh_offset = elf32_to_cpu(shdr.sh_offset);
sec->shdr.sh_size = elf32_to_cpu(shdr.sh_size);
sec->shdr.sh_link = elf32_to_cpu(shdr.sh_link);
sec->shdr.sh_info = elf32_to_cpu(shdr.sh_info);
sec->shdr.sh_addralign = elf32_to_cpu(shdr.sh_addralign);
sec->shdr.sh_entsize = elf32_to_cpu(shdr.sh_entsize);
if (sec->shdr.sh_link < ehdr.e_shnum)
sec->link = &secs[sec->shdr.sh_link];
}
}
static void read_strtabs(FILE *fp)
{
int i;
for (i = 0; i < ehdr.e_shnum; i++) {
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_STRTAB) {
continue;
}
sec->strtab = malloc(sec->shdr.sh_size);
if (!sec->strtab) {
die("malloc of %d bytes for strtab failed\n",
sec->shdr.sh_size);
}
if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
die("Seek to %d failed: %s\n",
sec->shdr.sh_offset, strerror(errno));
}
if (fread(sec->strtab, 1, sec->shdr.sh_size, fp)
!= sec->shdr.sh_size) {
die("Cannot read symbol table: %s\n",
strerror(errno));
}
}
}
static void read_symtabs(FILE *fp)
{
int i, j;
for (i = 0; i < ehdr.e_shnum; i++) {
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_SYMTAB) {
continue;
}
sec->symtab = malloc(sec->shdr.sh_size);
if (!sec->symtab) {
die("malloc of %d bytes for symtab failed\n",
sec->shdr.sh_size);
}
if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
die("Seek to %d failed: %s\n",
sec->shdr.sh_offset, strerror(errno));
}
if (fread(sec->symtab, 1, sec->shdr.sh_size, fp)
!= sec->shdr.sh_size) {
die("Cannot read symbol table: %s\n",
strerror(errno));
}
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
Elf32_Sym *sym = &sec->symtab[j];
sym->st_name = elf32_to_cpu(sym->st_name);
sym->st_value = elf32_to_cpu(sym->st_value);
sym->st_size = elf32_to_cpu(sym->st_size);
sym->st_shndx = elf16_to_cpu(sym->st_shndx);
}
}
}
static void read_relocs(FILE *fp)
{
int i, j;
for (i = 0; i < ehdr.e_shnum; i++) {
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_REL) {
continue;
}
sec->reltab = malloc(sec->shdr.sh_size);
if (!sec->reltab) {
die("malloc of %d bytes for relocs failed\n",
sec->shdr.sh_size);
}
if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) {
die("Seek to %d failed: %s\n",
sec->shdr.sh_offset, strerror(errno));
}
if (fread(sec->reltab, 1, sec->shdr.sh_size, fp)
!= sec->shdr.sh_size) {
die("Cannot read symbol table: %s\n",
strerror(errno));
}
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
Elf32_Rel *rel = &sec->reltab[j];
rel->r_offset = elf32_to_cpu(rel->r_offset);
rel->r_info = elf32_to_cpu(rel->r_info);
}
}
}
static void print_absolute_symbols(void)
{
int i;
printf("Absolute symbols\n");
printf(" Num: Value Size Type Bind Visibility Name\n");
for (i = 0; i < ehdr.e_shnum; i++) {
struct section *sec = &secs[i];
char *sym_strtab;
int j;
if (sec->shdr.sh_type != SHT_SYMTAB) {
continue;
}
sym_strtab = sec->link->strtab;
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
Elf32_Sym *sym;
const char *name;
sym = &sec->symtab[j];
name = sym_name(sym_strtab, sym);
if (sym->st_shndx != SHN_ABS) {
continue;
}
printf("%5d %08x %5d %10s %10s %12s %s\n",
j, sym->st_value, sym->st_size,
sym_type(ELF32_ST_TYPE(sym->st_info)),
sym_bind(ELF32_ST_BIND(sym->st_info)),
sym_visibility(ELF32_ST_VISIBILITY(sym->st_other)),
name);
}
}
printf("\n");
}
static void print_absolute_relocs(void)
{
int i, printed = 0;
for (i = 0; i < ehdr.e_shnum; i++) {
struct section *sec = &secs[i];
struct section *sec_applies, *sec_symtab;
char *sym_strtab;
Elf32_Sym *sh_symtab;
int j;
if (sec->shdr.sh_type != SHT_REL) {
continue;
}
sec_symtab = sec->link;
sec_applies = &secs[sec->shdr.sh_info];
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
continue;
}
sh_symtab = sec_symtab->symtab;
sym_strtab = sec_symtab->link->strtab;
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
Elf32_Rel *rel;
Elf32_Sym *sym;
const char *name;
rel = &sec->reltab[j];
sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
name = sym_name(sym_strtab, sym);
if (sym->st_shndx != SHN_ABS) {
continue;
}
/* Absolute symbols are not relocated if bzImage is
* loaded at a non-compiled address. Display a warning
* to the user at compile time about the absolute
* relocations present.
*
* The user needs to audit the code to make sure that
* symbols which should have been section relative have
* not become absolute because of linker optimization or
* incorrect usage.
*
* Before warning check if this absolute symbol
* relocation is harmless.
*/
if (is_reloc(S_ABS, name) || is_reloc(S_REL, name))
continue;
if (!printed) {
printf("WARNING: Absolute relocations"
" present\n");
printf("Offset Info Type Sym.Value "
"Sym.Name\n");
printed = 1;
}
printf("%08x %08x %10s %08x %s\n",
rel->r_offset,
rel->r_info,
rel_type(ELF32_R_TYPE(rel->r_info)),
sym->st_value,
name);
}
}
if (printed)
printf("\n");
}
static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
int use_real_mode)
{
int i;
/* Walk through the relocations */
for (i = 0; i < ehdr.e_shnum; i++) {
char *sym_strtab;
Elf32_Sym *sh_symtab;
struct section *sec_applies, *sec_symtab;
int j;
struct section *sec = &secs[i];
if (sec->shdr.sh_type != SHT_REL) {
continue;
}
sec_symtab = sec->link;
sec_applies = &secs[sec->shdr.sh_info];
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
continue;
}
sh_symtab = sec_symtab->symtab;
sym_strtab = sec_symtab->link->strtab;
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
Elf32_Rel *rel;
Elf32_Sym *sym;
unsigned r_type;
const char *symname;
int shn_abs;
rel = &sec->reltab[j];
sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
r_type = ELF32_R_TYPE(rel->r_info);
shn_abs = sym->st_shndx == SHN_ABS;
switch (r_type) {
case R_386_NONE:
case R_386_PC32:
case R_386_PC16:
case R_386_PC8:
/*
* NONE can be ignored and PC relative
* relocations don't need to be adjusted.
*/
break;
case R_386_16:
symname = sym_name(sym_strtab, sym);
if (!use_real_mode)
goto bad;
if (shn_abs) {
if (is_reloc(S_ABS, symname))
break;
else if (!is_reloc(S_SEG, symname))
goto bad;
} else {
if (is_reloc(S_LIN, symname))
goto bad;
else
break;
}
visit(rel, sym);
break;
case R_386_32:
symname = sym_name(sym_strtab, sym);
if (shn_abs) {
if (is_reloc(S_ABS, symname))
break;
else if (!is_reloc(S_REL, symname))
goto bad;
} else {
if (use_real_mode &&
!is_reloc(S_LIN, symname))
break;
}
visit(rel, sym);
break;
default:
die("Unsupported relocation type: %s (%d)\n",
rel_type(r_type), r_type);
break;
bad:
symname = sym_name(sym_strtab, sym);
die("Invalid %s %s relocation: %s\n",
shn_abs ? "absolute" : "relative",
rel_type(r_type), symname);
}
}
}
}
static void count_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
{
if (ELF32_R_TYPE(rel->r_info) == R_386_16)
reloc16_count++;
else
reloc_count++;
}
static void collect_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
{
/* Remember the address that needs to be adjusted. */
if (ELF32_R_TYPE(rel->r_info) == R_386_16)
relocs16[reloc16_idx++] = rel->r_offset;
else
relocs[reloc_idx++] = rel->r_offset;
}
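/* Sketch (not part of the tool): any function matching the visitor
 * signature can be driven by walk_relocs(); for instance a debugging
 * visitor that dumps every relocation offset:
 *
 *	static void print_reloc(Elf32_Rel *rel, Elf32_Sym *sym)
 *	{
 *		printf("reloc at 0x%08x\n", rel->r_offset);
 *	}
 *
 * invoked as walk_relocs(print_reloc, use_real_mode), exactly like
 * count_reloc() and collect_reloc() above.
 */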
static int cmp_relocs(const void *va, const void *vb)
{
const unsigned long *a, *b;
a = va; b = vb;
return (*a == *b)? 0 : (*a > *b)? 1 : -1;
}
static int write32(unsigned int v, FILE *f)
{
unsigned char buf[4];
put_unaligned_le32(v, buf);
return fwrite(buf, 1, 4, f) == 4 ? 0 : -1;
}
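/* Equivalent standalone encoder (illustrative; assumes
 * put_unaligned_le32() from tools/le_byteshift.h stores the least
 * significant byte first regardless of host endianness):
 *
 *	static void demo_put_le32(unsigned int v, unsigned char *buf)
 *	{
 *		buf[0] = v & 0xff;
 *		buf[1] = (v >> 8) & 0xff;
 *		buf[2] = (v >> 16) & 0xff;
 *		buf[3] = (v >> 24) & 0xff;
 *	}
 *
 * so the emitted relocation stream is byte-order independent.
 */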
static void emit_relocs(int as_text, int use_real_mode)
{
int i;
/* Count how many relocations I have and allocate space for them. */
reloc_count = 0;
walk_relocs(count_reloc, use_real_mode);
relocs = malloc(reloc_count * sizeof(relocs[0]));
if (!relocs) {
die("malloc of %d entries for relocs failed\n",
reloc_count);
}
relocs16 = malloc(reloc16_count * sizeof(relocs[0]));
if (!relocs16) {
die("malloc of %d entries for relocs16 failed\n",
reloc16_count);
}
/* Collect up the relocations */
reloc_idx = 0;
walk_relocs(collect_reloc, use_real_mode);
if (reloc16_count && !use_real_mode)
die("Segment relocations found but --realmode not specified\n");
/* Order the relocations for more efficient processing */
qsort(relocs, reloc_count, sizeof(relocs[0]), cmp_relocs);
qsort(relocs16, reloc16_count, sizeof(relocs16[0]), cmp_relocs);
/* Print the relocations */
if (as_text) {
/* Print the relocations in a form that
* gas will like.
*/
printf(".section \".data.reloc\",\"a\"\n");
printf(".balign 4\n");
if (use_real_mode) {
printf("\t.long %lu\n", reloc16_count);
for (i = 0; i < reloc16_count; i++)
printf("\t.long 0x%08lx\n", relocs16[i]);
printf("\t.long %lu\n", reloc_count);
for (i = 0; i < reloc_count; i++) {
printf("\t.long 0x%08lx\n", relocs[i]);
}
} else {
/* Print a stop */
printf("\t.long 0x%08lx\n", (unsigned long)0);
for (i = 0; i < reloc_count; i++) {
printf("\t.long 0x%08lx\n", relocs[i]);
}
}
printf("\n");
}
else {
if (use_real_mode) {
write32(reloc16_count, stdout);
for (i = 0; i < reloc16_count; i++)
write32(relocs16[i], stdout);
write32(reloc_count, stdout);
/* Now print each relocation */
for (i = 0; i < reloc_count; i++)
write32(relocs[i], stdout);
} else {
/* Print a stop */
write32(0, stdout);
/* Now print each relocation */
for (i = 0; i < reloc_count; i++) {
write32(relocs[i], stdout);
}
}
}
}
static void usage(void)
{
die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n");
}
int main(int argc, char **argv)
{
int show_absolute_syms, show_absolute_relocs;
int as_text, use_real_mode;
const char *fname;
FILE *fp;
int i;
show_absolute_syms = 0;
show_absolute_relocs = 0;
as_text = 0;
use_real_mode = 0;
fname = NULL;
for (i = 1; i < argc; i++) {
char *arg = argv[i];
if (*arg == '-') {
if (strcmp(arg, "--abs-syms") == 0) {
show_absolute_syms = 1;
continue;
}
if (strcmp(arg, "--abs-relocs") == 0) {
show_absolute_relocs = 1;
continue;
}
if (strcmp(arg, "--text") == 0) {
as_text = 1;
continue;
}
if (strcmp(arg, "--realmode") == 0) {
use_real_mode = 1;
continue;
}
}
else if (!fname) {
fname = arg;
continue;
}
usage();
}
if (!fname) {
usage();
}
regex_init(use_real_mode);
fp = fopen(fname, "r");
if (!fp) {
die("Cannot open %s: %s\n",
fname, strerror(errno));
}
read_ehdr(fp);
read_shdrs(fp);
read_strtabs(fp);
read_symtabs(fp);
read_relocs(fp);
if (show_absolute_syms) {
print_absolute_symbols();
return 0;
}
if (show_absolute_relocs) {
print_absolute_relocs();
return 0;
}
emit_relocs(as_text, use_real_mode);
return 0;
}
| gpl-2.0 |
hanjin1987/hw_msm8x25_kernel | drivers/staging/rtl8192e/rtl819x_TSProc.c | 4941 | 15523 | /******************************************************************************
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
#include "rtllib.h"
#include <linux/etherdevice.h>
#include "rtl819x_TS.h"
static void TsSetupTimeOut(unsigned long data)
{
}
static void TsInactTimeout(unsigned long data)
{
}
static void RxPktPendingTimeout(unsigned long data)
{
struct rx_ts_record *pRxTs = (struct rx_ts_record *)data;
struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device,
RxTsRecord[pRxTs->num]);
struct rx_reorder_entry *pReorderEntry = NULL;
unsigned long flags = 0;
u8 index = 0;
bool bPktInBuf = false;
spin_lock_irqsave(&(ieee->reorder_spinlock), flags);
if (pRxTs->RxTimeoutIndicateSeq != 0xffff) {
while (!list_empty(&pRxTs->RxPendingPktList)) {
pReorderEntry = (struct rx_reorder_entry *)
list_entry(pRxTs->RxPendingPktList.prev,
struct rx_reorder_entry, List);
if (index == 0)
pRxTs->RxIndicateSeq = pReorderEntry->SeqNum;
if (SN_LESS(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq) ||
SN_EQUAL(pReorderEntry->SeqNum, pRxTs->RxIndicateSeq)) {
list_del_init(&pReorderEntry->List);
if (SN_EQUAL(pReorderEntry->SeqNum,
pRxTs->RxIndicateSeq))
pRxTs->RxIndicateSeq =
(pRxTs->RxIndicateSeq + 1) % 4096;
RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Indicate"
" SeqNum: %d\n", __func__,
pReorderEntry->SeqNum);
ieee->stats_IndicateArray[index] =
pReorderEntry->prxb;
index++;
list_add_tail(&pReorderEntry->List,
&ieee->RxReorder_Unused_List);
} else {
bPktInBuf = true;
break;
}
}
}
if (index > 0) {
pRxTs->RxTimeoutIndicateSeq = 0xffff;
if (index > REORDER_WIN_SIZE) {
RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket():"
" Rx Reorer struct buffer full!!\n");
spin_unlock_irqrestore(&(ieee->reorder_spinlock),
flags);
return;
}
rtllib_indicate_packets(ieee, ieee->stats_IndicateArray, index);
bPktInBuf = false;
}
if (bPktInBuf && (pRxTs->RxTimeoutIndicateSeq == 0xffff)) {
pRxTs->RxTimeoutIndicateSeq = pRxTs->RxIndicateSeq;
mod_timer(&pRxTs->RxPktPendingTimer, jiffies +
MSECS(ieee->pHTInfo->RxReorderPendingTime));
}
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
}
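/* Note (illustrative): SN_LESS()/SN_EQUAL() used above are assumed to be
 * the 12-bit serial-number macros from rtllib.h, matching the "% 4096"
 * wrap of RxIndicateSeq, so e.g. SeqNum 4095 still compares as less than
 * SeqNum 1 across the wrap and is released in order.
 */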
static void TsAddBaProcess(unsigned long data)
{
struct tx_ts_record *pTxTs = (struct tx_ts_record *)data;
u8 num = pTxTs->num;
struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
TxTsRecord[num]);
TsInitAddBA(ieee, pTxTs, BA_POLICY_IMMEDIATE, false);
RTLLIB_DEBUG(RTLLIB_DL_BA, "TsAddBaProcess(): ADDBA Req is "
"started!!\n");
}
static void ResetTsCommonInfo(struct ts_common_info *pTsCommonInfo)
{
memset(pTsCommonInfo->Addr, 0, 6);
memset(&pTsCommonInfo->TSpec, 0, sizeof(union tspec_body));
memset(&pTsCommonInfo->TClass, 0, sizeof(union qos_tclas)*TCLAS_NUM);
pTsCommonInfo->TClasProc = 0;
pTsCommonInfo->TClasNum = 0;
}
static void ResetTxTsEntry(struct tx_ts_record *pTS)
{
ResetTsCommonInfo(&pTS->TsCommonInfo);
pTS->TxCurSeq = 0;
pTS->bAddBaReqInProgress = false;
pTS->bAddBaReqDelayed = false;
pTS->bUsingBa = false;
pTS->bDisable_AddBa = false;
ResetBaEntry(&pTS->TxAdmittedBARecord);
ResetBaEntry(&pTS->TxPendingBARecord);
}
static void ResetRxTsEntry(struct rx_ts_record *pTS)
{
ResetTsCommonInfo(&pTS->TsCommonInfo);
pTS->RxIndicateSeq = 0xffff;
pTS->RxTimeoutIndicateSeq = 0xffff;
ResetBaEntry(&pTS->RxAdmittedBARecord);
}
void TSInitialize(struct rtllib_device *ieee)
{
struct tx_ts_record *pTxTS = ieee->TxTsRecord;
struct rx_ts_record *pRxTS = ieee->RxTsRecord;
struct rx_reorder_entry *pRxReorderEntry = ieee->RxReorderEntry;
u8 count = 0;
RTLLIB_DEBUG(RTLLIB_DL_TS, "==========>%s()\n", __func__);
INIT_LIST_HEAD(&ieee->Tx_TS_Admit_List);
INIT_LIST_HEAD(&ieee->Tx_TS_Pending_List);
INIT_LIST_HEAD(&ieee->Tx_TS_Unused_List);
for (count = 0; count < TOTAL_TS_NUM; count++) {
pTxTS->num = count;
_setup_timer(&pTxTS->TsCommonInfo.SetupTimer,
TsSetupTimeOut,
(unsigned long) pTxTS);
_setup_timer(&pTxTS->TsCommonInfo.InactTimer,
TsInactTimeout,
(unsigned long) pTxTS);
_setup_timer(&pTxTS->TsAddBaTimer,
TsAddBaProcess,
(unsigned long) pTxTS);
_setup_timer(&pTxTS->TxPendingBARecord.Timer,
BaSetupTimeOut,
(unsigned long) pTxTS);
_setup_timer(&pTxTS->TxAdmittedBARecord.Timer,
TxBaInactTimeout,
(unsigned long) pTxTS);
ResetTxTsEntry(pTxTS);
list_add_tail(&pTxTS->TsCommonInfo.List,
&ieee->Tx_TS_Unused_List);
pTxTS++;
}
INIT_LIST_HEAD(&ieee->Rx_TS_Admit_List);
INIT_LIST_HEAD(&ieee->Rx_TS_Pending_List);
INIT_LIST_HEAD(&ieee->Rx_TS_Unused_List);
for (count = 0; count < TOTAL_TS_NUM; count++) {
pRxTS->num = count;
INIT_LIST_HEAD(&pRxTS->RxPendingPktList);
_setup_timer(&pRxTS->TsCommonInfo.SetupTimer,
TsSetupTimeOut,
(unsigned long) pRxTS);
_setup_timer(&pRxTS->TsCommonInfo.InactTimer,
TsInactTimeout,
(unsigned long) pRxTS);
_setup_timer(&pRxTS->RxAdmittedBARecord.Timer,
RxBaInactTimeout,
(unsigned long) pRxTS);
_setup_timer(&pRxTS->RxPktPendingTimer,
RxPktPendingTimeout,
(unsigned long) pRxTS);
ResetRxTsEntry(pRxTS);
list_add_tail(&pRxTS->TsCommonInfo.List,
&ieee->Rx_TS_Unused_List);
pRxTS++;
}
INIT_LIST_HEAD(&ieee->RxReorder_Unused_List);
for (count = 0; count < REORDER_ENTRY_NUM; count++) {
list_add_tail(&pRxReorderEntry->List,
&ieee->RxReorder_Unused_List);
if (count == (REORDER_ENTRY_NUM-1))
break;
pRxReorderEntry = &ieee->RxReorderEntry[count+1];
}
}
static void AdmitTS(struct rtllib_device *ieee,
struct ts_common_info *pTsCommonInfo, u32 InactTime)
{
del_timer_sync(&pTsCommonInfo->SetupTimer);
del_timer_sync(&pTsCommonInfo->InactTimer);
if (InactTime != 0)
mod_timer(&pTsCommonInfo->InactTimer, jiffies +
MSECS(InactTime));
}
static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
u8 *Addr, u8 TID,
enum tr_select TxRxSelect)
{
u8 dir;
bool search_dir[4] = {0};
struct list_head *psearch_list;
struct ts_common_info *pRet = NULL;
if (ieee->iw_mode == IW_MODE_MASTER) {
if (TxRxSelect == TX_DIR) {
search_dir[DIR_DOWN] = true;
search_dir[DIR_BI_DIR] = true;
} else {
search_dir[DIR_UP] = true;
search_dir[DIR_BI_DIR] = true;
}
} else if (ieee->iw_mode == IW_MODE_ADHOC) {
if (TxRxSelect == TX_DIR)
search_dir[DIR_UP] = true;
else
search_dir[DIR_DOWN] = true;
} else {
if (TxRxSelect == TX_DIR) {
search_dir[DIR_UP] = true;
search_dir[DIR_BI_DIR] = true;
search_dir[DIR_DIRECT] = true;
} else {
search_dir[DIR_DOWN] = true;
search_dir[DIR_BI_DIR] = true;
search_dir[DIR_DIRECT] = true;
}
}
if (TxRxSelect == TX_DIR)
psearch_list = &ieee->Tx_TS_Admit_List;
else
psearch_list = &ieee->Rx_TS_Admit_List;
for (dir = 0; dir <= DIR_BI_DIR; dir++) {
if (search_dir[dir] == false)
continue;
list_for_each_entry(pRet, psearch_list, List) {
if (memcmp(pRet->Addr, Addr, 6) == 0)
if (pRet->TSpec.f.TSInfo.field.ucTSID == TID)
if (pRet->TSpec.f.TSInfo.field.ucDirection == dir)
break;
}
if (&pRet->List != psearch_list)
break;
}
if (pRet && &pRet->List != psearch_list)
return pRet;
else
return NULL;
}
static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr,
union tspec_body *pTSPEC, union qos_tclas *pTCLAS,
u8 TCLAS_Num, u8 TCLAS_Proc)
{
u8 count;
if (pTsCommonInfo == NULL)
return;
memcpy(pTsCommonInfo->Addr, Addr, 6);
if (pTSPEC != NULL)
memcpy((u8 *)(&(pTsCommonInfo->TSpec)), (u8 *)pTSPEC,
sizeof(union tspec_body));
for (count = 0; count < TCLAS_Num; count++)
memcpy((u8 *)(&(pTsCommonInfo->TClass[count])),
(u8 *)pTCLAS, sizeof(union qos_tclas));
pTsCommonInfo->TClasProc = TCLAS_Proc;
pTsCommonInfo->TClasNum = TCLAS_Num;
}
bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
{
u8 UP = 0;
if (is_broadcast_ether_addr(Addr) || is_multicast_ether_addr(Addr)) {
RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR! get TS for Broadcast or "
"Multicast\n");
return false;
}
if (ieee->current_network.qos_data.supported == 0) {
UP = 0;
} else {
if (!IsACValid(TID)) {
RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR! in %s(), TID(%d) is "
"not valid\n", __func__, TID);
return false;
}
switch (TID) {
case 0:
case 3:
UP = 0;
break;
case 1:
case 2:
UP = 2;
break;
case 4:
case 5:
UP = 5;
break;
case 6:
case 7:
UP = 7;
break;
}
}
*ppTS = SearchAdmitTRStream(ieee, Addr, UP, TxRxSelect);
if (*ppTS != NULL) {
return true;
} else {
if (bAddNewTs == false) {
RTLLIB_DEBUG(RTLLIB_DL_TS, "add new TS failed"
"(tid:%d)\n", UP);
return false;
} else {
union tspec_body TSpec;
union qos_tsinfo *pTSInfo = &TSpec.f.TSInfo;
struct list_head *pUnusedList =
(TxRxSelect == TX_DIR) ?
(&ieee->Tx_TS_Unused_List) :
(&ieee->Rx_TS_Unused_List);
struct list_head *pAddmitList =
(TxRxSelect == TX_DIR) ?
(&ieee->Tx_TS_Admit_List) :
(&ieee->Rx_TS_Admit_List);
enum direction_value Dir =
(ieee->iw_mode == IW_MODE_MASTER) ?
((TxRxSelect == TX_DIR) ? DIR_DOWN : DIR_UP) :
((TxRxSelect == TX_DIR) ? DIR_UP : DIR_DOWN);
RTLLIB_DEBUG(RTLLIB_DL_TS, "to add Ts\n");
if (!list_empty(pUnusedList)) {
(*ppTS) = list_entry(pUnusedList->next,
struct ts_common_info, List);
list_del_init(&(*ppTS)->List);
if (TxRxSelect == TX_DIR) {
struct tx_ts_record *tmp =
container_of(*ppTS,
struct tx_ts_record,
TsCommonInfo);
ResetTxTsEntry(tmp);
} else {
struct rx_ts_record *tmp =
container_of(*ppTS,
struct rx_ts_record,
TsCommonInfo);
ResetRxTsEntry(tmp);
}
RTLLIB_DEBUG(RTLLIB_DL_TS, "to init current TS"
", UP:%d, Dir:%d, addr: %pM"
" ppTs=%p\n", UP, Dir,
Addr, *ppTS);
pTSInfo->field.ucTrafficType = 0;
pTSInfo->field.ucTSID = UP;
pTSInfo->field.ucDirection = Dir;
pTSInfo->field.ucAccessPolicy = 1;
pTSInfo->field.ucAggregation = 0;
pTSInfo->field.ucPSB = 0;
pTSInfo->field.ucUP = UP;
pTSInfo->field.ucTSInfoAckPolicy = 0;
pTSInfo->field.ucSchedule = 0;
MakeTSEntry(*ppTS, Addr, &TSpec, NULL, 0, 0);
AdmitTS(ieee, *ppTS, 0);
list_add_tail(&((*ppTS)->List), pAddmitList);
return true;
} else {
RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR!!in function "
"%s() There is not enough dir=%d"
"(0=up down=1) TS record to be "
"used!!", __func__, Dir);
return false;
}
}
}
}
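/* Illustrative summary of the TID -> UP mapping above (802.1d-style
 * priority grouping as implemented here): TIDs 0 and 3 map to UP 0,
 * 1 and 2 to UP 2, 4 and 5 to UP 5, and 6 and 7 to UP 7, so e.g.
 * GetTs(..., TID=4, ...) searches the admitted lists for UP 5.
 */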
static void RemoveTsEntry(struct rtllib_device *ieee, struct ts_common_info *pTs,
enum tr_select TxRxSelect)
{
del_timer_sync(&pTs->SetupTimer);
del_timer_sync(&pTs->InactTimer);
TsInitDelBA(ieee, pTs, TxRxSelect);
if (TxRxSelect == RX_DIR) {
struct rx_reorder_entry *pRxReorderEntry;
struct rx_ts_record *pRxTS = (struct rx_ts_record *)pTs;
if (timer_pending(&pRxTS->RxPktPendingTimer))
del_timer_sync(&pRxTS->RxPktPendingTimer);
while (!list_empty(&pRxTS->RxPendingPktList)) {
pRxReorderEntry = (struct rx_reorder_entry *)
list_entry(pRxTS->RxPendingPktList.prev,
struct rx_reorder_entry, List);
RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Delete SeqNum "
"%d!\n", __func__,
pRxReorderEntry->SeqNum);
list_del_init(&pRxReorderEntry->List);
{
int i = 0;
struct rtllib_rxb *prxb = pRxReorderEntry->prxb;
if (unlikely(!prxb))
return;
for (i = 0; i < prxb->nr_subframes; i++)
dev_kfree_skb(prxb->subframes[i]);
kfree(prxb);
prxb = NULL;
}
list_add_tail(&pRxReorderEntry->List,
&ieee->RxReorder_Unused_List);
}
} else {
struct tx_ts_record *pTxTS = (struct tx_ts_record *)pTs;
del_timer_sync(&pTxTS->TsAddBaTimer);
}
}
void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr)
{
struct ts_common_info *pTS, *pTmpTS;
printk(KERN_INFO "===========>RemovePeerTS, %pM\n", Addr);
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
if (memcmp(pTS->Addr, Addr, 6) == 0) {
RemoveTsEntry(ieee, pTS, TX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
}
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
if (memcmp(pTS->Addr, Addr, 6) == 0) {
printk(KERN_INFO "====>remove Tx_TS_admin_list\n");
RemoveTsEntry(ieee, pTS, TX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
}
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
if (memcmp(pTS->Addr, Addr, 6) == 0) {
RemoveTsEntry(ieee, pTS, RX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
if (memcmp(pTS->Addr, Addr, 6) == 0) {
RemoveTsEntry(ieee, pTS, RX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
}
}
EXPORT_SYMBOL(RemovePeerTS);
void RemoveAllTS(struct rtllib_device *ieee)
{
struct ts_common_info *pTS, *pTmpTS;
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Pending_List, List) {
RemoveTsEntry(ieee, pTS, TX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Tx_TS_Admit_List, List) {
RemoveTsEntry(ieee, pTS, TX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Tx_TS_Unused_List);
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Pending_List, List) {
RemoveTsEntry(ieee, pTS, RX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
list_for_each_entry_safe(pTS, pTmpTS, &ieee->Rx_TS_Admit_List, List) {
RemoveTsEntry(ieee, pTS, RX_DIR);
list_del_init(&pTS->List);
list_add_tail(&pTS->List, &ieee->Rx_TS_Unused_List);
}
}
void TsStartAddBaProcess(struct rtllib_device *ieee, struct tx_ts_record *pTxTS)
{
if (pTxTS->bAddBaReqInProgress == false) {
pTxTS->bAddBaReqInProgress = true;
if (pTxTS->bAddBaReqDelayed) {
RTLLIB_DEBUG(RTLLIB_DL_BA, "TsStartAddBaProcess(): "
"Delayed Start ADDBA after 60 sec!!\n");
mod_timer(&pTxTS->TsAddBaTimer, jiffies +
MSECS(TS_ADDBA_DELAY));
} else {
RTLLIB_DEBUG(RTLLIB_DL_BA, "TsStartAddBaProcess(): "
"Immediately Start ADDBA now!!\n");
mod_timer(&pTxTS->TsAddBaTimer, jiffies+10);
}
} else
RTLLIB_DEBUG(RTLLIB_DL_BA, "%s()==>BA timer is already added\n",
__func__);
}
| gpl-2.0 |
javelinanddart/android_kernel_caf_ville | net/sctp/ulpqueue.c | 8013 | 28422 | /* SCTP kernel implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Nokia, Inc.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
* This abstraction carries sctp events to the ULP (sockets).
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Sridhar Samudrala <sri@us.ibm.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers. */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
/* 1st Level Abstractions */
/* Initialize a ULP queue from a block of memory. */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
struct sctp_association *asoc)
{
memset(ulpq, 0, sizeof(struct sctp_ulpq));
ulpq->asoc = asoc;
skb_queue_head_init(&ulpq->reasm);
skb_queue_head_init(&ulpq->lobby);
ulpq->pd_mode = 0;
ulpq->malloced = 0;
return ulpq;
}
/* Flush the reassembly and ordering queues. */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
struct sk_buff *skb;
struct sctp_ulpevent *event;
while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
event = sctp_skb2event(skb);
sctp_ulpevent_free(event);
}
while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
event = sctp_skb2event(skb);
sctp_ulpevent_free(event);
}
}
/* Dispose of a ulpqueue. */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
sctp_ulpq_flush(ulpq);
if (ulpq->malloced)
kfree(ulpq);
}
/* Process an incoming DATA chunk. */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
gfp_t gfp)
{
struct sk_buff_head temp;
struct sctp_ulpevent *event;
/* Create an event from the incoming chunk. */
event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
if (!event)
return -ENOMEM;
/* Do reassembly if needed. */
event = sctp_ulpq_reasm(ulpq, event);
/* Do ordering if needed. */
if (event && (event->msg_flags & MSG_EOR)) {
/* Create a temporary list to collect chunks on. */
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
event = sctp_ulpq_order(ulpq, event);
}
/* Send event to the ULP. 'event' is the sctp_ulpevent for
* the very first SKB on the 'temp' list.
*/
if (event)
sctp_ulpq_tail_event(ulpq, event);
return 0;
}
/* Add a new event for propagation to the ULP. */
/* Clear the partial delivery mode for this socket. Note: This
* assumes that no association is currently in partial delivery mode.
*/
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
struct sctp_sock *sp = sctp_sk(sk);
if (atomic_dec_and_test(&sp->pd_mode)) {
/* This means there are no other associations in PD, so
* we can go ahead and clear out the lobby in one shot
*/
if (!skb_queue_empty(&sp->pd_lobby)) {
struct list_head *list;
sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
INIT_LIST_HEAD(list);
return 1;
}
} else {
/* There are other associations in PD, so we only need to
* pull stuff out of the lobby that belongs to the
* associations that is exiting PD (all of its notifications
* are posted here).
*/
if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
struct sk_buff *skb, *tmp;
struct sctp_ulpevent *event;
sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == asoc) {
__skb_unlink(skb, &sp->pd_lobby);
__skb_queue_tail(&sk->sk_receive_queue,
skb);
}
}
}
}
return 0;
}
/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
atomic_inc(&sp->pd_mode);
ulpq->pd_mode = 1;
}
/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
ulpq->pd_mode = 0;
sctp_ulpq_reasm_drain(ulpq);
return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}
/* If the SKB of 'event' is on a list, it is the first such member
* of that list.
*/
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
struct sock *sk = ulpq->asoc->base.sk;
struct sk_buff_head *queue, *skb_list;
struct sk_buff *skb = sctp_event2skb(event);
int clear_pd = 0;
skb_list = (struct sk_buff_head *) skb->prev;
/* If the socket is just going to throw this away, do not
* even try to deliver it.
*/
if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
goto out_free;
/* Check if the user wishes to receive this event. */
if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
goto out_free;
/* If we are in partial delivery mode, post to the lobby until
* partial delivery is cleared, unless, of course _this_ is
* the association the cause of the partial delivery.
*/
if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
queue = &sk->sk_receive_queue;
} else {
if (ulpq->pd_mode) {
/* If the association is in partial delivery, we
* need to finish delivering the partially processed
* packet before passing any other data. This is
* because we don't truly support stream interleaving.
*/
if ((event->msg_flags & MSG_NOTIFICATION) ||
(SCTP_DATA_NOT_FRAG ==
(event->msg_flags & SCTP_DATA_FRAG_MASK)))
queue = &sctp_sk(sk)->pd_lobby;
else {
clear_pd = event->msg_flags & MSG_EOR;
queue = &sk->sk_receive_queue;
}
} else {
/*
* If fragment interleave is enabled, we
* can queue this to the receive queue instead
* of the lobby.
*/
if (sctp_sk(sk)->frag_interleave)
queue = &sk->sk_receive_queue;
else
queue = &sctp_sk(sk)->pd_lobby;
}
}
/* If we are harvesting multiple skbs they will be
* collected on a list.
*/
if (skb_list)
sctp_skb_list_tail(skb_list, queue);
else
__skb_queue_tail(queue, skb);
/* Did we just complete partial delivery and need to get
* rolling again? Move pending data to the receive
* queue.
*/
if (clear_pd)
sctp_ulpq_clear_pd(ulpq);
if (queue == &sk->sk_receive_queue)
sk->sk_data_ready(sk, 0);
return 1;
out_free:
if (skb_list)
sctp_queue_purge_ulpevents(skb_list);
else
sctp_ulpevent_free(event);
return 0;
}
/* 2nd Level Abstractions */
/* Helper function to store chunks that need to be reassembled. */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sk_buff *pos;
struct sctp_ulpevent *cevent;
__u32 tsn, ctsn;
tsn = event->tsn;
/* See if it belongs at the end. */
pos = skb_peek_tail(&ulpq->reasm);
if (!pos) {
__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
return;
}
/* Short circuit just dropping it at the end. */
cevent = sctp_skb2event(pos);
ctsn = cevent->tsn;
if (TSN_lt(ctsn, tsn)) {
__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
return;
}
/* Find the right place in this list. We store them by TSN. */
skb_queue_walk(&ulpq->reasm, pos) {
cevent = sctp_skb2event(pos);
ctsn = cevent->tsn;
if (TSN_lt(tsn, ctsn))
break;
}
/* Insert before pos. */
__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
/* Helper function to return an event corresponding to the reassembled
* datagram.
* This routine creates a reassembled skb given the first and last skbs
* as stored in the reassembly queue. The skbs may be non-linear if the
* SCTP payload was fragmented on the way and IP had to reassemble them.
* We add the rest of the skbs to the first skb's frag_list.
*/
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
{
struct sk_buff *pos;
struct sk_buff *new = NULL;
struct sctp_ulpevent *event;
struct sk_buff *pnext, *last;
struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
/* Store the pointer to the 2nd skb */
if (f_frag == l_frag)
pos = NULL;
else
pos = f_frag->next;
/* Get the last skb in the f_frag's frag_list if present. */
for (last = list; list; last = list, list = list->next);
/* Add the list of remaining fragments to the first fragments
* frag_list.
*/
if (last)
last->next = pos;
else {
if (skb_cloned(f_frag)) {
/* This is a cloned skb, we can't just modify
* the frag_list. We need a new skb to do that.
* Instead of calling skb_unshare(), we'll do it
* ourselves since we need to delay the free.
*/
new = skb_copy(f_frag, GFP_ATOMIC);
if (!new)
return NULL; /* try again later */
sctp_skb_set_owner_r(new, f_frag->sk);
skb_shinfo(new)->frag_list = pos;
} else
skb_shinfo(f_frag)->frag_list = pos;
}
/* Remove the first fragment from the reassembly queue. */
__skb_unlink(f_frag, queue);
/* if we did unshare, then free the old skb and re-assign */
if (new) {
kfree_skb(f_frag);
f_frag = new;
}
while (pos) {
pnext = pos->next;
/* Update the len and data_len fields of the first fragment. */
f_frag->len += pos->len;
f_frag->data_len += pos->len;
/* Remove the fragment from the reassembly queue. */
__skb_unlink(pos, queue);
/* Break if we have reached the last fragment. */
if (pos == l_frag)
break;
pos->next = pnext;
pos = pnext;
}
event = sctp_skb2event(f_frag);
SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
return event;
}
/* Helper function to check if an incoming chunk has filled up the last
* missing fragment in a SCTP datagram and return the corresponding event.
*/
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
struct sk_buff *pos;
struct sctp_ulpevent *cevent;
struct sk_buff *first_frag = NULL;
__u32 ctsn, next_tsn;
struct sctp_ulpevent *retval = NULL;
struct sk_buff *pd_first = NULL;
struct sk_buff *pd_last = NULL;
size_t pd_len = 0;
struct sctp_association *asoc;
u32 pd_point;
/* Initialized to 0 just to avoid a compiler warning. Will
* never be used with this value. It is referenced only after it
* is set when we find the first fragment of a message.
*/
next_tsn = 0;
/* The chunks are held in the reasm queue sorted by TSN.
* Walk through the queue sequentially and look for a sequence of
* fragmented chunks that complete a datagram.
* 'first_frag' and next_tsn are reset when we find a chunk which
* is the first fragment of a datagram. Once these 2 fields are set
* we expect to find the remaining middle fragments and the last
* fragment in order. If not, first_frag is reset to NULL and we
* start the next pass when we find another first fragment.
*
* There is a potential to do partial delivery if the user sets the
* SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
* to see if we can do PD.
*/
skb_queue_walk(&ulpq->reasm, pos) {
cevent = sctp_skb2event(pos);
ctsn = cevent->tsn;
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_FIRST_FRAG:
/* If this "FIRST_FRAG" is the first
* element in the queue, then count it towards
* possible PD.
*/
if (pos == ulpq->reasm.next) {
pd_first = pos;
pd_last = pos;
pd_len = pos->len;
} else {
pd_first = NULL;
pd_last = NULL;
pd_len = 0;
}
first_frag = pos;
next_tsn = ctsn + 1;
break;
case SCTP_DATA_MIDDLE_FRAG:
if ((first_frag) && (ctsn == next_tsn)) {
next_tsn++;
if (pd_first) {
pd_last = pos;
pd_len += pos->len;
}
} else
first_frag = NULL;
break;
case SCTP_DATA_LAST_FRAG:
if (first_frag && (ctsn == next_tsn))
goto found;
else
first_frag = NULL;
break;
}
}
asoc = ulpq->asoc;
if (pd_first) {
/* Make sure we can enter partial delivery.
* We can trigger partial delivery only if fragment
* interleave is set, or the socket is not already
* in partial delivery.
*/
if (!sctp_sk(asoc->base.sk)->frag_interleave &&
atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
goto done;
cevent = sctp_skb2event(pd_first);
pd_point = sctp_sk(asoc->base.sk)->pd_point;
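/* Deliver the head of the message now if the user's partial
* delivery point has been reached.
*/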
if (pd_point && pd_point <= pd_len) {
retval = sctp_make_reassembled_event(&ulpq->reasm,
pd_first,
pd_last);
if (retval)
sctp_ulpq_set_pd(ulpq);
}
}
done:
return retval;
found:
retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
if (retval)
retval->msg_flags |= MSG_EOR;
goto done;
}
/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
struct sk_buff *pos, *last_frag, *first_frag;
struct sctp_ulpevent *cevent;
__u32 ctsn, next_tsn;
int is_last;
struct sctp_ulpevent *retval;
/* The chunks are held in the reasm queue sorted by TSN.
* Walk through the queue sequentially and look for the first
* sequence of fragmented chunks.
*/
if (skb_queue_empty(&ulpq->reasm))
return NULL;
last_frag = first_frag = NULL;
retval = NULL;
next_tsn = 0;
is_last = 0;
skb_queue_walk(&ulpq->reasm, pos) {
cevent = sctp_skb2event(pos);
ctsn = cevent->tsn;
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_MIDDLE_FRAG:
if (!first_frag) {
first_frag = pos;
next_tsn = ctsn + 1;
last_frag = pos;
} else if (next_tsn == ctsn)
next_tsn++;
else
goto done;
break;
case SCTP_DATA_LAST_FRAG:
if (!first_frag)
first_frag = pos;
else if (ctsn != next_tsn)
goto done;
last_frag = pos;
is_last = 1;
goto done;
default:
return NULL;
}
}
/* We have the reassembled event. There is no need to look
* further.
*/
done:
retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
if (retval && is_last)
retval->msg_flags |= MSG_EOR;
return retval;
}
/* Helper function to reassemble chunks. Hold chunks on the reasm queue that
* need reassembling.
*/
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sctp_ulpevent *retval = NULL;
/* Check if this is part of a fragmented message. */
if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
event->msg_flags |= MSG_EOR;
return event;
}
sctp_ulpq_store_reasm(ulpq, event);
if (!ulpq->pd_mode)
retval = sctp_ulpq_retrieve_reassembled(ulpq);
else {
__u32 ctsn, ctsnap;
/* Do not even bother unless this is the next tsn to
* be delivered.
*/
ctsn = event->tsn;
ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
if (TSN_lte(ctsn, ctsnap))
retval = sctp_ulpq_retrieve_partial(ulpq);
}
return retval;
}
/* Retrieve the first part (sequential fragments) for partial delivery. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
struct sk_buff *pos, *last_frag, *first_frag;
struct sctp_ulpevent *cevent;
__u32 ctsn, next_tsn;
struct sctp_ulpevent *retval;
/* The chunks are held in the reasm queue sorted by TSN.
* Walk through the queue sequentially and look for a sequence of
* fragmented chunks that start a datagram.
*/
if (skb_queue_empty(&ulpq->reasm))
return NULL;
last_frag = first_frag = NULL;
retval = NULL;
next_tsn = 0;
skb_queue_walk(&ulpq->reasm, pos) {
cevent = sctp_skb2event(pos);
ctsn = cevent->tsn;
switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
case SCTP_DATA_FIRST_FRAG:
if (!first_frag) {
first_frag = pos;
next_tsn = ctsn + 1;
last_frag = pos;
} else
goto done;
break;
case SCTP_DATA_MIDDLE_FRAG:
if (!first_frag)
return NULL;
if (ctsn == next_tsn) {
next_tsn++;
last_frag = pos;
} else
goto done;
break;
default:
return NULL;
}
}
/* We have the reassembled event. There is no need to look
* further.
*/
done:
retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
return retval;
}
/*
* Flush out stale fragments from the reassembly queue when processing
* a Forward TSN.
*
* RFC 3758, Section 3.6
*
* After receiving and processing a FORWARD TSN, the data receiver MUST
* take cautions in updating its re-assembly queue. The receiver MUST
* remove any partially reassembled message, which is still missing one
* or more TSNs earlier than or equal to the new cumulative TSN point.
* In the event that the receiver has invoked the partial delivery API,
* a notification SHOULD also be generated to inform the upper layer API
* that the message being partially delivered will NOT be completed.
*/
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
struct sk_buff *pos, *tmp;
struct sctp_ulpevent *event;
__u32 tsn;
if (skb_queue_empty(&ulpq->reasm))
return;
skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
event = sctp_skb2event(pos);
tsn = event->tsn;
/* Since the entire message must be abandoned by the
* sender (item A3 in Section 3.5, RFC 3758), we can
* free all fragments on the list that are less than
* or equal to the ctsn_point.
*/
if (TSN_lte(tsn, fwd_tsn)) {
__skb_unlink(pos, &ulpq->reasm);
sctp_ulpevent_free(event);
} else
break;
}
}
/*
* Drain the reassembly queue. If we just cleared partial delivery, it
* is possible that the reassembly queue will contain already reassembled
* messages. Retrieve any such messages and give them to the user.
*/
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
struct sctp_ulpevent *event = NULL;
struct sk_buff_head temp;
if (skb_queue_empty(&ulpq->reasm))
return;
while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
/* Do ordering if needed. */
if ((event) && (event->msg_flags & MSG_EOR)){
skb_queue_head_init(&temp);
__skb_queue_tail(&temp, sctp_event2skb(event));
event = sctp_ulpq_order(ulpq, event);
}
/* Send event to the ULP. 'event' is the
* sctp_ulpevent for the very first SKB on the 'temp' list.
*/
if (event)
sctp_ulpq_tail_event(ulpq, event);
}
}
/* Helper function to gather skbs that have possibly become
* ordered by an incoming chunk.
*/
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sk_buff_head *event_list;
struct sk_buff *pos, *tmp;
struct sctp_ulpevent *cevent;
struct sctp_stream *in;
__u16 sid, csid, cssn;
sid = event->stream;
in = &ulpq->asoc->ssnmap->in;
event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
/* We are holding the chunks by stream, by SSN. */
sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->stream;
cssn = cevent->ssn;
/* Have we gone too far? */
if (csid > sid)
break;
/* Have we not gone far enough? */
if (csid < sid)
continue;
if (cssn != sctp_ssn_peek(in, sid))
break;
/* Found it, so mark in the ssnmap. */
sctp_ssn_next(in, sid);
__skb_unlink(pos, &ulpq->lobby);
/* Attach all gathered skbs to the event. */
__skb_queue_tail(event_list, pos);
}
}
/* Helper function to store chunks needing ordering. */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sk_buff *pos;
struct sctp_ulpevent *cevent;
__u16 sid, csid;
__u16 ssn, cssn;
pos = skb_peek_tail(&ulpq->lobby);
if (!pos) {
__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
return;
}
sid = event->stream;
ssn = event->ssn;
cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->stream;
cssn = cevent->ssn;
if (sid > csid) {
__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
return;
}
if ((sid == csid) && SSN_lt(cssn, ssn)) {
__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
return;
}
/* Find the right place in this list. We store them by
* stream ID and then by SSN.
*/
skb_queue_walk(&ulpq->lobby, pos) {
cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->stream;
cssn = cevent->ssn;
if (csid > sid)
break;
if (csid == sid && SSN_lt(ssn, cssn))
break;
}
/* Insert before pos. */
__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}
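/* Deliver an event in stream order; if its SSN is not the next
* expected one for its stream, hold it in the lobby and return NULL.
*/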
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
__u16 sid, ssn;
struct sctp_stream *in;
/* Check if this message needs ordering. */
if (SCTP_DATA_UNORDERED & event->msg_flags)
return event;
/* Note: The stream ID must be verified before this routine. */
sid = event->stream;
ssn = event->ssn;
in = &ulpq->asoc->ssnmap->in;
/* Is this the expected SSN for this stream ID? */
if (ssn != sctp_ssn_peek(in, sid)) {
/* We've received something out of order, so find where it
* needs to be placed. We order by stream and then by SSN.
*/
sctp_ulpq_store_ordered(ulpq, event);
return NULL;
}
/* Mark that the next chunk has been found. */
sctp_ssn_next(in, sid);
/* Go find any other chunks that were waiting for
* ordering.
*/
sctp_ulpq_retrieve_ordered(ulpq, event);
return event;
}
/* Helper function to gather skbs that have possibly become
* ordered by forward tsn skipping their dependencies.
*/
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
struct sk_buff *pos, *tmp;
struct sctp_ulpevent *cevent;
struct sctp_ulpevent *event;
struct sctp_stream *in;
struct sk_buff_head temp;
struct sk_buff_head *lobby = &ulpq->lobby;
__u16 csid, cssn;
in = &ulpq->asoc->ssnmap->in;
/* We are holding the chunks by stream, by SSN. */
skb_queue_head_init(&temp);
event = NULL;
sctp_skb_for_each(pos, lobby, tmp) {
cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->stream;
cssn = cevent->ssn;
/* Have we gone too far? */
if (csid > sid)
break;
/* Have we not gone far enough? */
if (csid < sid)
continue;
/* see if this ssn has been marked by skipping */
if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
break;
__skb_unlink(pos, lobby);
if (!event)
/* Create a temporary list to collect chunks on. */
event = sctp_skb2event(pos);
/* Attach all gathered skbs to the event. */
__skb_queue_tail(&temp, pos);
}
/* If we didn't reap any data, see if the next expected SSN
* is next on the queue and if so, use that.
*/
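/* Note: if the walk above ran off the end of the lobby, 'pos'
* points at the list head itself, hence the cast comparison.
*/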
if (event == NULL && pos != (struct sk_buff *)lobby) {
cevent = (struct sctp_ulpevent *) pos->cb;
csid = cevent->stream;
cssn = cevent->ssn;
if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
sctp_ssn_next(in, csid);
__skb_unlink(pos, lobby);
__skb_queue_tail(&temp, pos);
event = sctp_skb2event(pos);
}
}
/* Send event to the ULP. 'event' is the sctp_ulpevent for
* very first SKB on the 'temp' list.
*/
if (event) {
/* See if we have more ordered data that we can deliver. */
sctp_ulpq_retrieve_ordered(ulpq, event);
sctp_ulpq_tail_event(ulpq, event);
}
}
/* Skip over an SSN. This is used during the processing of
* a Forward TSN chunk to skip over the abandoned ordered data.
*/
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
struct sctp_stream *in;
/* Note: The stream ID must be verified before this routine. */
in = &ulpq->asoc->ssnmap->in;
/* Is this an old SSN? If so ignore. */
if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
return;
/* Mark that we are no longer expecting this SSN or lower. */
sctp_ssn_skip(in, sid, ssn);
/* Go find any other chunks that were waiting for
* ordering and deliver them if needed.
*/
sctp_ulpq_reap_ordered(ulpq, sid);
}
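/* Renege events from the tail of the given queue until 'needed'
* bytes have been freed; returns the number of bytes reclaimed.
*/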
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
struct sk_buff_head *list, __u16 needed)
{
__u16 freed = 0;
__u32 tsn;
struct sk_buff *skb;
struct sctp_ulpevent *event;
struct sctp_tsnmap *tsnmap;
tsnmap = &ulpq->asoc->peer.tsn_map;
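/* Each reneged TSN is cleared in the peer's tsn_map so that the
* sender will retransmit the dropped data.
*/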
while ((skb = __skb_dequeue_tail(list)) != NULL) {
freed += skb_headlen(skb);
event = sctp_skb2event(skb);
tsn = event->tsn;
sctp_ulpevent_free(event);
sctp_tsnmap_renege(tsnmap, tsn);
if (freed >= needed)
return freed;
}
return freed;
}
/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}
/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}
/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
struct sctp_chunk *chunk,
gfp_t gfp)
{
struct sctp_ulpevent *event;
struct sctp_association *asoc;
struct sctp_sock *sp;
asoc = ulpq->asoc;
sp = sctp_sk(asoc->base.sk);
/* If the association is already in Partial Delivery mode
* we have nothing to do.
*/
if (ulpq->pd_mode)
return;
/* If the user enabled the fragment interleave socket option,
* multiple associations can enter partial delivery.
* Otherwise, we can only enter partial delivery if the
* socket is not in partial delivery mode.
*/
if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
/* Is partial delivery possible? */
event = sctp_ulpq_retrieve_first(ulpq);
/* Send event to the ULP. */
if (event) {
sctp_ulpq_tail_event(ulpq, event);
sctp_ulpq_set_pd(ulpq);
return;
}
}
}
/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
gfp_t gfp)
{
struct sctp_association *asoc;
__u16 needed, freed;
asoc = ulpq->asoc;
if (chunk) {
needed = ntohs(chunk->chunk_hdr->length);
needed -= sizeof(sctp_data_chunk_t);
} else
needed = SCTP_DEFAULT_MAXWINDOW;
freed = 0;
if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
freed = sctp_ulpq_renege_order(ulpq, needed);
if (freed < needed) {
freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
}
}
/* If able to free enough room, accept this chunk. */
if (chunk && (freed >= needed)) {
__u32 tsn;
tsn = ntohl(chunk->subh.data_hdr->tsn);
sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
sctp_ulpq_tail_data(ulpq, chunk, gfp);
sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
}
sk_mem_reclaim(asoc->base.sk);
}
/* Notify the application if an association is aborted and in
* partial delivery mode. Send up any pending received messages.
*/
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
struct sctp_ulpevent *ev = NULL;
struct sock *sk;
if (!ulpq->pd_mode)
return;
sk = ulpq->asoc->base.sk;
if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
&sctp_sk(sk)->subscribe))
ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
SCTP_PARTIAL_DELIVERY_ABORTED,
gfp);
if (ev)
__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
/* If there is data waiting, send it up the socket now. */
if (sctp_ulpq_clear_pd(ulpq) || ev)
sk->sk_data_ready(sk, 0);
}
| gpl-2.0 |
XMelancholy/android_kernel_sony_u8500 | arch/score/kernel/traps.c | 9293 | 9101 | /*
* arch/score/kernel/traps.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
unsigned long exception_handlers[32];
/*
* The architecture-independent show_stack generator
*/
void show_stack(struct task_struct *task, unsigned long *sp)
{
int i;
long stackdata;
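/* If no stack pointer was supplied, approximate it with the
* address of a local that lives on the current stack.
*/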
sp = sp ? sp : (unsigned long *)&sp;
printk(KERN_NOTICE "Stack: ");
i = 1;
while ((long) sp & (PAGE_SIZE - 1)) {
if (i && ((i % 8) == 0))
printk(KERN_NOTICE "\n");
if (i > 40) {
printk(KERN_NOTICE " ...");
break;
}
if (__get_user(stackdata, sp++)) {
printk(KERN_NOTICE " (Bad stack address)");
break;
}
printk(KERN_NOTICE " %08lx", stackdata);
i++;
}
printk(KERN_NOTICE "\n");
}
static void show_trace(long *sp)
{
int i;
long addr;
sp = sp ? sp : (long *) &sp;
printk(KERN_NOTICE "Call Trace: ");
i = 1;
while ((long) sp & (PAGE_SIZE - 1)) {
if (__get_user(addr, sp++)) {
if (i && ((i % 6) == 0))
printk(KERN_NOTICE "\n");
printk(KERN_NOTICE " (Bad stack address)\n");
break;
}
if (kernel_text_address(addr)) {
if (i && ((i % 6) == 0))
printk(KERN_NOTICE "\n");
if (i > 40) {
printk(KERN_NOTICE " ...");
break;
}
printk(KERN_NOTICE " [<%08lx>]", addr);
i++;
}
}
printk(KERN_NOTICE "\n");
}
static void show_code(unsigned int *pc)
{
long i;
printk(KERN_NOTICE "\nCode:");
for (i = -3; i < 6; i++) {
unsigned long insn;
if (__get_user(insn, pc + i)) {
printk(KERN_NOTICE " (Bad address in epc)\n");
break;
}
printk(KERN_NOTICE "%c%08lx%c", (i ? ' ' : '<'),
insn, (i ? ' ' : '>'));
}
}
/*
* FIXME: really the generic show_regs should take a const pointer argument.
*/
void show_regs(struct pt_regs *regs)
{
printk("r0 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3],
regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
printk("r8 : %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11],
regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]);
printk("r16: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19],
regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]);
printk("r24: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
regs->regs[24], regs->regs[25], regs->regs[26], regs->regs[27],
regs->regs[28], regs->regs[29], regs->regs[30], regs->regs[31]);
printk("CEH : %08lx\n", regs->ceh);
printk("CEL : %08lx\n", regs->cel);
printk("EMA:%08lx, epc:%08lx %s\nPSR: %08lx\nECR:%08lx\nCondition : %08lx\n",
regs->cp0_ema, regs->cp0_epc, print_tainted(), regs->cp0_psr,
regs->cp0_ecr, regs->cp0_condition);
}
static void show_registers(struct pt_regs *regs)
{
show_regs(regs);
printk(KERN_NOTICE "Process %s (pid: %d, stackpage=%08lx)\n",
current->comm, current->pid, (unsigned long) current);
show_stack(current_thread_info()->task, (long *) regs->regs[0]);
show_trace((long *) regs->regs[0]);
show_code((unsigned int *) regs->cp0_epc);
printk(KERN_NOTICE "\n");
}
/*
* The architecture-independent dump_stack generator
*/
void dump_stack(void)
{
show_stack(current_thread_info()->task,
(long *) get_irq_regs()->regs[0]);
}
EXPORT_SYMBOL(dump_stack);
void __die(const char *str, struct pt_regs *regs, const char *file,
const char *func, unsigned long line)
{
console_verbose();
printk("%s", str);
if (file && func)
printk(" in %s:%s, line %ld", file, func, line);
printk(":\n");
show_registers(regs);
do_exit(SIGSEGV);
}
void __die_if_kernel(const char *str, struct pt_regs *regs,
const char *file, const char *func, unsigned long line)
{
if (!user_mode(regs))
__die(str, regs, file, func, line);
}
asmlinkage void do_adelinsn(struct pt_regs *regs)
{
printk("do_ADE-linsn:ema:0x%08lx:epc:0x%08lx\n",
regs->cp0_ema, regs->cp0_epc);
die_if_kernel("do_ade execution Exception\n", regs);
force_sig(SIGBUS, current);
}
asmlinkage void do_adedata(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
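/* A faulting kernel access may have a fixup entry in the
* exception table; if so, resume at the fixup address instead
* of raising a signal.
*/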
fixup = search_exception_tables(regs->cp0_epc);
if (fixup) {
regs->cp0_epc = fixup->fixup;
return;
}
printk("do_ADE-data:ema:0x%08lx:epc:0x%08lx\n",
regs->cp0_ema, regs->cp0_epc);
die_if_kernel("do_ade execution Exception\n", regs);
force_sig(SIGBUS, current);
}
asmlinkage void do_pel(struct pt_regs *regs)
{
die_if_kernel("do_pel execution Exception", regs);
force_sig(SIGFPE, current);
}
asmlinkage void do_cee(struct pt_regs *regs)
{
die_if_kernel("do_cee execution Exception", regs);
force_sig(SIGFPE, current);
}
asmlinkage void do_cpe(struct pt_regs *regs)
{
die_if_kernel("do_cpe execution Exception", regs);
force_sig(SIGFPE, current);
}
asmlinkage void do_be(struct pt_regs *regs)
{
die_if_kernel("do_be execution Exception", regs);
force_sig(SIGBUS, current);
}
asmlinkage void do_ov(struct pt_regs *regs)
{
siginfo_t info;
die_if_kernel("do_ov execution Exception", regs);
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void *)regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
}
asmlinkage void do_tr(struct pt_regs *regs)
{
die_if_kernel("do_tr execution Exception", regs);
force_sig(SIGTRAP, current);
}
asmlinkage void do_ri(struct pt_regs *regs)
{
unsigned long epc_insn;
unsigned long epc = regs->cp0_epc;
read_tsk_long(current, epc, &epc_insn);
if (current->thread.single_step == 1) {
if ((epc == current->thread.addr1) ||
(epc == current->thread.addr2)) {
user_disable_single_step(current);
force_sig(SIGTRAP, current);
return;
} else
BUG();
} else if ((epc_insn == BREAKPOINT32_INSN) ||
((epc_insn & 0x0000FFFF) == 0x7002) ||
((epc_insn & 0xFFFF0000) == 0x70020000)) {
force_sig(SIGTRAP, current);
return;
} else {
die_if_kernel("do_ri execution Exception", regs);
force_sig(SIGILL, current);
}
}
asmlinkage void do_ccu(struct pt_regs *regs)
{
die_if_kernel("do_ccu execution Exception", regs);
force_sig(SIGILL, current);
}
asmlinkage void do_reserved(struct pt_regs *regs)
{
/*
* Game over - no way to handle this if it ever occurs. Most probably
* caused by a new unknown cpu type or after another deadly
* hard/software error.
*/
die_if_kernel("do_reserved execution Exception", regs);
show_regs(regs);
panic("Caught reserved exception - should not happen.");
}
/*
* NMI exception handler.
*/
void nmi_exception_handler(struct pt_regs *regs)
{
die_if_kernel("nmi_exception_handler execution Exception", regs);
die("NMI", regs);
}
/* Install CPU exception handler */
void *set_except_vector(int n, void *addr)
{
unsigned long handler = (unsigned long) addr;
unsigned long old_handler = exception_handlers[n];
exception_handlers[n] = handler;
return (void *)old_handler;
}
void __init trap_init(void)
{
int i;
pgd_current = (unsigned long)init_mm.pgd;
/* DEBUG EXCEPTION */
memcpy((void *)DEBUG_VECTOR_BASE_ADDR,
&debug_exception_vector, DEBUG_VECTOR_SIZE);
/* NMI EXCEPTION */
memcpy((void *)GENERAL_VECTOR_BASE_ADDR,
&general_exception_vector, GENERAL_VECTOR_SIZE);
/*
* Initialise exception handlers
*/
for (i = 0; i <= 31; i++)
set_except_vector(i, handle_reserved);
set_except_vector(1, handle_nmi);
set_except_vector(2, handle_adelinsn);
set_except_vector(3, handle_tlb_refill);
set_except_vector(4, handle_tlb_invaild);
set_except_vector(5, handle_ibe);
set_except_vector(6, handle_pel);
set_except_vector(7, handle_sys);
set_except_vector(8, handle_ccu);
set_except_vector(9, handle_ri);
set_except_vector(10, handle_tr);
set_except_vector(11, handle_adedata);
set_except_vector(12, handle_adedata);
set_except_vector(13, handle_tlb_refill);
set_except_vector(14, handle_tlb_invaild);
set_except_vector(15, handle_mod);
set_except_vector(16, handle_cee);
set_except_vector(17, handle_cpe);
set_except_vector(18, handle_dbe);
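/* Make the freshly copied exception vectors visible to
* instruction fetch before any exception can occur.
*/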
flush_icache_range(DEBUG_VECTOR_BASE_ADDR, IRQ_VECTOR_BASE_ADDR);
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
cpu_cache_init();
}
| gpl-2.0 |
Shmarkus/android_kernel_rockchip_rk292x | fs/squashfs/xz_wrapper.c | 10061 | 4211 | /*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
* Phillip Lougher <phillip@squashfs.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2,
* or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* xz_wrapper.c
*/
#include <linux/mutex.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/xz.h>
#include <linux/bitops.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
struct squashfs_xz {
struct xz_dec *state;
struct xz_buf buf;
};
struct comp_opts {
__le32 dictionary_size;
__le32 flags;
};
static void *squashfs_xz_init(struct squashfs_sb_info *msblk, void *buff,
int len)
{
struct comp_opts *comp_opts = buff;
struct squashfs_xz *stream;
int dict_size = msblk->block_size;
int err, n;
if (comp_opts) {
/* check compressor options are the expected length */
if (len < sizeof(*comp_opts)) {
err = -EIO;
goto failed;
}
dict_size = le32_to_cpu(comp_opts->dictionary_size);
/* the dictionary size should be 2^n or 2^n+2^(n+1) */
n = ffs(dict_size) - 1;
if (dict_size != (1 << n) && dict_size != (1 << n) +
(1 << (n + 1))) {
err = -EIO;
goto failed;
}
}
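/* Use at least the metadata block size so that metadata blocks
* can always be decompressed with this stream.
*/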
dict_size = max_t(int, dict_size, SQUASHFS_METADATA_SIZE);
stream = kmalloc(sizeof(*stream), GFP_KERNEL);
if (stream == NULL) {
err = -ENOMEM;
goto failed;
}
stream->state = xz_dec_init(XZ_PREALLOC, dict_size);
if (stream->state == NULL) {
kfree(stream);
err = -ENOMEM;
goto failed;
}
return stream;
failed:
ERROR("Failed to initialise xz decompressor\n");
return ERR_PTR(err);
}
static void squashfs_xz_free(void *strm)
{
struct squashfs_xz *stream = strm;
if (stream) {
xz_dec_end(stream->state);
kfree(stream);
}
}
static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer,
struct buffer_head **bh, int b, int offset, int length, int srclength,
int pages)
{
enum xz_ret xz_err;
int avail, total = 0, k = 0, page = 0;
struct squashfs_xz *stream = msblk->stream;
mutex_lock(&msblk->read_data_mutex);
xz_dec_reset(stream->state);
stream->buf.in_pos = 0;
stream->buf.in_size = 0;
stream->buf.out_pos = 0;
stream->buf.out_size = PAGE_CACHE_SIZE;
stream->buf.out = buffer[page++];
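/* Feed the compressed data into the decoder one buffer_head at
* a time, switching to the next output page as each one fills.
*/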
do {
if (stream->buf.in_pos == stream->buf.in_size && k < b) {
avail = min(length, msblk->devblksize - offset);
length -= avail;
wait_on_buffer(bh[k]);
if (!buffer_uptodate(bh[k]))
goto release_mutex;
stream->buf.in = bh[k]->b_data + offset;
stream->buf.in_size = avail;
stream->buf.in_pos = 0;
offset = 0;
}
if (stream->buf.out_pos == stream->buf.out_size
&& page < pages) {
stream->buf.out = buffer[page++];
stream->buf.out_pos = 0;
total += PAGE_CACHE_SIZE;
}
xz_err = xz_dec_run(stream->state, &stream->buf);
if (stream->buf.in_pos == stream->buf.in_size && k < b)
put_bh(bh[k++]);
} while (xz_err == XZ_OK);
if (xz_err != XZ_STREAM_END) {
ERROR("xz_dec_run error, data probably corrupt\n");
goto release_mutex;
}
if (k < b) {
ERROR("xz_uncompress error, input remaining\n");
goto release_mutex;
}
total += stream->buf.out_pos;
mutex_unlock(&msblk->read_data_mutex);
return total;
release_mutex:
mutex_unlock(&msblk->read_data_mutex);
for (; k < b; k++)
put_bh(bh[k]);
return -EIO;
}
const struct squashfs_decompressor squashfs_xz_comp_ops = {
.init = squashfs_xz_init,
.free = squashfs_xz_free,
.decompress = squashfs_xz_uncompress,
.id = XZ_COMPRESSION,
.name = "xz",
.supported = 1
};
| gpl-2.0 |
googyanas/Googy-Max4-Kernel | sound/isa/galaxy/azt2316.c | 13133 | 3660 | /*
* Aztech AZT2316 Driver
* Copyright (C) 2007,2010 Rene Herman
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#define AZT2316
#define CRD_NAME "Aztech AZT2316"
#define DRV_NAME "AZT2316"
#define DEV_NAME "azt2316"
#define GALAXY_DSP_MAJOR 3
#define GALAXY_DSP_MINOR 1
#define GALAXY_CONFIG_SIZE 4
/*
* 32-bit config register
*/
#define GALAXY_CONFIG_SBA_220 (0 << 0)
#define GALAXY_CONFIG_SBA_240 (1 << 0)
#define GALAXY_CONFIG_SBA_260 (2 << 0)
#define GALAXY_CONFIG_SBA_280 (3 << 0)
#define GALAXY_CONFIG_SBA_MASK GALAXY_CONFIG_SBA_280
#define GALAXY_CONFIG_SBIRQ_2 (1 << 2)
#define GALAXY_CONFIG_SBIRQ_5 (1 << 3)
#define GALAXY_CONFIG_SBIRQ_7 (1 << 4)
#define GALAXY_CONFIG_SBIRQ_10 (1 << 5)
#define GALAXY_CONFIG_SBDMA_DISABLE (0 << 6)
#define GALAXY_CONFIG_SBDMA_0 (1 << 6)
#define GALAXY_CONFIG_SBDMA_1 (2 << 6)
#define GALAXY_CONFIG_SBDMA_3 (3 << 6)
#define GALAXY_CONFIG_WSSA_530 (0 << 8)
#define GALAXY_CONFIG_WSSA_604 (1 << 8)
#define GALAXY_CONFIG_WSSA_E80 (2 << 8)
#define GALAXY_CONFIG_WSSA_F40 (3 << 8)
#define GALAXY_CONFIG_WSS_ENABLE (1 << 10)
#define GALAXY_CONFIG_GAME_ENABLE (1 << 11)
#define GALAXY_CONFIG_MPUA_300 (0 << 12)
#define GALAXY_CONFIG_MPUA_330 (1 << 12)
#define GALAXY_CONFIG_MPU_ENABLE (1 << 13)
#define GALAXY_CONFIG_CDA_310 (0 << 14)
#define GALAXY_CONFIG_CDA_320 (1 << 14)
#define GALAXY_CONFIG_CDA_340 (2 << 14)
#define GALAXY_CONFIG_CDA_350 (3 << 14)
#define GALAXY_CONFIG_CDA_MASK GALAXY_CONFIG_CDA_350
#define GALAXY_CONFIG_CD_DISABLE (0 << 16)
#define GALAXY_CONFIG_CD_PANASONIC (1 << 16)
#define GALAXY_CONFIG_CD_SONY (2 << 16)
#define GALAXY_CONFIG_CD_MITSUMI (3 << 16)
#define GALAXY_CONFIG_CD_AZTECH (4 << 16)
#define GALAXY_CONFIG_CD_UNUSED_5 (5 << 16)
#define GALAXY_CONFIG_CD_UNUSED_6 (6 << 16)
#define GALAXY_CONFIG_CD_UNUSED_7 (7 << 16)
#define GALAXY_CONFIG_CD_MASK GALAXY_CONFIG_CD_UNUSED_7
#define GALAXY_CONFIG_CDDMA8_DISABLE (0 << 20)
#define GALAXY_CONFIG_CDDMA8_0 (1 << 20)
#define GALAXY_CONFIG_CDDMA8_1 (2 << 20)
#define GALAXY_CONFIG_CDDMA8_3 (3 << 20)
#define GALAXY_CONFIG_CDDMA8_MASK GALAXY_CONFIG_CDDMA8_3
#define GALAXY_CONFIG_CDDMA16_DISABLE (0 << 22)
#define GALAXY_CONFIG_CDDMA16_5 (1 << 22)
#define GALAXY_CONFIG_CDDMA16_6 (2 << 22)
#define GALAXY_CONFIG_CDDMA16_7 (3 << 22)
#define GALAXY_CONFIG_CDDMA16_MASK GALAXY_CONFIG_CDDMA16_7
#define GALAXY_CONFIG_MPUIRQ_2 (1 << 24)
#define GALAXY_CONFIG_MPUIRQ_5 (1 << 25)
#define GALAXY_CONFIG_MPUIRQ_7 (1 << 26)
#define GALAXY_CONFIG_MPUIRQ_10 (1 << 27)
#define GALAXY_CONFIG_CDIRQ_5 (1 << 28)
#define GALAXY_CONFIG_CDIRQ_11 (1 << 29)
#define GALAXY_CONFIG_CDIRQ_12 (1 << 30)
#define GALAXY_CONFIG_CDIRQ_15 (1 << 31)
#define GALAXY_CONFIG_CDIRQ_MASK (\
GALAXY_CONFIG_CDIRQ_5 | GALAXY_CONFIG_CDIRQ_11 |\
GALAXY_CONFIG_CDIRQ_12 | GALAXY_CONFIG_CDIRQ_15)
#define GALAXY_CONFIG_MASK (\
GALAXY_CONFIG_SBA_MASK | GALAXY_CONFIG_CDA_MASK |\
GALAXY_CONFIG_CD_MASK | GALAXY_CONFIG_CDDMA16_MASK |\
GALAXY_CONFIG_CDDMA8_MASK | GALAXY_CONFIG_CDIRQ_MASK)
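/*
 * Example (illustrative only, not from the original driver): a classic
 * Sound Blaster setup at port 0x220, IRQ 5, 8-bit DMA 1 would be encoded
 * as GALAXY_CONFIG_SBA_220 | GALAXY_CONFIG_SBIRQ_5 | GALAXY_CONFIG_SBDMA_1,
 * i.e. (0 << 0) | (1 << 3) | (2 << 6) = 0x88.
 */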
#include "galaxy.c"
| gpl-2.0 |
Vegaviet-DevTeam/Kernel_N4_N910SLK | sound/isa/galaxy/azt1605.c | 13133 | 2807 | /*
* Aztech AZT1605 Driver
* Copyright (C) 2007,2010 Rene Herman
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#define AZT1605
#define CRD_NAME "Aztech AZT1605"
#define DRV_NAME "AZT1605"
#define DEV_NAME "azt1605"
#define GALAXY_DSP_MAJOR 2
#define GALAXY_DSP_MINOR 1
#define GALAXY_CONFIG_SIZE 3
/*
* 24-bit config register
*/
#define GALAXY_CONFIG_SBA_220 (0 << 0)
#define GALAXY_CONFIG_SBA_240 (1 << 0)
#define GALAXY_CONFIG_SBA_260 (2 << 0)
#define GALAXY_CONFIG_SBA_280 (3 << 0)
#define GALAXY_CONFIG_SBA_MASK GALAXY_CONFIG_SBA_280
#define GALAXY_CONFIG_MPUA_300 (0 << 2)
#define GALAXY_CONFIG_MPUA_330 (1 << 2)
#define GALAXY_CONFIG_MPU_ENABLE (1 << 3)
#define GALAXY_CONFIG_GAME_ENABLE (1 << 4)
#define GALAXY_CONFIG_CD_PANASONIC (1 << 5)
#define GALAXY_CONFIG_CD_MITSUMI (1 << 6)
#define GALAXY_CONFIG_CD_MASK (\
GALAXY_CONFIG_CD_PANASONIC | GALAXY_CONFIG_CD_MITSUMI)
#define GALAXY_CONFIG_UNUSED (1 << 7)
#define GALAXY_CONFIG_UNUSED_MASK GALAXY_CONFIG_UNUSED
#define GALAXY_CONFIG_SBIRQ_2 (1 << 8)
#define GALAXY_CONFIG_SBIRQ_3 (1 << 9)
#define GALAXY_CONFIG_SBIRQ_5 (1 << 10)
#define GALAXY_CONFIG_SBIRQ_7 (1 << 11)
#define GALAXY_CONFIG_MPUIRQ_2 (1 << 12)
#define GALAXY_CONFIG_MPUIRQ_3 (1 << 13)
#define GALAXY_CONFIG_MPUIRQ_5 (1 << 14)
#define GALAXY_CONFIG_MPUIRQ_7 (1 << 15)
#define GALAXY_CONFIG_WSSA_530 (0 << 16)
#define GALAXY_CONFIG_WSSA_604 (1 << 16)
#define GALAXY_CONFIG_WSSA_E80 (2 << 16)
#define GALAXY_CONFIG_WSSA_F40 (3 << 16)
#define GALAXY_CONFIG_WSS_ENABLE (1 << 18)
#define GALAXY_CONFIG_CDIRQ_11 (1 << 19)
#define GALAXY_CONFIG_CDIRQ_12 (1 << 20)
#define GALAXY_CONFIG_CDIRQ_15 (1 << 21)
#define GALAXY_CONFIG_CDIRQ_MASK (\
GALAXY_CONFIG_CDIRQ_11 | GALAXY_CONFIG_CDIRQ_12 |\
GALAXY_CONFIG_CDIRQ_15)
#define GALAXY_CONFIG_CDDMA_DISABLE (0 << 22)
#define GALAXY_CONFIG_CDDMA_0 (1 << 22)
#define GALAXY_CONFIG_CDDMA_1 (2 << 22)
#define GALAXY_CONFIG_CDDMA_3 (3 << 22)
#define GALAXY_CONFIG_CDDMA_MASK GALAXY_CONFIG_CDDMA_3
#define GALAXY_CONFIG_MASK (\
GALAXY_CONFIG_SBA_MASK | GALAXY_CONFIG_CD_MASK |\
GALAXY_CONFIG_UNUSED_MASK | GALAXY_CONFIG_CDIRQ_MASK |\
GALAXY_CONFIG_CDDMA_MASK)
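/*
 * Example (illustrative only): note the field layout differs from the
 * AZT2316 -- here the WSS address field sits at bits 16-17, so WSS at
 * port 0x604 with the codec enabled encodes as
 * GALAXY_CONFIG_WSSA_604 | GALAXY_CONFIG_WSS_ENABLE = 0x50000.
 */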
#include "galaxy.c"
| gpl-2.0 |
SlimRoms/kernel_samsung_smdk4412 | drivers/video/atafb_iplan2p2.c | 14925 | 6818 | /*
* linux/drivers/video/iplan2p2.c -- Low level frame buffer operations for
* interleaved bitplanes à la Atari (2
* planes, 2 bytes interleave)
*
* Created 5 Apr 1997 by Geert Uytterhoeven
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <asm/setup.h>
#include "atafb.h"
#define BPL 2
#include "atafb_utils.h"
void atafb_iplan2p2_copyarea(struct fb_info *info, u_long next_line,
int sy, int sx, int dy, int dx,
int height, int width)
{
/* bmove() has to distinguish two major cases: if both source and
 * destination start at even addresses, or both start at odd
 * addresses, just the first odd and last even column (if present)
 * require special treatment (memmove_col()). The rest between
 * them can be copied by normal operations, because all adjacent
 * bytes are affected and are to be stored in the same order.
 * The pathological case is when the move should go from an odd
 * address to an even one or vice versa. Since the bytes in the plane
 * words must be assembled in new order, it seems wisest to make
 * all movements by memmove_col().
 */
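/*
 * Added note (illustrative): (sx ^ dx) & 15, tested below, is zero exactly
 * when source and destination share the same alignment within a 16-pixel
 * column; e.g. sx = 3, dx = 19 gives (3 ^ 19) & 15 == 0, so the fast
 * word-copy path is taken.
 */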
u8 *src, *dst;
u32 *s, *d;
int w, l, i, j;
u_int colsize;
u_int upwards = (dy < sy) || (dy == sy && dx < sx);
colsize = height;
if (!((sx ^ dx) & 15)) {
/* odd->odd or even->even */
if (upwards) {
src = (u8 *)info->screen_base + sy * next_line + (sx & ~15) / (8 / BPL);
dst = (u8 *)info->screen_base + dy * next_line + (dx & ~15) / (8 / BPL);
if (sx & 15) {
memmove32_col(dst, src, 0xff00ff, height, next_line - BPL * 2);
src += BPL * 2;
dst += BPL * 2;
width -= 8;
}
w = width >> 4;
if (w) {
s = (u32 *)src;
d = (u32 *)dst;
w *= BPL / 2;
l = next_line - w * 4;
for (j = height; j > 0; j--) {
for (i = w; i > 0; i--)
*d++ = *s++;
s = (u32 *)((u8 *)s + l);
d = (u32 *)((u8 *)d + l);
}
}
if (width & 15)
memmove32_col(dst + width / (8 / BPL), src + width / (8 / BPL),
0xff00ff00, height, next_line - BPL * 2);
} else {
src = (u8 *)info->screen_base + (sy - 1) * next_line + ((sx + width + 8) & ~15) / (8 / BPL);
dst = (u8 *)info->screen_base + (dy - 1) * next_line + ((dx + width + 8) & ~15) / (8 / BPL);
if ((sx + width) & 15) {
src -= BPL * 2;
dst -= BPL * 2;
memmove32_col(dst, src, 0xff00ff00, colsize, -next_line - BPL * 2);
width -= 8;
}
w = width >> 4;
if (w) {
s = (u32 *)src;
d = (u32 *)dst;
w *= BPL / 2;
l = next_line - w * 4;
for (j = height; j > 0; j--) {
for (i = w; i > 0; i--)
*--d = *--s;
s = (u32 *)((u8 *)s - l);
d = (u32 *)((u8 *)d - l);
}
}
if (sx & 15)
memmove32_col(dst - (width - 16) / (8 / BPL),
src - (width - 16) / (8 / BPL),
0xff00ff, colsize, -next_line - BPL * 2);
}
} else {
/* odd->even or even->odd */
if (upwards) {
u32 *src32, *dst32;
u32 pval[4], v, v1, mask;
int i, j, w, f;
src = (u8 *)info->screen_base + sy * next_line + (sx & ~15) / (8 / BPL);
dst = (u8 *)info->screen_base + dy * next_line + (dx & ~15) / (8 / BPL);
mask = 0xff00ff00;
f = 0;
w = width;
if (sx & 15) {
f = 1;
w += 8;
}
if ((sx + width) & 15)
f |= 2;
w >>= 4;
for (i = height; i; i--) {
src32 = (u32 *)src;
dst32 = (u32 *)dst;
if (f & 1) {
pval[0] = (*src32++ << 8) & mask;
} else {
pval[0] = dst32[0] & mask;
}
for (j = w; j > 0; j--) {
v = *src32++;
v1 = v & mask;
*dst32++ = pval[0] | (v1 >> 8);
pval[0] = (v ^ v1) << 8;
}
if (f & 2) {
dst32[0] = (dst32[0] & mask) | pval[0];
}
src += next_line;
dst += next_line;
}
} else {
u32 *src32, *dst32;
u32 pval[4], v, v1, mask;
int i, j, w, f;
src = (u8 *)info->screen_base + (sy - 1) * next_line + ((sx + width + 8) & ~15) / (8 / BPL);
dst = (u8 *)info->screen_base + (dy - 1) * next_line + ((dx + width + 8) & ~15) / (8 / BPL);
mask = 0xff00ff;
f = 0;
w = width;
if ((dx + width) & 15)
f = 1;
if (sx & 15) {
f |= 2;
w += 8;
}
w >>= 4;
for (i = height; i; i--) {
src32 = (u32 *)src;
dst32 = (u32 *)dst;
if (f & 1) {
pval[0] = dst32[-1] & mask;
} else {
pval[0] = (*--src32 >> 8) & mask;
}
for (j = w; j > 0; j--) {
v = *--src32;
v1 = v & mask;
*--dst32 = pval[0] | (v1 << 8);
pval[0] = (v ^ v1) >> 8;
}
if (!(f & 2)) {
dst32[-1] = (dst32[-1] & mask) | pval[0];
}
src -= next_line;
dst -= next_line;
}
}
}
}
void atafb_iplan2p2_fillrect(struct fb_info *info, u_long next_line, u32 color,
int sy, int sx, int height, int width)
{
u32 *dest;
int rows, i;
u32 cval[4];
dest = (u32 *)(info->screen_base + sy * next_line + (sx & ~15) / (8 / BPL));
if (sx & 15) {
u8 *dest8 = (u8 *)dest + 1;
expand8_col2mask(color, cval);
for (i = height; i; i--) {
fill8_col(dest8, cval);
dest8 += next_line;
}
dest += BPL / 2;
width -= 8;
}
expand16_col2mask(color, cval);
rows = width >> 4;
if (rows) {
u32 *d = dest;
u32 off = next_line - rows * BPL * 2;
for (i = height; i; i--) {
d = fill16_col(d, rows, cval);
d = (u32 *)((long)d + off);
}
dest += rows * BPL / 2;
width &= 15;
}
if (width) {
u8 *dest8 = (u8 *)dest;
expand8_col2mask(color, cval);
for (i = height; i; i--) {
fill8_col(dest8, cval);
dest8 += next_line;
}
}
}
void atafb_iplan2p2_linefill(struct fb_info *info, u_long next_line,
int dy, int dx, u32 width,
const u8 *data, u32 bgcolor, u32 fgcolor)
{
u32 *dest;
const u16 *data16;
int rows;
u32 fgm[4], bgm[4], m;
dest = (u32 *)(info->screen_base + dy * next_line + (dx & ~15) / (8 / BPL));
if (dx & 15) {
fill8_2col((u8 *)dest + 1, fgcolor, bgcolor, *data++);
dest += BPL / 2;
width -= 8;
}
if (width >= 16) {
data16 = (const u16 *)data;
expand16_2col2mask(fgcolor, bgcolor, fgm, bgm);
for (rows = width / 16; rows; rows--) {
u16 d = *data16++;
m = d | ((u32)d << 16);
*dest++ = (m & fgm[0]) ^ bgm[0];
}
data = (const u8 *)data16;
width &= 15;
}
if (width)
fill8_2col((u8 *)dest, fgcolor, bgcolor, *data);
}
#ifdef MODULE
MODULE_LICENSE("GPL");
int init_module(void)
{
return 0;
}
void cleanup_module(void)
{
}
#endif /* MODULE */
/*
* Visible symbols for modules
*/
EXPORT_SYMBOL(atafb_iplan2p2_copyarea);
EXPORT_SYMBOL(atafb_iplan2p2_fillrect);
EXPORT_SYMBOL(atafb_iplan2p2_linefill);
| gpl-2.0 |
HONO/CM10_Kernel | drivers/video/atafb_iplan2p8.c | 14925 | 8377 | /*
* linux/drivers/video/iplan2p8.c -- Low level frame buffer operations for
* interleaved bitplanes à la Atari (8
* planes, 2 bytes interleave)
*
* Created 5 Apr 1997 by Geert Uytterhoeven
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <asm/setup.h>
#include "atafb.h"
#define BPL 8
#include "atafb_utils.h"
/* Copies an 8-plane column from 's', height 'h', to 'd'. */
/* This expands an 8-bit color into two longs for two movepl (8 plane)
* operations.
*/
void atafb_iplan2p8_copyarea(struct fb_info *info, u_long next_line,
int sy, int sx, int dy, int dx,
int height, int width)
{
/* bmove() has to distinguish two major cases: if both source and
 * destination start at even addresses, or both start at odd
 * addresses, just the first odd and last even column (if present)
 * require special treatment (memmove_col()). The rest between
 * them can be copied by normal operations, because all adjacent
 * bytes are affected and are to be stored in the same order.
 * The pathological case is when the move should go from an odd
 * address to an even one or vice versa. Since the bytes in the plane
 * words must be assembled in new order, it seems wisest to make
 * all movements by memmove_col().
 */
u8 *src, *dst;
u32 *s, *d;
int w, l, i, j;
u_int colsize;
u_int upwards = (dy < sy) || (dy == sy && dx < sx);
colsize = height;
if (!((sx ^ dx) & 15)) {
/* odd->odd or even->even */
if (upwards) {
src = (u8 *)info->screen_base + sy * next_line + (sx & ~15) / (8 / BPL);
dst = (u8 *)info->screen_base + dy * next_line + (dx & ~15) / (8 / BPL);
if (sx & 15) {
memmove32_col(dst, src, 0xff00ff, height, next_line - BPL * 2);
src += BPL * 2;
dst += BPL * 2;
width -= 8;
}
w = width >> 4;
if (w) {
s = (u32 *)src;
d = (u32 *)dst;
w *= BPL / 2;
l = next_line - w * 4;
for (j = height; j > 0; j--) {
for (i = w; i > 0; i--)
*d++ = *s++;
s = (u32 *)((u8 *)s + l);
d = (u32 *)((u8 *)d + l);
}
}
if (width & 15)
memmove32_col(dst + width / (8 / BPL), src + width / (8 / BPL),
0xff00ff00, height, next_line - BPL * 2);
} else {
src = (u8 *)info->screen_base + (sy - 1) * next_line + ((sx + width + 8) & ~15) / (8 / BPL);
dst = (u8 *)info->screen_base + (dy - 1) * next_line + ((dx + width + 8) & ~15) / (8 / BPL);
if ((sx + width) & 15) {
src -= BPL * 2;
dst -= BPL * 2;
memmove32_col(dst, src, 0xff00ff00, colsize, -next_line - BPL * 2);
width -= 8;
}
w = width >> 4;
if (w) {
s = (u32 *)src;
d = (u32 *)dst;
w *= BPL / 2;
l = next_line - w * 4;
for (j = height; j > 0; j--) {
for (i = w; i > 0; i--)
*--d = *--s;
s = (u32 *)((u8 *)s - l);
d = (u32 *)((u8 *)d - l);
}
}
if (sx & 15)
memmove32_col(dst - (width - 16) / (8 / BPL),
src - (width - 16) / (8 / BPL),
0xff00ff, colsize, -next_line - BPL * 2);
}
} else {
/* odd->even or even->odd */
if (upwards) {
u32 *src32, *dst32;
u32 pval[4], v, v1, mask;
int i, j, w, f;
src = (u8 *)info->screen_base + sy * next_line + (sx & ~15) / (8 / BPL);
dst = (u8 *)info->screen_base + dy * next_line + (dx & ~15) / (8 / BPL);
mask = 0xff00ff00;
f = 0;
w = width;
if (sx & 15) {
f = 1;
w += 8;
}
if ((sx + width) & 15)
f |= 2;
w >>= 4;
for (i = height; i; i--) {
src32 = (u32 *)src;
dst32 = (u32 *)dst;
if (f & 1) {
pval[0] = (*src32++ << 8) & mask;
pval[1] = (*src32++ << 8) & mask;
pval[2] = (*src32++ << 8) & mask;
pval[3] = (*src32++ << 8) & mask;
} else {
pval[0] = dst32[0] & mask;
pval[1] = dst32[1] & mask;
pval[2] = dst32[2] & mask;
pval[3] = dst32[3] & mask;
}
for (j = w; j > 0; j--) {
v = *src32++;
v1 = v & mask;
*dst32++ = pval[0] | (v1 >> 8);
pval[0] = (v ^ v1) << 8;
v = *src32++;
v1 = v & mask;
*dst32++ = pval[1] | (v1 >> 8);
pval[1] = (v ^ v1) << 8;
v = *src32++;
v1 = v & mask;
*dst32++ = pval[2] | (v1 >> 8);
pval[2] = (v ^ v1) << 8;
v = *src32++;
v1 = v & mask;
*dst32++ = pval[3] | (v1 >> 8);
pval[3] = (v ^ v1) << 8;
}
if (f & 2) {
dst32[0] = (dst32[0] & mask) | pval[0];
dst32[1] = (dst32[1] & mask) | pval[1];
dst32[2] = (dst32[2] & mask) | pval[2];
dst32[3] = (dst32[3] & mask) | pval[3];
}
src += next_line;
dst += next_line;
}
} else {
u32 *src32, *dst32;
u32 pval[4], v, v1, mask;
int i, j, w, f;
src = (u8 *)info->screen_base + (sy - 1) * next_line + ((sx + width + 8) & ~15) / (8 / BPL);
dst = (u8 *)info->screen_base + (dy - 1) * next_line + ((dx + width + 8) & ~15) / (8 / BPL);
mask = 0xff00ff;
f = 0;
w = width;
if ((dx + width) & 15)
f = 1;
if (sx & 15) {
f |= 2;
w += 8;
}
w >>= 4;
for (i = height; i; i--) {
src32 = (u32 *)src;
dst32 = (u32 *)dst;
if (f & 1) {
pval[0] = dst32[-1] & mask;
pval[1] = dst32[-2] & mask;
pval[2] = dst32[-3] & mask;
pval[3] = dst32[-4] & mask;
} else {
pval[0] = (*--src32 >> 8) & mask;
pval[1] = (*--src32 >> 8) & mask;
pval[2] = (*--src32 >> 8) & mask;
pval[3] = (*--src32 >> 8) & mask;
}
for (j = w; j > 0; j--) {
v = *--src32;
v1 = v & mask;
*--dst32 = pval[0] | (v1 << 8);
pval[0] = (v ^ v1) >> 8;
v = *--src32;
v1 = v & mask;
*--dst32 = pval[1] | (v1 << 8);
pval[1] = (v ^ v1) >> 8;
v = *--src32;
v1 = v & mask;
*--dst32 = pval[2] | (v1 << 8);
pval[2] = (v ^ v1) >> 8;
v = *--src32;
v1 = v & mask;
*--dst32 = pval[3] | (v1 << 8);
pval[3] = (v ^ v1) >> 8;
}
if (!(f & 2)) {
dst32[-1] = (dst32[-1] & mask) | pval[0];
dst32[-2] = (dst32[-2] & mask) | pval[1];
dst32[-3] = (dst32[-3] & mask) | pval[2];
dst32[-4] = (dst32[-4] & mask) | pval[3];
}
src -= next_line;
dst -= next_line;
}
}
}
}
void atafb_iplan2p8_fillrect(struct fb_info *info, u_long next_line, u32 color,
int sy, int sx, int height, int width)
{
u32 *dest;
int rows, i;
u32 cval[4];
dest = (u32 *)(info->screen_base + sy * next_line + (sx & ~15) / (8 / BPL));
if (sx & 15) {
u8 *dest8 = (u8 *)dest + 1;
expand8_col2mask(color, cval);
for (i = height; i; i--) {
fill8_col(dest8, cval);
dest8 += next_line;
}
dest += BPL / 2;
width -= 8;
}
expand16_col2mask(color, cval);
rows = width >> 4;
if (rows) {
u32 *d = dest;
u32 off = next_line - rows * BPL * 2;
for (i = height; i; i--) {
d = fill16_col(d, rows, cval);
d = (u32 *)((long)d + off);
}
dest += rows * BPL / 2;
width &= 15;
}
if (width) {
u8 *dest8 = (u8 *)dest;
expand8_col2mask(color, cval);
for (i = height; i; i--) {
fill8_col(dest8, cval);
dest8 += next_line;
}
}
}
void atafb_iplan2p8_linefill(struct fb_info *info, u_long next_line,
int dy, int dx, u32 width,
const u8 *data, u32 bgcolor, u32 fgcolor)
{
u32 *dest;
const u16 *data16;
int rows;
u32 fgm[4], bgm[4], m;
dest = (u32 *)(info->screen_base + dy * next_line + (dx & ~15) / (8 / BPL));
if (dx & 15) {
fill8_2col((u8 *)dest + 1, fgcolor, bgcolor, *data++);
dest += BPL / 2;
width -= 8;
}
if (width >= 16) {
data16 = (const u16 *)data;
expand16_2col2mask(fgcolor, bgcolor, fgm, bgm);
for (rows = width / 16; rows; rows--) {
u16 d = *data16++;
m = d | ((u32)d << 16);
*dest++ = (m & fgm[0]) ^ bgm[0];
*dest++ = (m & fgm[1]) ^ bgm[1];
*dest++ = (m & fgm[2]) ^ bgm[2];
*dest++ = (m & fgm[3]) ^ bgm[3];
}
data = (const u8 *)data16;
width &= 15;
}
if (width)
fill8_2col((u8 *)dest, fgcolor, bgcolor, *data);
}
#ifdef MODULE
MODULE_LICENSE("GPL");
int init_module(void)
{
return 0;
}
void cleanup_module(void)
{
}
#endif /* MODULE */
/*
* Visible symbols for modules
*/
EXPORT_SYMBOL(atafb_iplan2p8_copyarea);
EXPORT_SYMBOL(atafb_iplan2p8_fillrect);
EXPORT_SYMBOL(atafb_iplan2p8_linefill);
| gpl-2.0 |
hzpeterchen/linux-usb | arch/arm/mach-clps711x/common.c | 78 | 9397 | /*
* linux/arch/arm/mach-clps711x/core.c
*
* Core support for the CLPS711x-based machines.
*
* Copyright (C) 2001,2011 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/io.h>
#include <linux/init.h>
#include <linux/sizes.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/clk-provider.h>
#include <linux/sched_clock.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/system_misc.h>
#include <mach/hardware.h>
static struct clk *clk_pll, *clk_bus, *clk_uart, *clk_timerl, *clk_timerh,
*clk_tint, *clk_spi;
/*
* This maps the generic CLPS711x registers
*/
static struct map_desc clps711x_io_desc[] __initdata = {
{
.virtual = (unsigned long)CLPS711X_VIRT_BASE,
.pfn = __phys_to_pfn(CLPS711X_PHYS_BASE),
.length = SZ_64K,
.type = MT_DEVICE
}
};
void __init clps711x_map_io(void)
{
iotable_init(clps711x_io_desc, ARRAY_SIZE(clps711x_io_desc));
}
static void int1_mask(struct irq_data *d)
{
u32 intmr1;
intmr1 = clps_readl(INTMR1);
intmr1 &= ~(1 << d->irq);
clps_writel(intmr1, INTMR1);
}
static void int1_eoi(struct irq_data *d)
{
switch (d->irq) {
case IRQ_CSINT: clps_writel(0, COEOI); break;
case IRQ_TC1OI: clps_writel(0, TC1EOI); break;
case IRQ_TC2OI: clps_writel(0, TC2EOI); break;
case IRQ_RTCMI: clps_writel(0, RTCEOI); break;
case IRQ_TINT: clps_writel(0, TEOI); break;
case IRQ_UMSINT: clps_writel(0, UMSEOI); break;
}
}
static void int1_unmask(struct irq_data *d)
{
u32 intmr1;
intmr1 = clps_readl(INTMR1);
intmr1 |= 1 << d->irq;
clps_writel(intmr1, INTMR1);
}
static struct irq_chip int1_chip = {
.name = "Interrupt Vector 1",
.irq_eoi = int1_eoi,
.irq_mask = int1_mask,
.irq_unmask = int1_unmask,
};
static void int2_mask(struct irq_data *d)
{
u32 intmr2;
intmr2 = clps_readl(INTMR2);
intmr2 &= ~(1 << (d->irq - 16));
clps_writel(intmr2, INTMR2);
}
static void int2_eoi(struct irq_data *d)
{
switch (d->irq) {
case IRQ_KBDINT: clps_writel(0, KBDEOI); break;
}
}
static void int2_unmask(struct irq_data *d)
{
u32 intmr2;
intmr2 = clps_readl(INTMR2);
intmr2 |= 1 << (d->irq - 16);
clps_writel(intmr2, INTMR2);
}
static struct irq_chip int2_chip = {
.name = "Interrupt Vector 2",
.irq_eoi = int2_eoi,
.irq_mask = int2_mask,
.irq_unmask = int2_unmask,
};
static void int3_mask(struct irq_data *d)
{
u32 intmr3;
intmr3 = clps_readl(INTMR3);
intmr3 &= ~(1 << (d->irq - 32));
clps_writel(intmr3, INTMR3);
}
static void int3_unmask(struct irq_data *d)
{
u32 intmr3;
intmr3 = clps_readl(INTMR3);
intmr3 |= 1 << (d->irq - 32);
clps_writel(intmr3, INTMR3);
}
static struct irq_chip int3_chip = {
.name = "Interrupt Vector 3",
.irq_mask = int3_mask,
.irq_unmask = int3_unmask,
};
static struct {
int nr;
struct irq_chip *chip;
irq_flow_handler_t handle;
} clps711x_irqdescs[] __initdata = {
{ IRQ_CSINT, &int1_chip, handle_fasteoi_irq, },
{ IRQ_EINT1, &int1_chip, handle_level_irq, },
{ IRQ_EINT2, &int1_chip, handle_level_irq, },
{ IRQ_EINT3, &int1_chip, handle_level_irq, },
{ IRQ_TC1OI, &int1_chip, handle_fasteoi_irq, },
{ IRQ_TC2OI, &int1_chip, handle_fasteoi_irq, },
{ IRQ_RTCMI, &int1_chip, handle_fasteoi_irq, },
{ IRQ_TINT, &int1_chip, handle_fasteoi_irq, },
{ IRQ_UTXINT1, &int1_chip, handle_level_irq, },
{ IRQ_URXINT1, &int1_chip, handle_level_irq, },
{ IRQ_UMSINT, &int1_chip, handle_fasteoi_irq, },
{ IRQ_SSEOTI, &int1_chip, handle_level_irq, },
{ IRQ_KBDINT, &int2_chip, handle_fasteoi_irq, },
{ IRQ_SS2RX, &int2_chip, handle_level_irq, },
{ IRQ_SS2TX, &int2_chip, handle_level_irq, },
{ IRQ_UTXINT2, &int2_chip, handle_level_irq, },
{ IRQ_URXINT2, &int2_chip, handle_level_irq, },
};
void __init clps711x_init_irq(void)
{
unsigned int i;
/* Disable interrupts */
clps_writel(0, INTMR1);
clps_writel(0, INTMR2);
clps_writel(0, INTMR3);
/* Clear down any pending interrupts */
clps_writel(0, BLEOI);
clps_writel(0, MCEOI);
clps_writel(0, COEOI);
clps_writel(0, TC1EOI);
clps_writel(0, TC2EOI);
clps_writel(0, RTCEOI);
clps_writel(0, TEOI);
clps_writel(0, UMSEOI);
clps_writel(0, KBDEOI);
clps_writel(0, SRXEOF);
clps_writel(0xffffffff, DAISR);
for (i = 0; i < ARRAY_SIZE(clps711x_irqdescs); i++) {
irq_set_chip_and_handler(clps711x_irqdescs[i].nr,
clps711x_irqdescs[i].chip,
clps711x_irqdescs[i].handle);
set_irq_flags(clps711x_irqdescs[i].nr,
IRQF_VALID | IRQF_PROBE);
}
if (IS_ENABLED(CONFIG_FIQ)) {
init_FIQ(0);
irq_set_chip_and_handler(IRQ_DAIINT, &int3_chip,
handle_bad_irq);
set_irq_flags(IRQ_DAIINT,
IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
}
}
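/*
 * Descriptive note (added): fls16() returns the 0-based index of the most
 * significant set bit in the low 16 bits of x, e.g. fls16(0x0001) = 0 and
 * fls16(0x8000) = 15, so clps711x_handle_irq() below dispatches the
 * highest-numbered pending interrupt first.
 */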
static inline u32 fls16(u32 x)
{
u32 r = 15;
if (!(x & 0xff00)) {
x <<= 8;
r -= 8;
}
if (!(x & 0xf000)) {
x <<= 4;
r -= 4;
}
if (!(x & 0xc000)) {
x <<= 2;
r -= 2;
}
if (!(x & 0x8000))
r--;
return r;
}
asmlinkage void __exception_irq_entry clps711x_handle_irq(struct pt_regs *regs)
{
do {
u32 irqstat;
void __iomem *base = CLPS711X_VIRT_BASE;
irqstat = readw_relaxed(base + INTSR1) &
readw_relaxed(base + INTMR1);
if (irqstat)
handle_IRQ(fls16(irqstat), regs);
irqstat = readw_relaxed(base + INTSR2) &
readw_relaxed(base + INTMR2);
if (irqstat) {
handle_IRQ(fls16(irqstat) + 16, regs);
continue;
}
break;
} while (1);
}
static u32 notrace clps711x_sched_clock_read(void)
{
return ~readw_relaxed(CLPS711X_VIRT_BASE + TC1D);
}
static void clps711x_clockevent_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
disable_irq(IRQ_TC2OI);
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
enable_irq(IRQ_TC2OI);
break;
case CLOCK_EVT_MODE_ONESHOT:
/* Not supported */
case CLOCK_EVT_MODE_SHUTDOWN:
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_RESUME:
/* Leave event sources disabled; no more interrupts appear */
break;
}
}
static struct clock_event_device clockevent_clps711x = {
.name = "clps711x-clockevent",
.rating = 300,
.features = CLOCK_EVT_FEAT_PERIODIC,
.set_mode = clps711x_clockevent_set_mode,
};
static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id)
{
clockevent_clps711x.event_handler(&clockevent_clps711x);
return IRQ_HANDLED;
}
static struct irqaction clps711x_timer_irq = {
.name = "clps711x-timer",
.flags = IRQF_TIMER | IRQF_IRQPOLL,
.handler = clps711x_timer_interrupt,
};
static void add_fixed_clk(struct clk *clk, const char *name, int rate)
{
clk = clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
clk_register_clkdev(clk, name, NULL);
}
void __init clps711x_timer_init(void)
{
int osc, ext, pll, cpu, bus, timl, timh, uart, spi;
u32 tmp;
osc = 3686400;
ext = 13000000;
tmp = clps_readl(PLLR) >> 24;
if (tmp)
pll = (osc * tmp) / 2;
else
pll = 73728000; /* Default value */
tmp = clps_readl(SYSFLG2);
if (tmp & SYSFLG2_CKMODE) {
cpu = ext;
bus = cpu;
spi = 135400;
pll = 0;
} else {
cpu = pll;
if (cpu >= 36864000)
bus = cpu / 2;
else
bus = 36864000 / 2;
spi = cpu / 576;
}
uart = bus / 10;
if (tmp & SYSFLG2_CKMODE) {
tmp = clps_readl(SYSCON2);
if (tmp & SYSCON2_OSTB)
timh = ext / 26;
else
timh = 541440;
} else
timh = DIV_ROUND_CLOSEST(cpu, 144);
timl = DIV_ROUND_CLOSEST(timh, 256);
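/*
 * Worked example (illustrative, assuming the default PLL): with
 * pll = 73728000 and CKMODE clear, cpu = 73728000, bus = 36864000,
 * timh = DIV_ROUND_CLOSEST(73728000, 144) = 512000 and
 * timl = DIV_ROUND_CLOSEST(512000, 256) = 2000 Hz.
 */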
/* All clocks are fixed */
add_fixed_clk(clk_pll, "pll", pll);
add_fixed_clk(clk_bus, "bus", bus);
add_fixed_clk(clk_uart, "uart", uart);
add_fixed_clk(clk_timerl, "timer_lf", timl);
add_fixed_clk(clk_timerh, "timer_hf", timh);
add_fixed_clk(clk_tint, "tint", 64);
add_fixed_clk(clk_spi, "spi", spi);
pr_info("CPU frequency set at %i Hz.\n", cpu);
/* Start Timer1 in free running mode (Low frequency) */
tmp = clps_readl(SYSCON1) & ~(SYSCON1_TC1S | SYSCON1_TC1M);
clps_writel(tmp, SYSCON1);
setup_sched_clock(clps711x_sched_clock_read, 16, timl);
clocksource_mmio_init(CLPS711X_VIRT_BASE + TC1D,
"clps711x_clocksource", timl, 300, 16,
clocksource_mmio_readw_down);
/* Set Timer2 prescaler */
clps_writew(DIV_ROUND_CLOSEST(timh, HZ), TC2D);
/* Start Timer2 in prescale mode (High frequency)*/
tmp = clps_readl(SYSCON1) | SYSCON1_TC2M | SYSCON1_TC2S;
clps_writel(tmp, SYSCON1);
clockevents_config_and_register(&clockevent_clps711x, timh, 0, 0);
setup_irq(IRQ_TC2OI, &clps711x_timer_irq);
}
void clps711x_restart(enum reboot_mode mode, const char *cmd)
{
soft_restart(0);
}
static void clps711x_idle(void)
{
clps_writel(1, HALT);
asm("mov r0, r0");
asm("mov r0, r0");
}
void __init clps711x_init_early(void)
{
arm_pm_idle = clps711x_idle;
}
| gpl-2.0 |
uoaerg/linux-ecn | drivers/usb/host/xhci-mem.c | 78 | 74567 | /*
* xHCI host controller driver
*
* Copyright (C) 2008 Intel Corp.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/usb.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"
/*
* Allocates a generic ring segment from the ring pool, sets the dma address,
* initializes the segment to zero, and sets the private next pointer to NULL.
*
* Section 4.11.1.1:
* "All components of all Command and Transfer TRBs shall be initialized to '0'"
*/
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
unsigned int cycle_state, gfp_t flags)
{
struct xhci_segment *seg;
dma_addr_t dma;
int i;
seg = kzalloc(sizeof *seg, flags);
if (!seg)
return NULL;
seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
if (!seg->trbs) {
kfree(seg);
return NULL;
}
memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
if (cycle_state == 0) {
for (i = 0; i < TRBS_PER_SEGMENT; i++)
seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
}
seg->dma = dma;
seg->next = NULL;
return seg;
}
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
if (seg->trbs) {
dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
seg->trbs = NULL;
}
kfree(seg);
}
static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment *first)
{
struct xhci_segment *seg;
seg = first->next;
while (seg != first) {
struct xhci_segment *next = seg->next;
xhci_segment_free(xhci, seg);
seg = next;
}
xhci_segment_free(xhci, first);
}
/*
* Make the prev segment point to the next segment.
*
* Change the last TRB in the prev segment to be a Link TRB which points to the
* DMA address of the next segment. The caller needs to set any Link TRB
* related flags, such as End TRB, Toggle Cycle, and no snoop.
*/
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
struct xhci_segment *next, enum xhci_ring_type type)
{
u32 val;
if (!prev || !next)
return;
prev->next = next;
if (type != TYPE_EVENT) {
prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
cpu_to_le64(next->dma);
/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
val &= ~TRB_TYPE_BITMASK;
val |= TRB_TYPE(TRB_LINK);
/* Always set the chain bit with 0.95 hardware */
/* Set chain bit for isoc rings on AMD 0.96 host */
if (xhci_link_trb_quirk(xhci) ||
(type == TYPE_ISOC &&
(xhci->quirks & XHCI_AMD_0x96_HOST)))
val |= TRB_CHAIN;
prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
}
}
/*
* Link the ring to the new segments.
* Set Toggle Cycle for the new ring if needed.
*/
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
struct xhci_segment *first, struct xhci_segment *last,
unsigned int num_segs)
{
struct xhci_segment *next;
if (!ring || !first || !last)
return;
next = ring->enq_seg->next;
xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
xhci_link_segments(xhci, last, next, ring->type);
ring->num_segs += num_segs;
ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
&= ~cpu_to_le32(LINK_TOGGLE);
last->trbs[TRBS_PER_SEGMENT-1].link.control
|= cpu_to_le32(LINK_TOGGLE);
ring->last_seg = last;
}
}
/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
if (!ring)
return;
if (ring->first_seg)
xhci_free_segments_for_ring(xhci, ring->first_seg);
kfree(ring);
}
static void xhci_initialize_ring_info(struct xhci_ring *ring,
unsigned int cycle_state)
{
/* The ring is empty, so the enqueue pointer == dequeue pointer */
ring->enqueue = ring->first_seg->trbs;
ring->enq_seg = ring->first_seg;
ring->dequeue = ring->enqueue;
ring->deq_seg = ring->first_seg;
/* The ring is initialized to 0. The producer must write 1 to the cycle
* bit to hand over ownership of the TRB, so PCS = 1. The consumer must
* compare CCS to the cycle bit to check ownership, so CCS = 1.
*
* New rings are initialized with cycle state equal to 1; if we are
* handling ring expansion, set the cycle state equal to the old ring's.
*/
ring->cycle_state = cycle_state;
/* Not necessary for new rings, but needed for re-initialized rings */
ring->enq_updates = 0;
ring->deq_updates = 0;
/*
* Each segment has a link TRB, and leave an extra TRB for SW
* accounting purpose
*/
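/*
 * Worked example (illustrative, assuming the usual TRBS_PER_SEGMENT = 64):
 * with num_segs = 2, the expression below leaves 2 * 63 - 1 = 125 free TRBs.
 */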
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment **first, struct xhci_segment **last,
unsigned int num_segs, unsigned int cycle_state,
enum xhci_ring_type type, gfp_t flags)
{
struct xhci_segment *prev;
prev = xhci_segment_alloc(xhci, cycle_state, flags);
if (!prev)
return -ENOMEM;
num_segs--;
*first = prev;
while (num_segs > 0) {
struct xhci_segment *next;
next = xhci_segment_alloc(xhci, cycle_state, flags);
if (!next) {
prev = *first;
while (prev) {
next = prev->next;
xhci_segment_free(xhci, prev);
prev = next;
}
return -ENOMEM;
}
xhci_link_segments(xhci, prev, next, type);
prev = next;
num_segs--;
}
xhci_link_segments(xhci, prev, *first, type);
*last = prev;
return 0;
}
/**
* Create a new ring with zero or more segments.
*
* Link each segment together into a ring.
* Set the end flag and the cycle toggle bit on the last segment.
* See section 4.9.1 and figures 15 and 16.
*/
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
unsigned int num_segs, unsigned int cycle_state,
enum xhci_ring_type type, gfp_t flags)
{
struct xhci_ring *ring;
int ret;
ring = kzalloc(sizeof *(ring), flags);
if (!ring)
return NULL;
ring->num_segs = num_segs;
INIT_LIST_HEAD(&ring->td_list);
ring->type = type;
if (num_segs == 0)
return ring;
ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
&ring->last_seg, num_segs, cycle_state, type, flags);
if (ret)
goto fail;
/* Only event ring does not use link TRB */
if (type != TYPE_EVENT) {
/* See section 4.9.2.1 and 6.4.4.1 */
ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
cpu_to_le32(LINK_TOGGLE);
}
xhci_initialize_ring_info(ring, cycle_state);
return ring;
fail:
kfree(ring);
return NULL;
}
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
unsigned int ep_index)
{
int rings_cached;
rings_cached = virt_dev->num_rings_cached;
if (rings_cached < XHCI_MAX_RINGS_CACHED) {
virt_dev->ring_cache[rings_cached] =
virt_dev->eps[ep_index].ring;
virt_dev->num_rings_cached++;
xhci_dbg(xhci, "Cached old ring, "
"%d ring%s cached\n",
virt_dev->num_rings_cached,
(virt_dev->num_rings_cached > 1) ? "s" : "");
} else {
xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
xhci_dbg(xhci, "Ring cache full (%d rings), "
"freeing ring\n",
virt_dev->num_rings_cached);
}
virt_dev->eps[ep_index].ring = NULL;
}
/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
* pointers to the beginning of the ring.
*/
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
struct xhci_ring *ring, unsigned int cycle_state,
enum xhci_ring_type type)
{
struct xhci_segment *seg = ring->first_seg;
int i;
do {
memset(seg->trbs, 0,
sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
if (cycle_state == 0) {
for (i = 0; i < TRBS_PER_SEGMENT; i++)
seg->trbs[i].link.control |=
cpu_to_le32(TRB_CYCLE);
}
/* All endpoint rings have link TRBs */
xhci_link_segments(xhci, seg, seg->next, type);
seg = seg->next;
} while (seg != ring->first_seg);
ring->type = type;
xhci_initialize_ring_info(ring, cycle_state);
/* td list should be empty since all URBs have been cancelled,
* but just in case...
*/
INIT_LIST_HEAD(&ring->td_list);
}
/*
* Expand an existing ring.
* Look for a cached ring or allocate a new ring which has same segment numbers
* and link the two rings.
*/
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs, gfp_t flags)
{
struct xhci_segment *first;
struct xhci_segment *last;
unsigned int num_segs;
unsigned int num_segs_needed;
int ret;
num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
(TRBS_PER_SEGMENT - 1);
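/*
 * Added note (illustrative): this is a ceiling division by the usable
 * TRBs per segment; e.g. num_trbs = 64 with TRBS_PER_SEGMENT = 64 gives
 * (64 + 62) / 63 = 2 new segments.
 */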
/* Allocate number of segments we needed, or double the ring size */
num_segs = ring->num_segs > num_segs_needed ?
ring->num_segs : num_segs_needed;
ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
num_segs, ring->cycle_state, ring->type, flags);
if (ret)
return -ENOMEM;
xhci_link_rings(xhci, ring, first, last, num_segs);
xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
"ring expansion succeed, now has %d segments",
ring->num_segs);
return 0;
}
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
int type, gfp_t flags)
{
struct xhci_container_ctx *ctx;
if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
return NULL;
ctx = kzalloc(sizeof(*ctx), flags);
if (!ctx)
return NULL;
ctx->type = type;
ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
if (type == XHCI_CTX_TYPE_INPUT)
ctx->size += CTX_SIZE(xhci->hcc_params);
ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
if (!ctx->bytes) {
kfree(ctx);
return NULL;
}
memset(ctx->bytes, 0, ctx->size);
return ctx;
}
static void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
if (!ctx)
return;
dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
kfree(ctx);
}
struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
if (ctx->type != XHCI_CTX_TYPE_INPUT)
return NULL;
return (struct xhci_input_control_ctx *)ctx->bytes;
}
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
if (ctx->type == XHCI_CTX_TYPE_DEVICE)
return (struct xhci_slot_ctx *)ctx->bytes;
return (struct xhci_slot_ctx *)
(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
unsigned int ep_index)
{
/* increment ep index by offset of start of ep ctx array */
ep_index++;
if (ctx->type == XHCI_CTX_TYPE_INPUT)
ep_index++;
return (struct xhci_ep_ctx *)
(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}
/***************** Streams structures manipulation *************************/
static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
dma_free_coherent(dev,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
stream_ctx, dma);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_free(xhci->small_streams_pool,
stream_ctx, dma);
else
return dma_pool_free(xhci->medium_streams_pool,
stream_ctx, dma);
}
/*
* The stream context array for each endpoint with bulk streams enabled can
* vary in size, based on:
* - how many streams the endpoint supports,
* - the maximum primary stream array size the host controller supports,
* - and how many streams the device driver asks for.
*
* The stream context array must be a power of 2, and can be as small as
* 64 bytes or as large as 1MB.
*/
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
if (num_stream_ctxs > MEDIUM_STREAM_ARRAY_SIZE)
return dma_alloc_coherent(dev,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs,
dma, mem_flags);
else if (num_stream_ctxs <= SMALL_STREAM_ARRAY_SIZE)
return dma_pool_alloc(xhci->small_streams_pool,
mem_flags, dma);
else
return dma_pool_alloc(xhci->medium_streams_pool,
mem_flags, dma);
}
struct xhci_ring *xhci_dma_to_transfer_ring(
struct xhci_virt_ep *ep,
u64 address)
{
if (ep->ep_state & EP_HAS_STREAMS)
return radix_tree_lookup(&ep->stream_info->trb_address_map,
address >> TRB_SEGMENT_SHIFT);
return ep->ring;
}
struct xhci_ring *xhci_stream_id_to_ring(
struct xhci_virt_device *dev,
unsigned int ep_index,
unsigned int stream_id)
{
struct xhci_virt_ep *ep = &dev->eps[ep_index];
if (stream_id == 0)
return ep->ring;
if (!ep->stream_info)
return NULL;
if (stream_id > ep->stream_info->num_streams)
return NULL;
return ep->stream_info->stream_rings[stream_id];
}
/*
* Change an endpoint's internal structure so it supports stream IDs. The
* number of requested streams includes stream 0, which cannot be used by device
* drivers.
*
* The number of stream contexts in the stream context array may be bigger than
* the number of streams the driver wants to use. This is because the number of
* stream context array entries must be a power of two.
*
* We need a radix tree for mapping physical addresses of TRBs to which stream
* ID they belong to. We need to do this because the host controller won't tell
* us which stream ring the TRB came from. We could store the stream ID in an
* event data TRB, but that doesn't help us for the cancellation case, since the
* endpoint may stop before it reaches that event data TRB.
*
* The radix tree maps the upper portion of the TRB DMA address to a ring
* segment that has the same upper portion of DMA addresses. For example, say I
* have segments of size 1KB, that are always 64-byte aligned. A segment may
* start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
* key to the stream ID is 0x43244. I can use the DMA address of the TRB to
* pass the radix tree a key to get the right stream ID:
*
* 0x10c90fff >> 10 = 0x43243
* 0x10c912c0 >> 10 = 0x43244
* 0x10c91400 >> 10 = 0x43245
*
* Obviously, only those TRBs with DMA addresses that are within the segment
* will make the radix tree return the stream ID for that ring.
*
* Caveats for the radix tree:
*
* The radix tree uses an unsigned long as its key. On 32-bit systems, an
* unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
* 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
* key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
* PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
* extended systems (where the DMA address can be bigger than 32-bits),
* if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
*/
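/*
 * Added note (illustrative, assuming the usual 64 16-byte TRBs per
 * segment): the key shift used below, TRB_SEGMENT_SHIFT, is ilog2 of the
 * segment size, i.e. a 10-bit shift for 1KB segments, matching the
 * worked example above.
 */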
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
unsigned int num_streams, gfp_t mem_flags)
{
struct xhci_stream_info *stream_info;
u32 cur_stream;
struct xhci_ring *cur_ring;
unsigned long key;
u64 addr;
int ret;
xhci_dbg(xhci, "Allocating %u streams and %u "
"stream context array entries.\n",
num_streams, num_stream_ctxs);
if (xhci->cmd_ring_reserved_trbs == MAX_RSVD_CMD_TRBS) {
xhci_dbg(xhci, "Command ring has no reserved TRBs available\n");
return NULL;
}
xhci->cmd_ring_reserved_trbs++;
stream_info = kzalloc(sizeof(struct xhci_stream_info), mem_flags);
if (!stream_info)
goto cleanup_trbs;
stream_info->num_streams = num_streams;
stream_info->num_stream_ctxs = num_stream_ctxs;
/* Initialize the array of virtual pointers to stream rings. */
stream_info->stream_rings = kzalloc(
sizeof(struct xhci_ring *)*num_streams,
mem_flags);
if (!stream_info->stream_rings)
goto cleanup_info;
/* Initialize the array of DMA addresses for stream rings for the HW. */
stream_info->stream_ctx_array = xhci_alloc_stream_ctx(xhci,
num_stream_ctxs, &stream_info->ctx_array_dma,
mem_flags);
if (!stream_info->stream_ctx_array)
goto cleanup_ctx;
memset(stream_info->stream_ctx_array, 0,
sizeof(struct xhci_stream_ctx)*num_stream_ctxs);
/* Allocate everything needed to free the stream rings later */
stream_info->free_streams_command =
xhci_alloc_command(xhci, true, true, mem_flags);
if (!stream_info->free_streams_command)
goto cleanup_ctx;
INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
/* Allocate rings for all the streams that the driver will use,
* and add their segment DMA addresses to the radix tree.
* Stream 0 is reserved.
*/
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
stream_info->stream_rings[cur_stream] =
xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
cur_ring = stream_info->stream_rings[cur_stream];
if (!cur_ring)
goto cleanup_rings;
cur_ring->stream_id = cur_stream;
/* Set deq ptr, cycle bit, and stream context type */
addr = cur_ring->first_seg->dma |
SCT_FOR_CTX(SCT_PRI_TR) |
cur_ring->cycle_state;
stream_info->stream_ctx_array[cur_stream].stream_ring =
cpu_to_le64(addr);
xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
cur_stream, (unsigned long long) addr);
key = (unsigned long)
(cur_ring->first_seg->dma >> TRB_SEGMENT_SHIFT);
ret = radix_tree_insert(&stream_info->trb_address_map,
key, cur_ring);
if (ret) {
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
goto cleanup_rings;
}
}
/* Leave the other unused stream ring pointers in the stream context
* array initialized to zero. This will cause the xHC to give us an
* error if the device asks for a stream ID we don't have setup (if it
* was any other way, the host controller would assume the ring is
* "empty" and wait forever for data to be queued to that stream ID).
*/
return stream_info;
cleanup_rings:
for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
addr = cur_ring->first_seg->dma;
radix_tree_delete(&stream_info->trb_address_map,
addr >> TRB_SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
}
xhci_free_command(xhci, stream_info->free_streams_command);
cleanup_ctx:
kfree(stream_info->stream_rings);
cleanup_info:
kfree(stream_info);
cleanup_trbs:
xhci->cmd_ring_reserved_trbs--;
return NULL;
}
/*
* Sets the MaxPStreams field and the Linear Stream Array field.
* Sets the dequeue pointer to the stream context array.
*/
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
struct xhci_stream_info *stream_info)
{
u32 max_primary_streams;
/* MaxPStreams is the number of stream context array entries, not the
* number we're actually using. Must be in 2^(MaxPstreams + 1) format.
* fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
*/
max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
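/*
 * Worked example (illustrative): num_stream_ctxs = 0x100 gives
 * fls(0x100) - 2 = 7, and the array size reported below is
 * 1 << (7 + 1) = 256 entries.
 */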
xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
"Setting number of stream ctx array entries to %u",
1 << (max_primary_streams + 1));
ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
| EP_HAS_LSA);
ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma);
}
/*
* Sets the MaxPStreams field and the Linear Stream Array field to 0.
* Reinstalls the "normal" endpoint ring (at its previous dequeue mark,
* not at the beginning of the ring).
*/
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
struct xhci_ep_ctx *ep_ctx,
struct xhci_virt_ep *ep)
{
dma_addr_t addr;
ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state);
}
/* Frees all stream contexts associated with the endpoint,
*
* Caller should fix the endpoint context streams fields.
*/
void xhci_free_stream_info(struct xhci_hcd *xhci,
struct xhci_stream_info *stream_info)
{
int cur_stream;
struct xhci_ring *cur_ring;
dma_addr_t addr;
if (!stream_info)
return;
for (cur_stream = 1; cur_stream < stream_info->num_streams;
cur_stream++) {
cur_ring = stream_info->stream_rings[cur_stream];
if (cur_ring) {
addr = cur_ring->first_seg->dma;
radix_tree_delete(&stream_info->trb_address_map,
addr >> TRB_SEGMENT_SHIFT);
xhci_ring_free(xhci, cur_ring);
stream_info->stream_rings[cur_stream] = NULL;
}
}
xhci_free_command(xhci, stream_info->free_streams_command);
xhci->cmd_ring_reserved_trbs--;
if (stream_info->stream_ctx_array)
xhci_free_stream_ctx(xhci,
stream_info->num_stream_ctxs,
stream_info->stream_ctx_array,
stream_info->ctx_array_dma);
kfree(stream_info->stream_rings);
kfree(stream_info);
}
/***************** Device context manipulation *************************/
static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep)
{
init_timer(&ep->stop_cmd_timer);
ep->stop_cmd_timer.data = (unsigned long) ep;
ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
ep->xhci = xhci;
}
static void xhci_free_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int slot_id)
{
struct list_head *tt_list_head;
struct xhci_tt_bw_info *tt_info, *next;
bool slot_found = false;
/* If the device never made it past the Set Address stage,
* it may not have the real_port set correctly.
*/
if (virt_dev->real_port == 0 ||
virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
xhci_dbg(xhci, "Bad real port.\n");
return;
}
tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* Multi-TT hubs will have more than one entry */
if (tt_info->slot_id == slot_id) {
slot_found = true;
list_del(&tt_info->tt_list);
kfree(tt_info);
} else if (slot_found) {
break;
}
}
}
int xhci_alloc_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags)
{
struct xhci_tt_bw_info *tt_info;
unsigned int num_ports;
int i, j;
if (!tt->multi)
num_ports = 1;
else
num_ports = hdev->maxchild;
for (i = 0; i < num_ports; i++, tt_info++) {
struct xhci_interval_bw_table *bw_table;
tt_info = kzalloc(sizeof(*tt_info), mem_flags);
if (!tt_info)
goto free_tts;
INIT_LIST_HEAD(&tt_info->tt_list);
list_add(&tt_info->tt_list,
&xhci->rh_bw[virt_dev->real_port - 1].tts);
tt_info->slot_id = virt_dev->udev->slot_id;
if (tt->multi)
tt_info->ttport = i+1;
bw_table = &tt_info->bw_table;
for (j = 0; j < XHCI_MAX_INTERVAL; j++)
INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
}
return 0;
free_tts:
xhci_free_tt_info(xhci, virt_dev, virt_dev->udev->slot_id);
return -ENOMEM;
}
/* All the xhci_tds in the ring's TD list should be freed at this point.
* Should be called with xhci->lock held if there is any chance the TT lists
* will be manipulated by the configure endpoint, allocate device, or update
* hub functions while this function is removing the TT entries from the list.
*/
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *dev;
int i;
int old_active_eps = 0;
/* Slot ID 0 is reserved */
if (slot_id == 0 || !xhci->devs[slot_id])
return;
dev = xhci->devs[slot_id];
xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
if (!dev)
return;
if (dev->tt_info)
old_active_eps = dev->tt_info->active_eps;
for (i = 0; i < 31; ++i) {
if (dev->eps[i].ring)
xhci_ring_free(xhci, dev->eps[i].ring);
if (dev->eps[i].stream_info)
xhci_free_stream_info(xhci,
dev->eps[i].stream_info);
/* Endpoints on the TT/root port lists should have been removed
* when usb_disable_device() was called for the device.
* We can't drop them anyway, because the udev might have gone
* away by this point, and we can't tell what speed it was.
*/
if (!list_empty(&dev->eps[i].bw_endpoint_list))
xhci_warn(xhci, "Slot %u endpoint %u "
"not removed from BW list!\n",
slot_id, i);
}
/* If this is a hub, free the TT(s) from the TT list */
xhci_free_tt_info(xhci, dev, slot_id);
/* If necessary, update the number of active TTs on this root port */
xhci_update_tt_active_eps(xhci, dev, old_active_eps);
if (dev->ring_cache) {
for (i = 0; i < dev->num_rings_cached; i++)
xhci_ring_free(xhci, dev->ring_cache[i]);
kfree(dev->ring_cache);
}
if (dev->in_ctx)
xhci_free_container_ctx(xhci, dev->in_ctx);
if (dev->out_ctx)
xhci_free_container_ctx(xhci, dev->out_ctx);
kfree(xhci->devs[slot_id]);
xhci->devs[slot_id] = NULL;
}
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
struct usb_device *udev, gfp_t flags)
{
struct xhci_virt_device *dev;
int i;
/* Slot ID 0 is reserved */
if (slot_id == 0 || xhci->devs[slot_id]) {
xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
return 0;
}
xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
if (!xhci->devs[slot_id])
return 0;
dev = xhci->devs[slot_id];
/* Allocate the (output) device context that will be used in the HC. */
dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
if (!dev->out_ctx)
goto fail;
xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
(unsigned long long)dev->out_ctx->dma);
/* Allocate the (input) device context for address device command */
dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
if (!dev->in_ctx)
goto fail;
xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
(unsigned long long)dev->in_ctx->dma);
/* Initialize the cancellation list and watchdog timers for each ep */
for (i = 0; i < 31; i++) {
xhci_init_endpoint_timer(xhci, &dev->eps[i]);
INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
}
/* Allocate endpoint 0 ring */
dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
if (!dev->eps[0].ring)
goto fail;
/* Allocate pointers to the ring cache */
dev->ring_cache = kzalloc(
sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
flags);
if (!dev->ring_cache)
goto fail;
dev->num_rings_cached = 0;
init_completion(&dev->cmd_completion);
INIT_LIST_HEAD(&dev->cmd_list);
dev->udev = udev;
/* Point to output device context in dcbaa. */
xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
slot_id,
&xhci->dcbaa->dev_context_ptrs[slot_id],
le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
return 1;
fail:
xhci_free_virt_device(xhci, slot_id);
return 0;
}
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
struct usb_device *udev)
{
struct xhci_virt_device *virt_dev;
struct xhci_ep_ctx *ep0_ctx;
struct xhci_ring *ep_ring;
virt_dev = xhci->devs[udev->slot_id];
ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
ep_ring = virt_dev->eps[0].ring;
/*
* FIXME we don't keep track of the dequeue pointer very well after a
* Set TR dequeue pointer, so we're setting the dequeue pointer of the
* host to our enqueue pointer. This should only be called after a
* configured device has reset, so all control transfers should have
* been completed or cancelled before the reset.
*/
ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
ep_ring->enqueue)
| ep_ring->cycle_state);
}
/*
* The xHCI roothub may have ports of differing speeds in any order in the port
* status registers. xhci->port_array provides an array of the port speed for
* each offset into the port status registers.
*
* The xHCI hardware wants to know the roothub port number that the USB device
* is attached to (or the roothub port its ancestor hub is attached to). All we
* know is the index of that port under either the USB 2.0 or the USB 3.0
* roothub, but that doesn't give us the real index into the HW port status
registers. Call xhci_find_raw_port_number() to get the real index.
*/
static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
struct usb_device *udev)
{
struct usb_device *top_dev;
struct usb_hcd *hcd;
if (udev->speed == USB_SPEED_SUPER)
hcd = xhci->shared_hcd;
else
hcd = xhci->main_hcd;
for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
top_dev = top_dev->parent)
/* Found device below root hub */;
return xhci_find_raw_port_number(hcd, top_dev->portnum);
}
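/*
 * Illustrative walk (hypothetical topology): for a device plugged into
 * an external hub that sits on roothub port 3, the loop above stops at
 * the hub, the last ancestor whose parent is the roothub, so
 * top_dev->portnum == 3 is the per-roothub index that
 * xhci_find_raw_port_number() then maps to the raw PORTSC index.
 */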
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep0_ctx;
struct xhci_slot_ctx *slot_ctx;
u32 port_num;
u32 max_packets;
struct usb_device *top_dev;
dev = xhci->devs[udev->slot_id];
/* Slot ID 0 is reserved */
if (udev->slot_id == 0 || !dev) {
xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
udev->slot_id);
return -EINVAL;
}
ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
/* 3) Only the control endpoint is valid - one endpoint context */
slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
switch (udev->speed) {
case USB_SPEED_SUPER:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
max_packets = MAX_PACKET(512);
break;
case USB_SPEED_HIGH:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
max_packets = MAX_PACKET(64);
break;
/* USB core guesses at a 64-byte max packet first for FS devices */
case USB_SPEED_FULL:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
max_packets = MAX_PACKET(64);
break;
case USB_SPEED_LOW:
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
max_packets = MAX_PACKET(8);
break;
case USB_SPEED_WIRELESS:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
return -EINVAL;
default:
/* Speed was set earlier, this shouldn't happen. */
return -EINVAL;
}
/* Find the root hub port this device is under */
port_num = xhci_find_real_port_number(xhci, udev);
if (!port_num)
return -EINVAL;
slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
/* Set the port number in the virtual_device to the faked port number */
for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
top_dev = top_dev->parent)
/* Found device below root hub */;
dev->fake_port = top_dev->portnum;
dev->real_port = port_num;
xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
/* Find the right bandwidth table that this device will be a part of.
* If this is a full speed device attached directly to a root port (or a
descendant of one), it counts as a primary bandwidth domain, not a
* secondary bandwidth domain under a TT. An xhci_tt_info structure
* will never be created for the HS root hub.
*/
if (!udev->tt || !udev->tt->hub->parent) {
dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
} else {
struct xhci_root_port_bw_info *rh_bw;
struct xhci_tt_bw_info *tt_bw;
rh_bw = &xhci->rh_bw[port_num - 1];
/* Find the right TT. */
list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
if (tt_bw->slot_id != udev->tt->hub->slot_id)
continue;
if (!dev->udev->tt->multi ||
(udev->tt->multi &&
tt_bw->ttport == dev->udev->ttport)) {
dev->bw_table = &tt_bw->bw_table;
dev->tt_info = tt_bw;
break;
}
}
if (!dev->tt_info)
xhci_warn(xhci, "WARN: Didn't find a matching TT\n");
}
/* Is this a LS/FS device under an external HS hub? */
if (udev->tt && udev->tt->hub->parent) {
slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
(udev->ttport << 8));
if (udev->tt->multi)
slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
}
xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
/* Step 4 - ring already allocated */
/* Step 5 */
ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
max_packets);
ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
dev->eps[0].ring->cycle_state);
/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
return 0;
}
/*
 * Convert an interval expressed as 2^(bInterval - 1) == interval into a
 * straight exponent value n such that 2^n == interval.
 */
static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
unsigned int interval;
interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
if (interval != ep->desc.bInterval - 1)
dev_warn(&udev->dev,
"ep %#x - rounding interval to %d %sframes\n",
ep->desc.bEndpointAddress,
1 << interval,
udev->speed == USB_SPEED_FULL ? "" : "micro");
if (udev->speed == USB_SPEED_FULL) {
/*
* Full speed isoc endpoints specify interval in frames,
* not microframes. We are using microframes everywhere,
* so adjust accordingly.
*/
interval += 3; /* 1 frame = 2^3 uframes */
}
return interval;
}
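/*
 * Worked example (illustrative descriptor values): a SuperSpeed
 * interrupt endpoint with bInterval = 5 gives
 * interval = clamp(5, 1, 16) - 1 = 4, i.e. a service period of
 * 2^4 * 125us = 2ms.  A full-speed isoc endpoint with bInterval = 4
 * gives interval = 3, then +3 for the frames-to-microframes shift,
 * so 2^6 * 125us = 8ms, matching the 2^(4-1) = 8 frames the
 * descriptor asked for.
 */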
/*
 * Convert a bInterval expressed in microframes (range 1-255) to an exponent
 * of microframes, rounding down to the nearest power of 2.
 */
static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
struct usb_host_endpoint *ep, unsigned int desc_interval,
unsigned int min_exponent, unsigned int max_exponent)
{
unsigned int interval;
interval = fls(desc_interval) - 1;
interval = clamp_val(interval, min_exponent, max_exponent);
if ((1 << interval) != desc_interval)
dev_warn(&udev->dev,
"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
ep->desc.bEndpointAddress,
1 << interval,
desc_interval);
return interval;
}
static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
if (ep->desc.bInterval == 0)
return 0;
return xhci_microframes_to_exponent(udev, ep,
ep->desc.bInterval, 0, 15);
}
static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
return xhci_microframes_to_exponent(udev, ep,
ep->desc.bInterval * 8, 3, 10);
}
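/*
 * Worked example (illustrative bInterval): a low-speed interrupt
 * endpoint with bInterval = 10 frames becomes 10 * 8 = 80 microframes;
 * fls(80) - 1 = 6, so the interval is rounded down to 2^6 = 64
 * microframes (8ms), which also lies within the clamped exponent
 * range [3, 10].
 */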
/* Return the polling or NAK interval.
*
 * The polling interval is expressed in "microframes". If xHCI's Interval
 * field is set to N, the host will service the endpoint every 2^N * 125us.
*
* The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval
* is set to 0.
*/
static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
unsigned int interval = 0;
switch (udev->speed) {
case USB_SPEED_HIGH:
/* Max NAK rate */
if (usb_endpoint_xfer_control(&ep->desc) ||
usb_endpoint_xfer_bulk(&ep->desc)) {
interval = xhci_parse_microframe_interval(udev, ep);
break;
}
/* Fall through - SS and HS isoc/int have same decoding */
case USB_SPEED_SUPER:
if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) {
interval = xhci_parse_exponent_interval(udev, ep);
}
break;
case USB_SPEED_FULL:
if (usb_endpoint_xfer_isoc(&ep->desc)) {
interval = xhci_parse_exponent_interval(udev, ep);
break;
}
/*
* Fall through for interrupt endpoint interval decoding
* since it uses the same rules as low speed interrupt
* endpoints.
*/
case USB_SPEED_LOW:
if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) {
interval = xhci_parse_frame_interval(udev, ep);
}
break;
default:
BUG();
}
return EP_INTERVAL(interval);
}
/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
* High speed endpoint descriptors can define "the number of additional
* transaction opportunities per microframe", but that goes in the Max Burst
* endpoint context field.
*/
static u32 xhci_get_endpoint_mult(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
if (udev->speed != USB_SPEED_SUPER ||
!usb_endpoint_xfer_isoc(&ep->desc))
return 0;
return ep->ss_ep_comp.bmAttributes;
}
static u32 xhci_get_endpoint_type(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
int in;
u32 type;
in = usb_endpoint_dir_in(&ep->desc);
if (usb_endpoint_xfer_control(&ep->desc)) {
type = EP_TYPE(CTRL_EP);
} else if (usb_endpoint_xfer_bulk(&ep->desc)) {
if (in)
type = EP_TYPE(BULK_IN_EP);
else
type = EP_TYPE(BULK_OUT_EP);
} else if (usb_endpoint_xfer_isoc(&ep->desc)) {
if (in)
type = EP_TYPE(ISOC_IN_EP);
else
type = EP_TYPE(ISOC_OUT_EP);
} else if (usb_endpoint_xfer_int(&ep->desc)) {
if (in)
type = EP_TYPE(INT_IN_EP);
else
type = EP_TYPE(INT_OUT_EP);
} else {
type = 0;
}
return type;
}
/* Return the maximum endpoint service interval time (ESIT) payload.
* Basically, this is the maxpacket size, multiplied by the burst size
* and mult size.
*/
static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
struct usb_device *udev,
struct usb_host_endpoint *ep)
{
int max_burst;
int max_packet;
/* Only applies for interrupt or isochronous endpoints */
if (usb_endpoint_xfer_control(&ep->desc) ||
usb_endpoint_xfer_bulk(&ep->desc))
return 0;
if (udev->speed == USB_SPEED_SUPER)
return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
/* A 0 in max burst means 1 transfer per ESIT */
return max_packet * (max_burst + 1);
}
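/*
 * Worked example (hypothetical wMaxPacketSize value): if a high-speed
 * isoc endpoint reports 0x1400, the packet size is bits 10:0
 * (0x1400 & 0x7ff = 1024 bytes) and bits 12:11 give 2 additional
 * transaction opportunities, so the max ESIT payload is
 * 1024 * (2 + 1) = 3072 bytes per service interval.
 */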
/* Set up an endpoint with one ring segment. Do not allocate stream rings.
* Drivers will have to call usb_alloc_streams() to do that.
*/
int xhci_endpoint_init(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *udev,
struct usb_host_endpoint *ep,
gfp_t mem_flags)
{
unsigned int ep_index;
struct xhci_ep_ctx *ep_ctx;
struct xhci_ring *ep_ring;
unsigned int max_packet;
unsigned int max_burst;
enum xhci_ring_type type;
u32 max_esit_payload;
u32 endpoint_type;
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
endpoint_type = xhci_get_endpoint_type(udev, ep);
if (!endpoint_type)
return -EINVAL;
ep_ctx->ep_info2 = cpu_to_le32(endpoint_type);
type = usb_endpoint_type(&ep->desc);
/* Set up the endpoint ring */
virt_dev->eps[ep_index].new_ring =
xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
if (!virt_dev->eps[ep_index].new_ring) {
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
return -ENOMEM;
virt_dev->eps[ep_index].new_ring =
virt_dev->ring_cache[virt_dev->num_rings_cached];
virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
virt_dev->num_rings_cached--;
xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1, type);
}
virt_dev->eps[ep_index].skip = false;
ep_ring = virt_dev->eps[ep_index].new_ring;
ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
| EP_MULT(xhci_get_endpoint_mult(udev, ep)));
/* FIXME dig Mult and streams info out of ep companion desc */
/* Allow 3 retries for everything but isoc;
 * CErr shall be set to 0 for isoch endpoints.
 */
if (!usb_endpoint_xfer_isoc(&ep->desc))
ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(3));
else
ep_ctx->ep_info2 |= cpu_to_le32(ERROR_COUNT(0));
/* Set the max packet size and max burst */
max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
max_burst = 0;
switch (udev->speed) {
case USB_SPEED_SUPER:
/* dig out max burst from ep companion desc */
max_burst = ep->ss_ep_comp.bMaxBurst;
break;
case USB_SPEED_HIGH:
/* Some devices get this wrong */
if (usb_endpoint_xfer_bulk(&ep->desc))
max_packet = 512;
/* bits 11:12 specify the number of additional transaction
* opportunities per microframe (USB 2.0, section 9.6.6)
*/
if (usb_endpoint_xfer_isoc(&ep->desc) ||
usb_endpoint_xfer_int(&ep->desc)) {
max_burst = (usb_endpoint_maxp(&ep->desc)
& 0x1800) >> 11;
}
break;
case USB_SPEED_FULL:
case USB_SPEED_LOW:
break;
default:
BUG();
}
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
MAX_BURST(max_burst));
max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
/*
* XXX no idea how to calculate the average TRB buffer length for bulk
* endpoints, as the driver gives us no clue how big each scatter gather
* list entry (or buffer) is going to be.
*
* For isochronous and interrupt endpoints, we set it to the max
* available, until we have new API in the USB core to allow drivers to
* declare how much bandwidth they actually need.
*
* Normally, it would be calculated by taking the total of the buffer
* lengths in the TD and then dividing by the number of TRBs in a TD,
* including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
* use Event Data TRBs, and we don't chain in a link TRB on short
* transfers, we're basically dividing by 1.
*
* xHCI 1.0 specification indicates that the Average TRB Length should
* be set to 8 for control endpoints.
*/
if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
else
ep_ctx->tx_info |=
cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
/* FIXME Debug endpoint context */
return 0;
}
void xhci_endpoint_zero(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_host_endpoint *ep)
{
unsigned int ep_index;
struct xhci_ep_ctx *ep_ctx;
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
ep_ctx->ep_info = 0;
ep_ctx->ep_info2 = 0;
ep_ctx->deq = 0;
ep_ctx->tx_info = 0;
/* Don't free the endpoint ring until the set interface or configuration
* request succeeds.
*/
}
void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info)
{
bw_info->ep_interval = 0;
bw_info->mult = 0;
bw_info->num_packets = 0;
bw_info->max_packet_size = 0;
bw_info->type = 0;
bw_info->max_esit_payload = 0;
}
void xhci_update_bw_info(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_input_control_ctx *ctrl_ctx,
struct xhci_virt_device *virt_dev)
{
struct xhci_bw_info *bw_info;
struct xhci_ep_ctx *ep_ctx;
unsigned int ep_type;
int i;
for (i = 1; i < 31; ++i) {
bw_info = &virt_dev->eps[i].bw_info;
/* We can't tell what endpoint type is being dropped, but
* unconditionally clearing the bandwidth info for non-periodic
* endpoints should be harmless because the info will never be
* set in the first place.
*/
if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) {
/* Dropped endpoint */
xhci_clear_endpoint_bw_info(bw_info);
continue;
}
if (EP_IS_ADDED(ctrl_ctx, i)) {
ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i);
ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
/* Ignore non-periodic endpoints */
if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
ep_type != ISOC_IN_EP &&
ep_type != INT_IN_EP)
continue;
/* Added or changed endpoint */
bw_info->ep_interval = CTX_TO_EP_INTERVAL(
le32_to_cpu(ep_ctx->ep_info));
/* Number of packets and mult are zero-based in the
* input context, but we want one-based for the
* interval table.
*/
bw_info->mult = CTX_TO_EP_MULT(
le32_to_cpu(ep_ctx->ep_info)) + 1;
bw_info->num_packets = CTX_TO_MAX_BURST(
le32_to_cpu(ep_ctx->ep_info2)) + 1;
bw_info->max_packet_size = MAX_PACKET_DECODED(
le32_to_cpu(ep_ctx->ep_info2));
bw_info->type = ep_type;
bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD(
le32_to_cpu(ep_ctx->tx_info));
}
}
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
* Useful when you want to change one particular aspect of the endpoint and then
* issue a configure endpoint command.
*/
void xhci_endpoint_copy(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
unsigned int ep_index)
{
struct xhci_ep_ctx *out_ep_ctx;
struct xhci_ep_ctx *in_ep_ctx;
out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
in_ep_ctx->ep_info = out_ep_ctx->ep_info;
in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
in_ep_ctx->deq = out_ep_ctx->deq;
in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
* Useful when you want to change one particular aspect of the endpoint and then
* issue a configure endpoint command. Only the context entries field matters,
* but we'll copy the whole thing anyway.
*/
void xhci_slot_copy(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx)
{
struct xhci_slot_ctx *in_slot_ctx;
struct xhci_slot_ctx *out_slot_ctx;
in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
in_slot_ctx->dev_info = out_slot_ctx->dev_info;
in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
in_slot_ctx->tt_info = out_slot_ctx->tt_info;
in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
int i;
struct device *dev = xhci_to_hcd(xhci)->self.controller;
int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Allocating %d scratchpad buffers", num_sp);
if (!num_sp)
return 0;
xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags);
if (!xhci->scratchpad)
goto fail_sp;
xhci->scratchpad->sp_array = dma_alloc_coherent(dev,
num_sp * sizeof(u64),
&xhci->scratchpad->sp_dma, flags);
if (!xhci->scratchpad->sp_array)
goto fail_sp2;
xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags);
if (!xhci->scratchpad->sp_buffers)
goto fail_sp3;
xhci->scratchpad->sp_dma_buffers =
kzalloc(sizeof(dma_addr_t) * num_sp, flags);
if (!xhci->scratchpad->sp_dma_buffers)
goto fail_sp4;
xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
for (i = 0; i < num_sp; i++) {
dma_addr_t dma;
void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
flags);
if (!buf)
goto fail_sp5;
xhci->scratchpad->sp_array[i] = dma;
xhci->scratchpad->sp_buffers[i] = buf;
xhci->scratchpad->sp_dma_buffers[i] = dma;
}
return 0;
fail_sp5:
for (i = i - 1; i >= 0; i--) {
dma_free_coherent(dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_dma_buffers[i]);
}
kfree(xhci->scratchpad->sp_dma_buffers);
fail_sp4:
kfree(xhci->scratchpad->sp_buffers);
fail_sp3:
dma_free_coherent(dev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
fail_sp2:
kfree(xhci->scratchpad);
xhci->scratchpad = NULL;
fail_sp:
return -ENOMEM;
}
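/*
 * The fail_sp* labels above unwind in strict reverse order of
 * allocation: a dma_alloc_coherent() failure at buffer index i frees
 * only buffers 0..i-1 before falling through the remaining labels, so
 * nothing is freed twice and nothing already allocated is leaked.
 */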
static void scratchpad_free(struct xhci_hcd *xhci)
{
int num_sp;
int i;
struct device *dev = xhci_to_hcd(xhci)->self.controller;
if (!xhci->scratchpad)
return;
num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
for (i = 0; i < num_sp; i++) {
dma_free_coherent(dev, xhci->page_size,
xhci->scratchpad->sp_buffers[i],
xhci->scratchpad->sp_dma_buffers[i]);
}
kfree(xhci->scratchpad->sp_dma_buffers);
kfree(xhci->scratchpad->sp_buffers);
dma_free_coherent(dev, num_sp * sizeof(u64),
xhci->scratchpad->sp_array,
xhci->scratchpad->sp_dma);
kfree(xhci->scratchpad);
xhci->scratchpad = NULL;
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
bool allocate_in_ctx, bool allocate_completion,
gfp_t mem_flags)
{
struct xhci_command *command;
command = kzalloc(sizeof(*command), mem_flags);
if (!command)
return NULL;
if (allocate_in_ctx) {
command->in_ctx =
xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
mem_flags);
if (!command->in_ctx) {
kfree(command);
return NULL;
}
}
if (allocate_completion) {
command->completion =
kzalloc(sizeof(struct completion), mem_flags);
if (!command->completion) {
xhci_free_container_ctx(xhci, command->in_ctx);
kfree(command);
return NULL;
}
init_completion(command->completion);
}
command->status = 0;
INIT_LIST_HEAD(&command->cmd_list);
return command;
}
void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
{
if (urb_priv) {
kfree(urb_priv->td[0]);
kfree(urb_priv);
}
}
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command)
{
xhci_free_container_ctx(xhci,
command->in_ctx);
kfree(command->completion);
kfree(command);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct device *dev = xhci_to_hcd(xhci)->self.controller;
struct xhci_cd *cur_cd, *next_cd;
int size;
int i, j, num_ports;
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
if (xhci->erst.entries)
dma_free_coherent(dev, size,
xhci->erst.entries, xhci->erst.erst_dma_addr);
xhci->erst.entries = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
if (xhci->event_ring)
xhci_ring_free(xhci, xhci->event_ring);
xhci->event_ring = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
if (xhci->lpm_command)
xhci_free_command(xhci, xhci->lpm_command);
xhci->cmd_ring_reserved_trbs = 0;
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
list_for_each_entry_safe(cur_cd, next_cd,
&xhci->cancel_cmd_list, cancel_cmd_list) {
list_del(&cur_cd->cancel_cmd_list);
kfree(cur_cd);
}
for (i = 1; i < MAX_HC_SLOTS; ++i)
xhci_free_virt_device(xhci, i);
if (xhci->segment_pool)
dma_pool_destroy(xhci->segment_pool);
xhci->segment_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
if (xhci->device_pool)
dma_pool_destroy(xhci->device_pool);
xhci->device_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
if (xhci->small_streams_pool)
dma_pool_destroy(xhci->small_streams_pool);
xhci->small_streams_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed small stream array pool");
if (xhci->medium_streams_pool)
dma_pool_destroy(xhci->medium_streams_pool);
xhci->medium_streams_pool = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed medium stream array pool");
if (xhci->dcbaa)
dma_free_coherent(dev, sizeof(*xhci->dcbaa),
xhci->dcbaa, xhci->dcbaa->dma);
xhci->dcbaa = NULL;
scratchpad_free(xhci);
if (!xhci->rh_bw)
goto no_bw;
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
for (i = 0; i < num_ports; i++) {
struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
struct list_head *ep = &bwt->interval_bw[j].endpoints;
while (!list_empty(ep))
list_del_init(ep->next);
}
}
for (i = 0; i < num_ports; i++) {
struct xhci_tt_bw_info *tt, *n;
list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
list_del(&tt->tt_list);
kfree(tt);
}
}
no_bw:
xhci->num_usb2_ports = 0;
xhci->num_usb3_ports = 0;
xhci->num_active_eps = 0;
kfree(xhci->usb2_ports);
kfree(xhci->usb3_ports);
kfree(xhci->port_array);
kfree(xhci->rh_bw);
kfree(xhci->ext_caps);
xhci->page_size = 0;
xhci->page_shift = 0;
xhci->bus_state[0].bus_suspended = 0;
xhci->bus_state[1].bus_suspended = 0;
}
static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
struct xhci_segment *input_seg,
union xhci_trb *start_trb,
union xhci_trb *end_trb,
dma_addr_t input_dma,
struct xhci_segment *result_seg,
char *test_name, int test_number)
{
unsigned long long start_dma;
unsigned long long end_dma;
struct xhci_segment *seg;
start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
if (seg != result_seg) {
xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
test_name, test_number);
xhci_warn(xhci, "Tested TRB math w/ seg %p and "
"input DMA 0x%llx\n",
input_seg,
(unsigned long long) input_dma);
xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
"ending TRB %p (0x%llx DMA)\n",
start_trb, start_dma,
end_trb, end_dma);
xhci_warn(xhci, "Expected seg %p, got seg %p\n",
result_seg, seg);
return -1;
}
return 0;
}
/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
{
struct {
dma_addr_t input_dma;
struct xhci_segment *result_seg;
} simple_test_vector [] = {
/* A zeroed DMA field should fail */
{ 0, NULL },
/* One TRB before the ring start should fail */
{ xhci->event_ring->first_seg->dma - 16, NULL },
/* One byte before the ring start should fail */
{ xhci->event_ring->first_seg->dma - 1, NULL },
/* Starting TRB should succeed */
{ xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
/* Ending TRB should succeed */
{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
xhci->event_ring->first_seg },
/* One byte after the ring end should fail */
{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
/* One TRB after the ring end should fail */
{ xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
/* An address of all ones should fail */
{ (dma_addr_t) (~0), NULL },
};
struct {
struct xhci_segment *input_seg;
union xhci_trb *start_trb;
union xhci_trb *end_trb;
dma_addr_t input_dma;
struct xhci_segment *result_seg;
} complex_test_vector [] = {
/* Test feeding a valid DMA address from a different ring */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = xhci->event_ring->first_seg->trbs,
.end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
.input_dma = xhci->cmd_ring->first_seg->dma,
.result_seg = NULL,
},
/* Test feeding a valid end TRB from a different ring */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = xhci->event_ring->first_seg->trbs,
.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
.input_dma = xhci->cmd_ring->first_seg->dma,
.result_seg = NULL,
},
/* Test feeding a valid start and end TRB from a different ring */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = xhci->cmd_ring->first_seg->trbs,
.end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
.input_dma = xhci->cmd_ring->first_seg->dma,
.result_seg = NULL,
},
/* TRB in this ring, but after this TD */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = &xhci->event_ring->first_seg->trbs[0],
.end_trb = &xhci->event_ring->first_seg->trbs[3],
.input_dma = xhci->event_ring->first_seg->dma + 4*16,
.result_seg = NULL,
},
/* TRB in this ring, but before this TD */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = &xhci->event_ring->first_seg->trbs[3],
.end_trb = &xhci->event_ring->first_seg->trbs[6],
.input_dma = xhci->event_ring->first_seg->dma + 2*16,
.result_seg = NULL,
},
/* TRB in this ring, but after this wrapped TD */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
.end_trb = &xhci->event_ring->first_seg->trbs[1],
.input_dma = xhci->event_ring->first_seg->dma + 2*16,
.result_seg = NULL,
},
/* TRB in this ring, but before this wrapped TD */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
.end_trb = &xhci->event_ring->first_seg->trbs[1],
.input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
.result_seg = NULL,
},
/* TRB not in this ring, and we have a wrapped TD */
{ .input_seg = xhci->event_ring->first_seg,
.start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
.end_trb = &xhci->event_ring->first_seg->trbs[1],
.input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
.result_seg = NULL,
},
};
unsigned int num_tests;
int i, ret;
num_tests = ARRAY_SIZE(simple_test_vector);
for (i = 0; i < num_tests; i++) {
ret = xhci_test_trb_in_td(xhci,
xhci->event_ring->first_seg,
xhci->event_ring->first_seg->trbs,
&xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
simple_test_vector[i].input_dma,
simple_test_vector[i].result_seg,
"Simple", i);
if (ret < 0)
return ret;
}
num_tests = ARRAY_SIZE(complex_test_vector);
for (i = 0; i < num_tests; i++) {
ret = xhci_test_trb_in_td(xhci,
complex_test_vector[i].input_seg,
complex_test_vector[i].start_trb,
complex_test_vector[i].end_trb,
complex_test_vector[i].input_dma,
complex_test_vector[i].result_seg,
"Complex", i);
if (ret < 0)
return ret;
}
xhci_dbg(xhci, "TRB math tests passed.\n");
return 0;
}
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
u64 temp;
dma_addr_t deq;
deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
xhci->event_ring->dequeue);
if (deq == 0 && !in_interrupt())
xhci_warn(xhci, "WARN something wrong with SW event ring "
"dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
* there might be more events to service.
*/
temp &= ~ERST_EHB;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Write event ring dequeue pointer, "
"preserving EHB bit");
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
&xhci->ir_set->erst_dequeue);
}
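/*
 * Why the masking above is safe (a sketch of the register semantics):
 * the low bits of ERST_DEQUEUE are not part of the pointer, and EHB
 * among them is "write 1 to clear" (RW1C).  Keeping only
 * (old & ERST_PTR_MASK) and then clearing ERST_EHB means a 0 is
 * written to EHB, and writing 0 to a RW1C bit leaves it unchanged,
 * so still-pending events remain flagged, as the comment intends.
 */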
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
__le32 __iomem *addr, u8 major_revision, int max_caps)
{
u32 temp, port_offset, port_count;
int i;
if (major_revision > 0x03) {
xhci_warn(xhci, "Ignoring unknown port speed, "
"Ext Cap %p, revision = 0x%x\n",
addr, major_revision);
/* Ignoring port protocol we can't understand. FIXME */
return;
}
/* Port offset and count in the third dword, see section 7.2 */
temp = readl(addr + 2);
port_offset = XHCI_EXT_PORT_OFF(temp);
port_count = XHCI_EXT_PORT_COUNT(temp);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Ext Cap %p, port offset = %u, "
"count = %u, revision = 0x%x",
addr, port_offset, port_count, major_revision);
/* Port count includes the current port offset */
if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
/* WTF? "Valid values are ‘1’ to MaxPorts" */
return;
/* cache usb2 port capabilities */
if (major_revision < 0x03 && xhci->num_ext_caps < max_caps)
xhci->ext_caps[xhci->num_ext_caps++] = temp;
/* Check the host's USB2 LPM capability */
if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
(temp & XHCI_L1C)) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xHCI 0.96: support USB2 software lpm");
xhci->sw_lpm_support = 1;
}
if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xHCI 1.0: support USB2 software lpm");
xhci->sw_lpm_support = 1;
if (temp & XHCI_HLC) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xHCI 1.0: support USB2 hardware lpm");
xhci->hw_lpm_support = 1;
}
}
port_offset--;
for (i = port_offset; i < (port_offset + port_count); i++) {
/* Duplicate entry. Ignore the port if the revisions differ. */
if (xhci->port_array[i] != 0) {
xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
" port %u\n", addr, i);
xhci_warn(xhci, "Port was marked as USB %u, "
"duplicated as USB %u\n",
xhci->port_array[i], major_revision);
/* Only adjust the roothub port counts if we haven't
* found a similar duplicate.
*/
if (xhci->port_array[i] != major_revision &&
xhci->port_array[i] != DUPLICATE_ENTRY) {
if (xhci->port_array[i] == 0x03)
xhci->num_usb3_ports--;
else
xhci->num_usb2_ports--;
xhci->port_array[i] = DUPLICATE_ENTRY;
}
/* FIXME: Should we disable the port? */
continue;
}
xhci->port_array[i] = major_revision;
if (major_revision == 0x03)
xhci->num_usb3_ports++;
else
xhci->num_usb2_ports++;
}
/* FIXME: Should we disable ports not in the Extended Capabilities? */
}
/*
* Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
* specify what speeds each port is supposed to be. We can't count on the port
* speed bits in the PORTSC register being correct until a device is connected,
* but we need to set up the two fake roothubs with the correct number of USB
* 3.0 and USB 2.0 ports at host controller initialization time.
*/
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
{
__le32 __iomem *addr, *tmp_addr;
u32 offset, tmp_offset;
unsigned int num_ports;
int i, j, port_index;
int cap_count = 0;
addr = &xhci->cap_regs->hcc_params;
offset = XHCI_HCC_EXT_CAPS(readl(addr));
if (offset == 0) {
xhci_err(xhci, "No Extended Capability registers, "
"unable to set up roothub.\n");
return -ENODEV;
}
num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
if (!xhci->port_array)
return -ENOMEM;
xhci->rh_bw = kzalloc(sizeof(*xhci->rh_bw)*num_ports, flags);
if (!xhci->rh_bw)
return -ENOMEM;
for (i = 0; i < num_ports; i++) {
struct xhci_interval_bw_table *bw_table;
INIT_LIST_HEAD(&xhci->rh_bw[i].tts);
bw_table = &xhci->rh_bw[i].bw_table;
for (j = 0; j < XHCI_MAX_INTERVAL; j++)
INIT_LIST_HEAD(&bw_table->interval_bw[j].endpoints);
}
/*
* For whatever reason, the first capability offset is from the
* capability register base, not from the HCCPARAMS register.
* See section 5.3.6 for offset calculation.
*/
addr = &xhci->cap_regs->hc_capbase + offset;
tmp_addr = addr;
tmp_offset = offset;
/* count extended protocol capability entries for later caching */
do {
u32 cap_id;
cap_id = readl(tmp_addr);
if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
cap_count++;
tmp_offset = XHCI_EXT_CAPS_NEXT(cap_id);
tmp_addr += tmp_offset;
} while (tmp_offset);
xhci->ext_caps = kzalloc(sizeof(*xhci->ext_caps) * cap_count, flags);
if (!xhci->ext_caps)
return -ENOMEM;
while (1) {
u32 cap_id;
cap_id = readl(addr);
if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
xhci_add_in_port(xhci, num_ports, addr,
(u8) XHCI_EXT_PORT_MAJOR(cap_id),
cap_count);
offset = XHCI_EXT_CAPS_NEXT(cap_id);
if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
== num_ports)
break;
/*
* Once you're into the Extended Capabilities, the offset is
* always relative to the register holding the offset.
*/
addr += offset;
}
if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
xhci_warn(xhci, "No ports on the roothubs?\n");
return -ENODEV;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Found %u USB 2.0 ports and %u USB 3.0 ports.",
xhci->num_usb2_ports, xhci->num_usb3_ports);
/* Place limits on the number of roothub ports so that the hub
* descriptors aren't longer than the USB core will allocate.
*/
if (xhci->num_usb3_ports > 15) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Limiting USB 3.0 roothub ports to 15.");
xhci->num_usb3_ports = 15;
}
if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Limiting USB 2.0 roothub ports to %u.",
USB_MAXCHILDREN);
xhci->num_usb2_ports = USB_MAXCHILDREN;
}
/*
* Note we could have all USB 3.0 ports, or all USB 2.0 ports.
* Not sure how the USB core will handle a hub with no ports...
*/
if (xhci->num_usb2_ports) {
xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
xhci->num_usb2_ports, flags);
if (!xhci->usb2_ports)
return -ENOMEM;
port_index = 0;
for (i = 0; i < num_ports; i++) {
if (xhci->port_array[i] == 0x03 ||
xhci->port_array[i] == 0 ||
xhci->port_array[i] == DUPLICATE_ENTRY)
continue;
xhci->usb2_ports[port_index] =
&xhci->op_regs->port_status_base +
NUM_PORT_REGS*i;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"USB 2.0 port at index %u, "
"addr = %p", i,
xhci->usb2_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb2_ports)
break;
}
}
if (xhci->num_usb3_ports) {
xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
xhci->num_usb3_ports, flags);
if (!xhci->usb3_ports)
return -ENOMEM;
port_index = 0;
for (i = 0; i < num_ports; i++)
if (xhci->port_array[i] == 0x03) {
xhci->usb3_ports[port_index] =
&xhci->op_regs->port_status_base +
NUM_PORT_REGS*i;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"USB 3.0 port at index %u, "
"addr = %p", i,
xhci->usb3_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb3_ports)
break;
}
}
return 0;
}
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
dma_addr_t dma;
struct device *dev = xhci_to_hcd(xhci)->self.controller;
unsigned int val, val2;
u64 val_64;
struct xhci_segment *seg;
u32 page_size, temp;
int i;
INIT_LIST_HEAD(&xhci->cancel_cmd_list);
page_size = readl(&xhci->op_regs->page_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Supported page size register = 0x%x", page_size);
for (i = 0; i < 16; i++) {
if ((0x1 & page_size) != 0)
break;
page_size = page_size >> 1;
}
if (i < 16)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Supported page size of %iK", (1 << (i+12)) / 1024);
else
xhci_warn(xhci, "WARN: no supported page size\n");
/* Use 4K pages, since that's common and the minimum the HC supports */
xhci->page_shift = 12;
xhci->page_size = 1 << xhci->page_shift;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"HCD page size set to %iK", xhci->page_size / 1024);
/*
* Program the Number of Device Slots Enabled field in the CONFIG
* register with the max value of slots the HC can handle.
*/
val = HCS_MAX_SLOTS(readl(&xhci->cap_regs->hcs_params1));
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// xHC can handle at most %d device slots.", val);
val2 = readl(&xhci->op_regs->config_reg);
val |= (val2 & ~HCS_SLOTS_MASK);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting Max device slots reg = 0x%x.", val);
writel(val, &xhci->op_regs->config_reg);
/*
 * The device context base address array allocated below must be
 * "physically contiguous and 64-byte (cache line) aligned".
 */
xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
GFP_KERNEL);
if (!xhci->dcbaa)
goto fail;
memset(xhci->dcbaa, 0, sizeof(*xhci->dcbaa));
xhci->dcbaa->dma = dma;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Device context base array address = 0x%llx (DMA), %p (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
/*
* Initialize the ring segment pool. The ring must be a contiguous
* structure comprised of TRBs. The TRBs must be 16 byte aligned,
* however, the command ring segment needs 64-byte aligned segments,
* so we pick the greater alignment need.
*/
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
TRB_SEGMENT_SIZE, 64, xhci->page_size);
/* See Table 46 and Note on Figure 55 */
xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
2112, 64, xhci->page_size);
if (!xhci->segment_pool || !xhci->device_pool)
goto fail;
/* Linear stream context arrays don't have any boundary restrictions,
* and only need to be 16-byte aligned.
*/
xhci->small_streams_pool =
dma_pool_create("xHCI 256 byte stream ctx arrays",
dev, SMALL_STREAM_ARRAY_SIZE, 16, 0);
xhci->medium_streams_pool =
dma_pool_create("xHCI 1KB stream ctx arrays",
dev, MEDIUM_STREAM_ARRAY_SIZE, 16, 0);
/* Any stream context array bigger than MEDIUM_STREAM_ARRAY_SIZE
* will be allocated with dma_alloc_coherent()
*/
if (!xhci->small_streams_pool || !xhci->medium_streams_pool)
goto fail;
/* Set up the command ring to have one segment for now. */
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
if (!xhci->cmd_ring)
goto fail;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Allocated command ring at %p", xhci->cmd_ring);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
(unsigned long long)xhci->cmd_ring->first_seg->dma);
/* Set the address in the Command Ring Control register */
val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%016llx",
(unsigned long long) val_64);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
if (!xhci->lpm_command)
goto fail;
/* Reserve one command ring TRB for disabling LPM.
* Since the USB core grabs the shared usb_bus bandwidth mutex before
* disabling LPM, we only need to reserve one TRB for all devices.
*/
xhci->cmd_ring_reserved_trbs++;
val = readl(&xhci->cap_regs->db_off);
val &= DBOFF_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Doorbell array is located at offset 0x%x"
" from cap regs base addr", val);
xhci->dba = (void __iomem *) xhci->cap_regs + val;
xhci_dbg_regs(xhci);
xhci_print_run_regs(xhci);
/* Set ir_set to interrupt register set 0 */
xhci->ir_set = &xhci->run_regs->ir_set[0];
/*
* Event ring setup: Allocate a normal ring, but also setup
* the event ring segment table (ERST). Section 4.9.3.
*/
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
flags);
if (!xhci->event_ring)
goto fail;
if (xhci_check_trb_in_td_math(xhci, flags) < 0)
goto fail;
xhci->erst.entries = dma_alloc_coherent(dev,
sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
GFP_KERNEL);
if (!xhci->erst.entries)
goto fail;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Allocated event ring segment table at 0x%llx",
(unsigned long long)dma);
memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
xhci->erst.num_entries = ERST_NUM_SEGS;
xhci->erst.erst_dma_addr = dma;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
xhci->erst.num_entries,
xhci->erst.entries,
(unsigned long long)xhci->erst.erst_dma_addr);
/* set ring base address and size for each segment table entry */
for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
struct xhci_erst_entry *entry = &xhci->erst.entries[val];
entry->seg_addr = cpu_to_le64(seg->dma);
entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
entry->rsvd = 0;
seg = seg->next;
}
/* set ERST count with the number of entries in the segment table */
val = readl(&xhci->ir_set->erst_size);
val &= ERST_SIZE_MASK;
val |= ERST_NUM_SEGS;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Write ERST size = %i to ir_set 0 (some bits preserved)",
val);
writel(val, &xhci->ir_set->erst_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set ERST entries to point to event ring.");
/* set the segment table base address */
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set ERST base address for ir_set 0 = 0x%llx",
(unsigned long long)xhci->erst.erst_dma_addr);
val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Wrote ERST address to ir_set 0.");
xhci_print_ir_set(xhci, 0);
/*
* XXX: Might need to set the Interrupter Moderation Register to
* something other than the default (~1ms minimum between interrupts).
* See section 5.5.1.2.
*/
init_completion(&xhci->addr_dev);
for (i = 0; i < MAX_HC_SLOTS; ++i)
xhci->devs[i] = NULL;
for (i = 0; i < USB_MAXCHILDREN; ++i) {
xhci->bus_state[0].resume_done[i] = 0;
xhci->bus_state[1].resume_done[i] = 0;
/* Only the USB 2.0 completions will ever be used. */
init_completion(&xhci->bus_state[1].rexit_done[i]);
}
if (scratchpad_alloc(xhci, flags))
goto fail;
if (xhci_setup_port_arrays(xhci, flags))
goto fail;
/* Enable USB 3.0 device notifications for function remote wake, which
* is necessary for allowing USB 3.0 devices to do remote wakeup from
* U3 (device suspend).
*/
temp = readl(&xhci->op_regs->dev_notification);
temp &= ~DEV_NOTE_MASK;
temp |= DEV_NOTE_FWAKE;
writel(temp, &xhci->op_regs->dev_notification);
return 0;
fail:
xhci_warn(xhci, "Couldn't initialize memory\n");
xhci_halt(xhci);
xhci_reset(xhci);
xhci_mem_cleanup(xhci);
return -ENOMEM;
}
| gpl-2.0 |
eva-oss/linux | drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c | 334 | 30238 | /******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "iwl-eeprom-parse.h"
/* EEPROM offset definitions */
/* indirect access definitions */
#define ADDRESS_MSK 0x0000FFFF
#define INDIRECT_TYPE_MSK 0x000F0000
#define INDIRECT_HOST 0x00010000
#define INDIRECT_GENERAL 0x00020000
#define INDIRECT_REGULATORY 0x00030000
#define INDIRECT_CALIBRATION 0x00040000
#define INDIRECT_PROCESS_ADJST 0x00050000
#define INDIRECT_OTHERS 0x00060000
#define INDIRECT_TXP_LIMIT 0x00070000
#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
#define INDIRECT_ADDRESS 0x00100000
/* corresponding link offsets in EEPROM */
#define EEPROM_LINK_HOST (2*0x64)
#define EEPROM_LINK_GENERAL (2*0x65)
#define EEPROM_LINK_REGULATORY (2*0x66)
#define EEPROM_LINK_CALIBRATION (2*0x67)
#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
#define EEPROM_LINK_OTHERS (2*0x69)
#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
/* General */
#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */
#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
#define EEPROM_VERSION (2*0x44) /* 2 bytes */
#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
/* calibration */
struct iwl_eeprom_calib_hdr {
u8 version;
u8 pa_type;
__le16 voltage;
} __packed;
#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
/* temperature */
#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
/* SKU Capabilities (actual values from EEPROM definition) */
enum eeprom_sku_bits {
EEPROM_SKU_CAP_BAND_24GHZ = BIT(4),
EEPROM_SKU_CAP_BAND_52GHZ = BIT(5),
EEPROM_SKU_CAP_11N_ENABLE = BIT(6),
EEPROM_SKU_CAP_AMT_ENABLE = BIT(7),
EEPROM_SKU_CAP_IPAN_ENABLE = BIT(8)
};
/* radio config bits (actual values from EEPROM definition) */
#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
/*
* EEPROM bands
* These are the channel numbers from each band in the order
* that they are stored in the EEPROM band information. Note
* that EEPROM bands aren't the same as mac80211 bands, and
* there are even special "ht40 bands" in the EEPROM.
*/
static const u8 iwl_eeprom_band_1[14] = { /* 2.4 GHz */
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};
static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */
183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
};
static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */
34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};
static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
};
static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
145, 149, 153, 157, 161, 165
};
static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */
1, 2, 3, 4, 5, 6, 7
};
static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
};
#define IWL_NUM_CHANNELS (ARRAY_SIZE(iwl_eeprom_band_1) + \
ARRAY_SIZE(iwl_eeprom_band_2) + \
ARRAY_SIZE(iwl_eeprom_band_3) + \
ARRAY_SIZE(iwl_eeprom_band_4) + \
ARRAY_SIZE(iwl_eeprom_band_5))
/* rate data (static) */
static struct ieee80211_rate iwl_cfg80211_rates[] = {
{ .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
{ .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
.flags = IEEE80211_RATE_SHORT_PREAMBLE, },
{ .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
.flags = IEEE80211_RATE_SHORT_PREAMBLE, },
{ .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
.flags = IEEE80211_RATE_SHORT_PREAMBLE, },
{ .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
{ .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
{ .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
{ .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
{ .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
{ .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
{ .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
{ .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
};
#define RATES_24_OFFS 0
#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates)
#define RATES_52_OFFS 4
#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
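/*
 * How the slicing above works: the 2.4 GHz band exposes all
 * N_RATES_24 = 12 entries, while the 5 GHz band starts at
 * RATES_52_OFFS = 4, skipping the four CCK rates (1, 2, 5.5 and
 * 11 Mbps, which are 2.4 GHz only) and leaving N_RATES_52 = 8 OFDM
 * rates.
 */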
/* EEPROM reading functions */
static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset)
{
if (WARN_ON(offset + sizeof(u16) > eeprom_size))
return 0;
return le16_to_cpup((__le16 *)(eeprom + offset));
}
static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size,
u32 address)
{
u16 offset = 0;
if ((address & INDIRECT_ADDRESS) == 0)
return address;
switch (address & INDIRECT_TYPE_MSK) {
case INDIRECT_HOST:
offset = iwl_eeprom_query16(eeprom, eeprom_size,
EEPROM_LINK_HOST);
break;
case INDIRECT_GENERAL:
offset = iwl_eeprom_query16(eeprom, eeprom_size,
EEPROM_LINK_GENERAL);
break;
case INDIRECT_REGULATORY:
offset = iwl_eeprom_query16(eeprom, eeprom_size,
EEPROM_LINK_REGULATORY);
break;
case INDIRECT_TXP_LIMIT:
offset = iwl_eeprom_query16(eeprom, eeprom_size,
EEPROM_LINK_TXP_LIMIT);
break;
case INDIRECT_TXP_LIMIT_SIZE:
offset = iwl_eeprom_query16(eeprom, eeprom_size,
EEPROM_LINK_TXP_LIMIT_SIZE);
break;
case INDIRECT_CALIBRATION:
offset = iwl_eeprom_query16(eeprom, eeprom_size,
EEPROM_LINK_CALIBRATION);
break;
case INDIRECT_PROCESS_ADJST:
offset = iwl_eeprom_query16(eeprom, eeprom_size,
EEPROM_LINK_PROCESS_ADJST);
break;
case INDIRECT_OTHERS:
offset = iwl_eeprom_query16(eeprom, eeprom_size,
EEPROM_LINK_OTHERS);
break;
default:
WARN_ON(1);
break;
}
/* translate the offset from words to byte */
return (address & ADDRESS_MSK) + (offset << 1);
}
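/*
 * Worked example (the link word value is hypothetical): resolving
 * EEPROM_XTAL = (2*0x128) | INDIRECT_ADDRESS | INDIRECT_CALIBRATION
 * reads the 16-bit link word at EEPROM_LINK_CALIBRATION; if that word
 * were 0x01f0, the final byte address would be
 * (address & ADDRESS_MSK) + (0x01f0 << 1) = 0x250 + 0x3e0 = 0x630.
 */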
static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
u32 offset)
{
u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset);
if (WARN_ON(address >= eeprom_size))
return NULL;
return &eeprom[address];
}
static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size,
struct iwl_nvm_data *data)
{
struct iwl_eeprom_calib_hdr *hdr;
hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
EEPROM_CALIB_ALL);
if (!hdr)
return -ENODATA;
data->calib_version = hdr->version;
data->calib_voltage = hdr->voltage;
return 0;
}
/**
* enum iwl_eeprom_channel_flags - channel flags in EEPROM
* @EEPROM_CHANNEL_VALID: channel is usable for this SKU/geo
* @EEPROM_CHANNEL_IBSS: usable as an IBSS channel
* @EEPROM_CHANNEL_ACTIVE: active scanning allowed
* @EEPROM_CHANNEL_RADAR: radar detection required
* @EEPROM_CHANNEL_WIDE: 20 MHz channel okay (?)
* @EEPROM_CHANNEL_DFS: dynamic freq selection candidate
*/
enum iwl_eeprom_channel_flags {
EEPROM_CHANNEL_VALID = BIT(0),
EEPROM_CHANNEL_IBSS = BIT(1),
EEPROM_CHANNEL_ACTIVE = BIT(3),
EEPROM_CHANNEL_RADAR = BIT(4),
EEPROM_CHANNEL_WIDE = BIT(5),
EEPROM_CHANNEL_DFS = BIT(7),
};
/**
* struct iwl_eeprom_channel - EEPROM channel data
* @flags: %EEPROM_CHANNEL_* flags
* @max_power_avg: max power (in dBm) on this channel, at most 31 dBm
*/
struct iwl_eeprom_channel {
u8 flags;
s8 max_power_avg;
} __packed;
enum iwl_eeprom_enhanced_txpwr_flags {
IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
};
/**
 * struct iwl_eeprom_enhanced_txpwr - enhanced regulatory TX power limit
 * @flags: entry flags
 * @channel: channel number
 * @chain_a_max: chain a max power in 1/2 dBm
 * @chain_b_max: chain b max power in 1/2 dBm
 * @chain_c_max: chain c max power in 1/2 dBm
 * @delta_20_in_40: 20-in-40 deltas (hi/lo)
 * @mimo2_max: mimo2 max power in 1/2 dBm
 * @mimo3_max: mimo3 max power in 1/2 dBm
*
* This structure presents the enhanced regulatory tx power limit layout
* in an EEPROM image.
*/
struct iwl_eeprom_enhanced_txpwr {
u8 flags;
u8 channel;
s8 chain_a_max;
s8 chain_b_max;
s8 chain_c_max;
u8 delta_20_in_40;
s8 mimo2_max;
s8 mimo3_max;
} __packed;
static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data,
struct iwl_eeprom_enhanced_txpwr *txp)
{
s8 result = 0; /* (.5 dBm) */
/* Take the highest tx power from any valid chains */
if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result)
result = txp->chain_a_max;
if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result)
result = txp->chain_b_max;
if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result)
result = txp->chain_c_max;
if ((data->valid_tx_ant == ANT_AB ||
data->valid_tx_ant == ANT_BC ||
data->valid_tx_ant == ANT_AC) && txp->mimo2_max > result)
result = txp->mimo2_max;
if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result)
result = txp->mimo3_max;
return result;
}
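/*
 * Worked example (hypothetical EEPROM entry): with
 * valid_tx_ant == ANT_AB and an entry reporting chain_a_max = 30,
 * chain_b_max = 32 and mimo2_max = 36 (all in half-dBm), the result
 * is 36, i.e. 18 dBm; the caller later converts back with
 * DIV_ROUND_UP(36, 2).
 */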
#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
#define TXP_CHECK_AND_PRINT(x) \
((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "")
static void
iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
struct iwl_eeprom_enhanced_txpwr *txp,
int n_channels, s8 max_txpower_avg)
{
int ch_idx;
enum nl80211_band band;
band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
struct ieee80211_channel *chan = &data->channels[ch_idx];
/* update matching channel or from common data only */
if (txp->channel != 0 && chan->hw_value != txp->channel)
continue;
/* update matching band only */
if (band != chan->band)
continue;
if (chan->max_power < max_txpower_avg &&
!(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ))
chan->max_power = max_txpower_avg;
}
}
static void iwl_eeprom_enhanced_txpower(struct device *dev,
struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size,
int n_channels)
{
struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
int idx, entries;
__le16 *txp_len;
s8 max_txp_avg_halfdbm;
BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
/* the length is in 16-bit words, but we want entries */
txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size,
EEPROM_TXP_SZ_OFFS);
entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
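/*
 * For example (illustrative): a stored length of 40 words is 80 bytes,
 * i.e. 10 entries of the 8-byte struct iwl_eeprom_enhanced_txpwr.
 */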
txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
EEPROM_TXP_OFFS);
for (idx = 0; idx < entries; idx++) {
txp = &txp_array[idx];
/* skip invalid entries */
if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
continue;
IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
(txp->channel && (txp->flags &
IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
"Common " : (txp->channel) ?
"Channel" : "Common",
(txp->channel),
TXP_CHECK_AND_PRINT(VALID),
TXP_CHECK_AND_PRINT(BAND_52G),
TXP_CHECK_AND_PRINT(OFDM),
TXP_CHECK_AND_PRINT(40MHZ),
TXP_CHECK_AND_PRINT(HT_AP),
TXP_CHECK_AND_PRINT(RES1),
TXP_CHECK_AND_PRINT(RES2),
TXP_CHECK_AND_PRINT(COMMON_TYPE),
txp->flags);
IWL_DEBUG_EEPROM(dev,
"\t\t chain_A: %d chain_B: %d chain_C: %d\n",
txp->chain_a_max, txp->chain_b_max,
txp->chain_c_max);
IWL_DEBUG_EEPROM(dev,
"\t\t MIMO2: %d MIMO3: %d High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n",
txp->mimo2_max, txp->mimo3_max,
((txp->delta_20_in_40 & 0xf0) >> 4),
(txp->delta_20_in_40 & 0x0f));
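/*
 * The half-dBm maximum computed below is converted to whole dBm with
 * DIV_ROUND_UP(), rounding up: e.g. (illustrative) 33 half-dBm
 * (16.5 dBm) is applied as 17 dBm.
 */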
max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp);
iwl_eeprom_enh_txp_read_element(data, txp, n_channels,
DIV_ROUND_UP(max_txp_avg_halfdbm, 2));
if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm)
data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm;
}
}
static void iwl_init_band_reference(const struct iwl_cfg *cfg,
const u8 *eeprom, size_t eeprom_size,
int eeprom_band, int *eeprom_ch_count,
const struct iwl_eeprom_channel **ch_info,
const u8 **eeprom_ch_array)
{
u32 offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1];
offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY;
*ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
switch (eeprom_band) {
case 1: /* 2.4GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
*eeprom_ch_array = iwl_eeprom_band_1;
break;
case 2: /* 4.9GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
*eeprom_ch_array = iwl_eeprom_band_2;
break;
case 3: /* 5.2GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
*eeprom_ch_array = iwl_eeprom_band_3;
break;
case 4: /* 5.5GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
*eeprom_ch_array = iwl_eeprom_band_4;
break;
case 5: /* 5.7GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
*eeprom_ch_array = iwl_eeprom_band_5;
break;
case 6: /* 2.4GHz ht40 channels */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
*eeprom_ch_array = iwl_eeprom_band_6;
break;
case 7: /* 5 GHz ht40 channels */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
*eeprom_ch_array = iwl_eeprom_band_7;
break;
default:
*eeprom_ch_count = 0;
*eeprom_ch_array = NULL;
WARN_ON(1);
}
}
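/*
 * Note: eeprom_band is 1-based, so regulatory_bands[eeprom_band - 1]
 * (with the indirection bits ORed in) locates that band's flag table in
 * the EEPROM image, while the static iwl_eeprom_band_N arrays supply
 * the matching channel numbers.
 */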
#define CHECK_AND_PRINT(x) \
((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "")
static void iwl_mod_ht40_chan_info(struct device *dev,
struct iwl_nvm_data *data, int n_channels,
enum nl80211_band band, u16 channel,
const struct iwl_eeprom_channel *eeprom_ch,
u8 clear_ht40_extension_channel)
{
struct ieee80211_channel *chan = NULL;
int i;
for (i = 0; i < n_channels; i++) {
if (data->channels[i].band != band)
continue;
if (data->channels[i].hw_value != channel)
continue;
chan = &data->channels[i];
break;
}
if (!chan)
return;
IWL_DEBUG_EEPROM(dev,
"HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
channel,
band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
CHECK_AND_PRINT(IBSS),
CHECK_AND_PRINT(ACTIVE),
CHECK_AND_PRINT(RADAR),
CHECK_AND_PRINT(WIDE),
CHECK_AND_PRINT(DFS),
eeprom_ch->flags,
eeprom_ch->max_power_avg,
((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
!(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? ""
: "not ");
if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
chan->flags &= ~clear_ht40_extension_channel;
}
#define CHECK_AND_PRINT_I(x) \
((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "")
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size)
{
int band, ch_idx;
const struct iwl_eeprom_channel *eeprom_ch_info;
const u8 *eeprom_ch_array;
int eeprom_ch_count;
int n_channels = 0;
/*
* Loop through the 5 EEPROM bands and add them to the parse list
*/
for (band = 1; band <= 5; band++) {
struct ieee80211_channel *channel;
iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
&eeprom_ch_count, &eeprom_ch_info,
&eeprom_ch_array);
/* Loop through each band adding each of the channels */
for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
const struct iwl_eeprom_channel *eeprom_ch;
eeprom_ch = &eeprom_ch_info[ch_idx];
if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) {
IWL_DEBUG_EEPROM(dev,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
eeprom_ch_array[ch_idx],
eeprom_ch_info[ch_idx].flags,
(band != 1) ? "5.2" : "2.4");
continue;
}
channel = &data->channels[n_channels];
n_channels++;
channel->hw_value = eeprom_ch_array[ch_idx];
channel->band = (band == 1) ? NL80211_BAND_2GHZ
: NL80211_BAND_5GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
/* set no-HT40, will enable as appropriate later */
channel->flags = IEEE80211_CHAN_NO_HT40;
if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS))
channel->flags |= IEEE80211_CHAN_NO_IR;
if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE))
channel->flags |= IEEE80211_CHAN_NO_IR;
if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR)
channel->flags |= IEEE80211_CHAN_RADAR;
/* Initialize regulatory-based run-time data */
channel->max_power =
eeprom_ch_info[ch_idx].max_power_avg;
IWL_DEBUG_EEPROM(dev,
"Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
channel->hw_value,
(band != 1) ? "5.2" : "2.4",
CHECK_AND_PRINT_I(VALID),
CHECK_AND_PRINT_I(IBSS),
CHECK_AND_PRINT_I(ACTIVE),
CHECK_AND_PRINT_I(RADAR),
CHECK_AND_PRINT_I(WIDE),
CHECK_AND_PRINT_I(DFS),
eeprom_ch_info[ch_idx].flags,
eeprom_ch_info[ch_idx].max_power_avg,
((eeprom_ch_info[ch_idx].flags &
EEPROM_CHANNEL_IBSS) &&
!(eeprom_ch_info[ch_idx].flags &
EEPROM_CHANNEL_RADAR))
? "" : "not ");
}
}
if (cfg->eeprom_params->enhanced_txpower) {
/*
 * For newer devices (6000 series and up) the
 * EEPROM contains enhanced tx power information;
 * the driver needs to process this additional
 * information to determine the max channel tx
 * power limits.
 */
iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size,
n_channels);
} else {
/* All others use data from channel map */
int i;
data->max_tx_pwr_half_dbm = -128;
for (i = 0; i < n_channels; i++)
data->max_tx_pwr_half_dbm =
max_t(s8, data->max_tx_pwr_half_dbm,
data->channels[i].max_power * 2);
}
/* Check if we do have HT40 channels */
if (cfg->eeprom_params->regulatory_bands[5] ==
EEPROM_REGULATORY_BAND_NO_HT40 &&
cfg->eeprom_params->regulatory_bands[6] ==
EEPROM_REGULATORY_BAND_NO_HT40)
return n_channels;
/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
for (band = 6; band <= 7; band++) {
enum nl80211_band ieeeband;
iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
&eeprom_ch_count, &eeprom_ch_info,
&eeprom_ch_array);
/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
ieeeband = (band == 6) ? NL80211_BAND_2GHZ
: NL80211_BAND_5GHZ;
/* Loop through each band adding each of the channels */
for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
/* Set up driver's info for lower half */
iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
eeprom_ch_array[ch_idx],
&eeprom_ch_info[ch_idx],
IEEE80211_CHAN_NO_HT40PLUS);
/* Set up driver's info for upper half */
iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
eeprom_ch_array[ch_idx] + 4,
&eeprom_ch_info[ch_idx],
IEEE80211_CHAN_NO_HT40MINUS);
}
}
return n_channels;
}
int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
int n_channels, enum nl80211_band band)
{
struct ieee80211_channel *chan = &data->channels[0];
int n = 0, idx = 0;
while (idx < n_channels && chan->band != band)
chan = &data->channels[++idx];
sband->channels = &data->channels[idx];
while (idx < n_channels && chan->band == band) {
chan = &data->channels[++idx];
n++;
}
sband->n_channels = n;
return n;
}
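/*
 * Note: this assumes iwl_init_channel_map() filled data->channels
 * grouped by band (all 2.4 GHz entries first, then 5 GHz), so each band
 * can be exposed to mac80211 as one contiguous slice of the array.
 */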
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
struct ieee80211_sta_ht_cap *ht_info,
enum nl80211_band band,
u8 tx_chains, u8 rx_chains)
{
int max_bit_rate = 0;
tx_chains = hweight8(tx_chains);
if (cfg->rx_with_siso_diversity)
rx_chains = 1;
else
rx_chains = hweight8(rx_chains);
if (!(data->sku_cap_11n_enable) || !cfg->ht_params) {
ht_info->ht_supported = false;
return;
}
if (data->sku_cap_mimo_disabled)
rx_chains = 1;
ht_info->ht_supported = true;
ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
if (cfg->ht_params->stbc) {
ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
if (tx_chains > 1)
ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
}
if (cfg->ht_params->ldpc)
ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
if ((cfg->mq_rx_supported &&
iwlwifi_mod_params.amsdu_size != IWL_AMSDU_4K) ||
iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent;
ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
ht_info->mcs.rx_mask[0] = 0xFF;
if (rx_chains >= 2)
ht_info->mcs.rx_mask[1] = 0xFF;
if (rx_chains >= 3)
ht_info->mcs.rx_mask[2] = 0xFF;
if (cfg->ht_params->ht_greenfield_support)
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
max_bit_rate = MAX_BIT_RATE_20_MHZ;
if (cfg->ht_params->ht40_bands & BIT(band)) {
ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
max_bit_rate = MAX_BIT_RATE_40_MHZ;
}
/* Highest supported Rx data rate */
max_bit_rate *= rx_chains;
WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
/* Tx MCS capabilities */
ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
if (tx_chains != rx_chains) {
ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
ht_info->mcs.tx_params |= ((tx_chains - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
}
}
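/*
 * For example (illustrative): with 2 RX chains and HT40 allowed on the
 * band, max_bit_rate is 150 * 2 = 300, so mcs.rx_highest advertises
 * 300 Mbps; a single chain limited to 20 MHz would advertise 72 Mbps.
 */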
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size)
{
int n_channels = iwl_init_channel_map(dev, cfg, data,
eeprom, eeprom_size);
int n_used = 0;
struct ieee80211_supported_band *sband;
sband = &data->bands[NL80211_BAND_2GHZ];
sband->band = NL80211_BAND_2GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
sband->n_bitrates = N_RATES_24;
n_used += iwl_init_sband_channels(data, sband, n_channels,
NL80211_BAND_2GHZ);
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
data->valid_tx_ant, data->valid_rx_ant);
sband = &data->bands[NL80211_BAND_5GHZ];
sband->band = NL80211_BAND_5GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
sband->n_bitrates = N_RATES_52;
n_used += iwl_init_sband_channels(data, sband, n_channels,
NL80211_BAND_5GHZ);
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
data->valid_tx_ant, data->valid_rx_ant);
if (n_channels != n_used)
IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n",
n_used, n_channels);
}
/* EEPROM data functions */
struct iwl_nvm_data *
iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
const u8 *eeprom, size_t eeprom_size)
{
struct iwl_nvm_data *data;
const void *tmp;
u16 radio_cfg, sku;
if (WARN_ON(!cfg || !cfg->eeprom_params))
return NULL;
data = kzalloc(sizeof(*data) +
sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
GFP_KERNEL);
if (!data)
return NULL;
/* get MAC address(es) */
tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS);
if (!tmp)
goto err_free;
memcpy(data->hw_addr, tmp, ETH_ALEN);
data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size,
EEPROM_NUM_MAC_ADDRESS);
if (iwl_eeprom_read_calib(eeprom, eeprom_size, data))
goto err_free;
tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL);
if (!tmp)
goto err_free;
memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib));
tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
EEPROM_RAW_TEMPERATURE);
if (!tmp)
goto err_free;
data->raw_temperature = *(__le16 *)tmp;
tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
EEPROM_KELVIN_TEMPERATURE);
if (!tmp)
goto err_free;
data->kelvin_temperature = *(__le16 *)tmp;
data->kelvin_voltage = *((__le16 *)tmp + 1);
radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
EEPROM_RADIO_CONFIG);
data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg);
data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg);
data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg);
data->radio_cfg_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);
data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
sku = iwl_eeprom_query16(eeprom, eeprom_size,
EEPROM_SKU_CAP);
data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE;
data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE;
data->sku_cap_band_24GHz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ;
data->sku_cap_band_52GHz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ;
data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE;
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
data->sku_cap_11n_enable = false;
data->nvm_version = iwl_eeprom_query16(eeprom, eeprom_size,
EEPROM_VERSION);
/* check overrides (some devices have wrong EEPROM) */
if (cfg->valid_tx_ant)
data->valid_tx_ant = cfg->valid_tx_ant;
if (cfg->valid_rx_ant)
data->valid_rx_ant = cfg->valid_rx_ant;
if (!data->valid_tx_ant || !data->valid_rx_ant) {
IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
data->valid_tx_ant, data->valid_rx_ant);
goto err_free;
}
iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size);
return data;
err_free:
kfree(data);
return NULL;
}
IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
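/*
 * Note: on success the caller owns the returned structure (one
 * kzalloc'd block that also holds the channel array) and is expected to
 * kfree() it; on any parse error NULL is returned and the allocation is
 * freed here.
 */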
/* helper functions */
int iwl_nvm_check_version(struct iwl_nvm_data *data,
struct iwl_trans *trans)
{
if (data->nvm_version >= trans->cfg->nvm_ver ||
data->calib_version >= trans->cfg->nvm_calib_ver) {
IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
data->nvm_version, data->calib_version);
return 0;
}
IWL_ERR(trans,
"Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
data->nvm_version, trans->cfg->nvm_ver,
data->calib_version, trans->cfg->nvm_calib_ver);
return -EINVAL;
}
IWL_EXPORT_SYMBOL(iwl_nvm_check_version);
| gpl-2.0 |
liudanking/linux-kernel | drivers/tty/synclink.c | 846 | 235362 | /*
* $Id: synclink.c,v 4.38 2005/11/07 16:30:34 paulkf Exp $
*
* Device driver for Microgate SyncLink ISA and PCI
* high speed multiprotocol serial adapters.
*
* written by Paul Fulghum for Microgate Corporation
* paulkf@microgate.com
*
* Microgate and SyncLink are trademarks of Microgate Corporation
*
* Derived from serial.c written by Theodore Ts'o and Linus Torvalds
*
* Original release 01/11/99
*
* This code is released under the GNU General Public License (GPL)
*
* This driver is primarily intended for use in synchronous
* HDLC mode. Asynchronous mode is also provided.
*
* When operating in synchronous mode, each call to mgsl_write()
* contains exactly one complete HDLC frame. Calling mgsl_put_char
* will start assembling an HDLC frame that will not be sent until
* mgsl_flush_chars or mgsl_write is called.
*
* Synchronous receive data is reported as complete frames. To accomplish
* this, the TTY flip buffer is bypassed (too small to hold largest
* frame and may fragment frames) and the line discipline
* receive entry point is called directly.
*
* This driver has been tested with a slightly modified ppp.c driver
* for synchronous PPP.
*
* 2000/02/16
* Added interface for syncppp.c driver (an alternate synchronous PPP
* implementation that also supports Cisco HDLC). Each device instance
* registers as a tty device AND a network device (if dosyncppp option
* is set for the device). The functionality is determined by which
* device interface is opened.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#if defined(__i386__)
# define BREAKPOINT() asm(" int $3");
#else
# define BREAKPOINT() { }
#endif
#define MAX_ISA_DEVICES 10
#define MAX_PCI_DEVICES 10
#define MAX_TOTAL_DEVICES 20
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/synclink.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <linux/bitops.h>
#include <asm/types.h>
#include <linux/termios.h>
#include <linux/workqueue.h>
#include <linux/hdlc.h>
#include <linux/dma-mapping.h>
#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
#else
#define SYNCLINK_GENERIC_HDLC 0
#endif
#define GET_USER(error,value,addr) error = get_user(value,addr)
#define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0
#define PUT_USER(error,value,addr) error = put_user(value,addr)
#define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? -EFAULT : 0
#include <asm/uaccess.h>
#define RCLRVALUE 0xffff
static MGSL_PARAMS default_params = {
MGSL_MODE_HDLC, /* unsigned long mode */
0, /* unsigned char loopback; */
HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */
HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */
0, /* unsigned long clock_speed; */
0xff, /* unsigned char addr_filter; */
HDLC_CRC_16_CCITT, /* unsigned short crc_type; */
HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */
HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */
9600, /* unsigned long data_rate; */
8, /* unsigned char data_bits; */
1, /* unsigned char stop_bits; */
ASYNC_PARITY_NONE /* unsigned char parity; */
};
#define SHARED_MEM_ADDRESS_SIZE 0x40000
#define BUFFERLISTSIZE 4096
#define DMABUFFERSIZE 4096
#define MAXRXFRAMES 7
typedef struct _DMABUFFERENTRY
{
u32 phys_addr; /* 32-bit flat physical address of data buffer */
volatile u16 count; /* buffer size/data count */
volatile u16 status; /* Control/status field */
volatile u16 rcc; /* character count field */
u16 reserved; /* padding required by 16C32 */
u32 link; /* 32-bit flat link to next buffer entry */
char *virt_addr; /* virtual address of data buffer */
u32 phys_entry; /* physical address of this buffer entry */
dma_addr_t dma_addr;
} DMABUFFERENTRY, *DMAPBUFFERENTRY;
/* The queue of BH actions to be performed */
#define BH_RECEIVE 1
#define BH_TRANSMIT 2
#define BH_STATUS 4
#define IO_PIN_SHUTDOWN_LIMIT 100
struct _input_signal_events {
int ri_up;
int ri_down;
int dsr_up;
int dsr_down;
int dcd_up;
int dcd_down;
int cts_up;
int cts_down;
};
/* transmit holding buffer definitions*/
#define MAX_TX_HOLDING_BUFFERS 5
struct tx_holding_buffer {
int buffer_size;
unsigned char * buffer;
};
/*
* Device instance data structure
*/
struct mgsl_struct {
int magic;
struct tty_port port;
int line;
int hw_version;
struct mgsl_icount icount;
int timeout;
int x_char; /* xon/xoff character */
u16 read_status_mask;
u16 ignore_status_mask;
unsigned char *xmit_buf;
int xmit_head;
int xmit_tail;
int xmit_cnt;
wait_queue_head_t status_event_wait_q;
wait_queue_head_t event_wait_q;
struct timer_list tx_timer; /* HDLC transmit timeout timer */
struct mgsl_struct *next_device; /* device list link */
spinlock_t irq_spinlock; /* spinlock for synchronizing with ISR */
struct work_struct task; /* task structure for scheduling bh */
u32 EventMask; /* event trigger mask */
u32 RecordedEvents; /* pending events */
u32 max_frame_size; /* as set by device config */
u32 pending_bh;
bool bh_running; /* Protection from multiple */
int isr_overflow;
bool bh_requested;
int dcd_chkcount; /* check counts to prevent */
int cts_chkcount; /* too many IRQs if a signal */
int dsr_chkcount; /* is floating */
int ri_chkcount;
char *buffer_list; /* virtual address of Rx & Tx buffer lists */
u32 buffer_list_phys;
dma_addr_t buffer_list_dma_addr;
unsigned int rx_buffer_count; /* count of total allocated Rx buffers */
DMABUFFERENTRY *rx_buffer_list; /* list of receive buffer entries */
unsigned int current_rx_buffer;
int num_tx_dma_buffers; /* number of tx dma frames required */
int tx_dma_buffers_used;
unsigned int tx_buffer_count; /* count of total allocated Tx buffers */
DMABUFFERENTRY *tx_buffer_list; /* list of transmit buffer entries */
int start_tx_dma_buffer; /* tx dma buffer to start tx dma operation */
int current_tx_buffer; /* next tx dma buffer to be loaded */
unsigned char *intermediate_rxbuffer;
int num_tx_holding_buffers; /* number of tx holding buffer allocated */
int get_tx_holding_index; /* next tx holding buffer for adapter to load */
int put_tx_holding_index; /* next tx holding buffer to store user request */
int tx_holding_count; /* number of tx holding buffers waiting */
struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
bool rx_enabled;
bool rx_overflow;
bool rx_rcc_underrun;
bool tx_enabled;
bool tx_active;
u32 idle_mode;
u16 cmr_value;
u16 tcsr_value;
char device_name[25]; /* device instance name */
unsigned int bus_type; /* expansion bus type (ISA,EISA,PCI) */
unsigned char bus; /* expansion bus number (zero based) */
unsigned char function; /* PCI device number */
unsigned int io_base; /* base I/O address of adapter */
unsigned int io_addr_size; /* size of the I/O address range */
bool io_addr_requested; /* true if I/O address requested */
unsigned int irq_level; /* interrupt level */
unsigned long irq_flags;
bool irq_requested; /* true if IRQ requested */
unsigned int dma_level; /* DMA channel */
bool dma_requested; /* true if dma channel requested */
u16 mbre_bit;
u16 loopback_bits;
u16 usc_idle_mode;
MGSL_PARAMS params; /* communications parameters */
unsigned char serial_signals; /* current serial signal states */
bool irq_occurred; /* for diagnostics use */
unsigned int init_error; /* Initialization startup error (DIAGS) */
int fDiagnosticsmode; /* Driver in Diagnostic mode? (DIAGS) */
u32 last_mem_alloc;
unsigned char* memory_base; /* shared memory address (PCI only) */
u32 phys_memory_base;
bool shared_mem_requested;
unsigned char* lcr_base; /* local config registers (PCI only) */
u32 phys_lcr_base;
u32 lcr_offset;
bool lcr_mem_requested;
u32 misc_ctrl_value;
char *flag_buf;
bool drop_rts_on_tx_done;
bool loopmode_insert_requested;
bool loopmode_send_done_requested;
struct _input_signal_events input_signal_events;
/* generic HDLC device parts */
int netcount;
spinlock_t netlock;
#if SYNCLINK_GENERIC_HDLC
struct net_device *netdev;
#endif
};
#define MGSL_MAGIC 0x5401
/*
* The size of the serial xmit buffer is 1 page, or 4096 bytes
*/
#ifndef SERIAL_XMIT_SIZE
#define SERIAL_XMIT_SIZE 4096
#endif
/*
* These macros define the offsets used in calculating the
* I/O address of the specified USC registers.
*/
#define DCPIN 2 /* Bit 1 of I/O address */
#define SDPIN 4 /* Bit 2 of I/O address */
#define DCAR 0 /* DMA command/address register */
#define CCAR SDPIN /* channel command/address register */
#define DATAREG DCPIN + SDPIN /* serial data register */
#define MSBONLY 0x41
#define LSBONLY 0x40
/*
* These macros define the register address (ordinal number)
* used for writing address/value pairs to the USC.
*/
#define CMR 0x02 /* Channel mode Register */
#define CCSR 0x04 /* Channel Command/status Register */
#define CCR 0x06 /* Channel Control Register */
#define PSR 0x08 /* Port status Register */
#define PCR 0x0a /* Port Control Register */
#define TMDR 0x0c /* Test mode Data Register */
#define TMCR 0x0e /* Test mode Control Register */
#define CMCR 0x10 /* Clock mode Control Register */
#define HCR 0x12 /* Hardware Configuration Register */
#define IVR 0x14 /* Interrupt Vector Register */
#define IOCR 0x16 /* Input/Output Control Register */
#define ICR 0x18 /* Interrupt Control Register */
#define DCCR 0x1a /* Daisy Chain Control Register */
#define MISR 0x1c /* Misc Interrupt status Register */
#define SICR 0x1e /* status Interrupt Control Register */
#define RDR 0x20 /* Receive Data Register */
#define RMR 0x22 /* Receive mode Register */
#define RCSR 0x24 /* Receive Command/status Register */
#define RICR 0x26 /* Receive Interrupt Control Register */
#define RSR 0x28 /* Receive Sync Register */
#define RCLR 0x2a /* Receive count Limit Register */
#define RCCR 0x2c /* Receive Character count Register */
#define TC0R 0x2e /* Time Constant 0 Register */
#define TDR 0x30 /* Transmit Data Register */
#define TMR 0x32 /* Transmit mode Register */
#define TCSR 0x34 /* Transmit Command/status Register */
#define TICR 0x36 /* Transmit Interrupt Control Register */
#define TSR 0x38 /* Transmit Sync Register */
#define TCLR 0x3a /* Transmit count Limit Register */
#define TCCR 0x3c /* Transmit Character count Register */
#define TC1R 0x3e /* Time Constant 1 Register */
/*
* MACRO DEFINITIONS FOR DMA REGISTERS
*/
#define DCR 0x06 /* DMA Control Register (shared) */
#define DACR 0x08 /* DMA Array count Register (shared) */
#define BDCR 0x12 /* Burst/Dwell Control Register (shared) */
#define DIVR 0x14 /* DMA Interrupt Vector Register (shared) */
#define DICR 0x18 /* DMA Interrupt Control Register (shared) */
#define CDIR 0x1a /* Clear DMA Interrupt Register (shared) */
#define SDIR 0x1c /* Set DMA Interrupt Register (shared) */
#define TDMR 0x02 /* Transmit DMA mode Register */
#define TDIAR 0x1e /* Transmit DMA Interrupt Arm Register */
#define TBCR 0x2a /* Transmit Byte count Register */
#define TARL 0x2c /* Transmit Address Register (low) */
#define TARU 0x2e /* Transmit Address Register (high) */
#define NTBCR 0x3a /* Next Transmit Byte count Register */
#define NTARL 0x3c /* Next Transmit Address Register (low) */
#define NTARU 0x3e /* Next Transmit Address Register (high) */
#define RDMR 0x82 /* Receive DMA mode Register (non-shared) */
#define RDIAR 0x9e /* Receive DMA Interrupt Arm Register */
#define RBCR 0xaa /* Receive Byte count Register */
#define RARL 0xac /* Receive Address Register (low) */
#define RARU 0xae /* Receive Address Register (high) */
#define NRBCR 0xba /* Next Receive Byte count Register */
#define NRARL 0xbc /* Next Receive Address Register (low) */
#define NRARU 0xbe /* Next Receive Address Register (high) */
/*
* MACRO DEFINITIONS FOR MODEM STATUS BITS
*/
#define MODEMSTATUS_DTR 0x80
#define MODEMSTATUS_DSR 0x40
#define MODEMSTATUS_RTS 0x20
#define MODEMSTATUS_CTS 0x10
#define MODEMSTATUS_RI 0x04
#define MODEMSTATUS_DCD 0x01
/*
* Channel Command/Address Register (CCAR) Command Codes
*/
#define RTCmd_Null 0x0000
#define RTCmd_ResetHighestIus 0x1000
#define RTCmd_TriggerChannelLoadDma 0x2000
#define RTCmd_TriggerRxDma 0x2800
#define RTCmd_TriggerTxDma 0x3000
#define RTCmd_TriggerRxAndTxDma 0x3800
#define RTCmd_PurgeRxFifo 0x4800
#define RTCmd_PurgeTxFifo 0x5000
#define RTCmd_PurgeRxAndTxFifo 0x5800
#define RTCmd_LoadRcc 0x6800
#define RTCmd_LoadTcc 0x7000
#define RTCmd_LoadRccAndTcc 0x7800
#define RTCmd_LoadTC0 0x8800
#define RTCmd_LoadTC1 0x9000
#define RTCmd_LoadTC0AndTC1 0x9800
#define RTCmd_SerialDataLSBFirst 0xa000
#define RTCmd_SerialDataMSBFirst 0xa800
#define RTCmd_SelectBigEndian 0xb000
#define RTCmd_SelectLittleEndian 0xb800
/*
* DMA Command/Address Register (DCAR) Command Codes
*/
#define DmaCmd_Null 0x0000
#define DmaCmd_ResetTxChannel 0x1000
#define DmaCmd_ResetRxChannel 0x1200
#define DmaCmd_StartTxChannel 0x2000
#define DmaCmd_StartRxChannel 0x2200
#define DmaCmd_ContinueTxChannel 0x3000
#define DmaCmd_ContinueRxChannel 0x3200
#define DmaCmd_PauseTxChannel 0x4000
#define DmaCmd_PauseRxChannel 0x4200
#define DmaCmd_AbortTxChannel 0x5000
#define DmaCmd_AbortRxChannel 0x5200
#define DmaCmd_InitTxChannel 0x7000
#define DmaCmd_InitRxChannel 0x7200
#define DmaCmd_ResetHighestDmaIus 0x8000
#define DmaCmd_ResetAllChannels 0x9000
#define DmaCmd_StartAllChannels 0xa000
#define DmaCmd_ContinueAllChannels 0xb000
#define DmaCmd_PauseAllChannels 0xc000
#define DmaCmd_AbortAllChannels 0xd000
#define DmaCmd_InitAllChannels 0xf000
#define TCmd_Null 0x0000
#define TCmd_ClearTxCRC 0x2000
#define TCmd_SelectTicrTtsaData 0x4000
#define TCmd_SelectTicrTxFifostatus 0x5000
#define TCmd_SelectTicrIntLevel 0x6000
#define TCmd_SelectTicrdma_level 0x7000
#define TCmd_SendFrame 0x8000
#define TCmd_SendAbort 0x9000
#define TCmd_EnableDleInsertion 0xc000
#define TCmd_DisableDleInsertion 0xd000
#define TCmd_ClearEofEom 0xe000
#define TCmd_SetEofEom 0xf000
#define RCmd_Null 0x0000
#define RCmd_ClearRxCRC 0x2000
#define RCmd_EnterHuntmode 0x3000
#define RCmd_SelectRicrRtsaData 0x4000
#define RCmd_SelectRicrRxFifostatus 0x5000
#define RCmd_SelectRicrIntLevel 0x6000
#define RCmd_SelectRicrdma_level 0x7000
/*
* Bits for enabling and disabling IRQs in Interrupt Control Register (ICR)
*/
#define RECEIVE_STATUS BIT5
#define RECEIVE_DATA BIT4
#define TRANSMIT_STATUS BIT3
#define TRANSMIT_DATA BIT2
#define IO_PIN BIT1
#define MISC BIT0
/*
* Receive status Bits in Receive Command/status Register RCSR
*/
#define RXSTATUS_SHORT_FRAME BIT8
#define RXSTATUS_CODE_VIOLATION BIT8
#define RXSTATUS_EXITED_HUNT BIT7
#define RXSTATUS_IDLE_RECEIVED BIT6
#define RXSTATUS_BREAK_RECEIVED BIT5
#define RXSTATUS_ABORT_RECEIVED BIT5
#define RXSTATUS_RXBOUND BIT4
#define RXSTATUS_CRC_ERROR BIT3
#define RXSTATUS_FRAMING_ERROR BIT3
#define RXSTATUS_ABORT BIT2
#define RXSTATUS_PARITY_ERROR BIT2
#define RXSTATUS_OVERRUN BIT1
#define RXSTATUS_DATA_AVAILABLE BIT0
#define RXSTATUS_ALL 0x01f6
#define usc_UnlatchRxstatusBits(a,b) usc_OutReg( (a), RCSR, (u16)((b) & RXSTATUS_ALL) )
/*
* Values for setting transmit idle mode in
* Transmit Control/status Register (TCSR)
*/
#define IDLEMODE_FLAGS 0x0000
#define IDLEMODE_ALT_ONE_ZERO 0x0100
#define IDLEMODE_ZERO 0x0200
#define IDLEMODE_ONE 0x0300
#define IDLEMODE_ALT_MARK_SPACE 0x0500
#define IDLEMODE_SPACE 0x0600
#define IDLEMODE_MARK 0x0700
#define IDLEMODE_MASK 0x0700
/*
* IUSC revision identifiers
*/
#define IUSC_SL1660 0x4d44
#define IUSC_PRE_SL1660 0x4553
/*
* Transmit status Bits in Transmit Command/status Register (TCSR)
*/
#define TCSR_PRESERVE 0x0F00
#define TCSR_UNDERWAIT BIT11
#define TXSTATUS_PREAMBLE_SENT BIT7
#define TXSTATUS_IDLE_SENT BIT6
#define TXSTATUS_ABORT_SENT BIT5
#define TXSTATUS_EOF_SENT BIT4
#define TXSTATUS_EOM_SENT BIT4
#define TXSTATUS_CRC_SENT BIT3
#define TXSTATUS_ALL_SENT BIT2
#define TXSTATUS_UNDERRUN BIT1
#define TXSTATUS_FIFO_EMPTY BIT0
#define TXSTATUS_ALL 0x00fa
#define usc_UnlatchTxstatusBits(a,b) usc_OutReg( (a), TCSR, (u16)((a)->tcsr_value + ((b) & 0x00FF)) )
#define MISCSTATUS_RXC_LATCHED BIT15
#define MISCSTATUS_RXC BIT14
#define MISCSTATUS_TXC_LATCHED BIT13
#define MISCSTATUS_TXC BIT12
#define MISCSTATUS_RI_LATCHED BIT11
#define MISCSTATUS_RI BIT10
#define MISCSTATUS_DSR_LATCHED BIT9
#define MISCSTATUS_DSR BIT8
#define MISCSTATUS_DCD_LATCHED BIT7
#define MISCSTATUS_DCD BIT6
#define MISCSTATUS_CTS_LATCHED BIT5
#define MISCSTATUS_CTS BIT4
#define MISCSTATUS_RCC_UNDERRUN BIT3
#define MISCSTATUS_DPLL_NO_SYNC BIT2
#define MISCSTATUS_BRG1_ZERO BIT1
#define MISCSTATUS_BRG0_ZERO BIT0
#define usc_UnlatchIostatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0xaaa0))
#define usc_UnlatchMiscstatusBits(a,b) usc_OutReg((a),MISR,(u16)((b) & 0x000f))
#define SICR_RXC_ACTIVE BIT15
#define SICR_RXC_INACTIVE BIT14
#define SICR_RXC (BIT15|BIT14)
#define SICR_TXC_ACTIVE BIT13
#define SICR_TXC_INACTIVE BIT12
#define SICR_TXC (BIT13|BIT12)
#define SICR_RI_ACTIVE BIT11
#define SICR_RI_INACTIVE BIT10
#define SICR_RI (BIT11|BIT10)
#define SICR_DSR_ACTIVE BIT9
#define SICR_DSR_INACTIVE BIT8
#define SICR_DSR (BIT9|BIT8)
#define SICR_DCD_ACTIVE BIT7
#define SICR_DCD_INACTIVE BIT6
#define SICR_DCD (BIT7|BIT6)
#define SICR_CTS_ACTIVE BIT5
#define SICR_CTS_INACTIVE BIT4
#define SICR_CTS (BIT5|BIT4)
#define SICR_RCC_UNDERFLOW BIT3
#define SICR_DPLL_NO_SYNC BIT2
#define SICR_BRG1_ZERO BIT1
#define SICR_BRG0_ZERO BIT0
void usc_DisableMasterIrqBit( struct mgsl_struct *info );
void usc_EnableMasterIrqBit( struct mgsl_struct *info );
void usc_EnableInterrupts( struct mgsl_struct *info, u16 IrqMask );
void usc_DisableInterrupts( struct mgsl_struct *info, u16 IrqMask );
void usc_ClearIrqPendingBits( struct mgsl_struct *info, u16 IrqMask );
#define usc_EnableInterrupts( a, b ) \
usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0xc0 + (b)) )
#define usc_DisableInterrupts( a, b ) \
usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0xff00) + 0x80 + (b)) )
#define usc_EnableMasterIrqBit(a) \
usc_OutReg( (a), ICR, (u16)((usc_InReg((a),ICR) & 0x0f00) + 0xb000) )
#define usc_DisableMasterIrqBit(a) \
usc_OutReg( (a), ICR, (u16)(usc_InReg((a),ICR) & 0x7f00) )
#define usc_ClearIrqPendingBits( a, b ) usc_OutReg( (a), DCCR, 0x40 + (b) )
/*
* Transmit status Bits in Transmit Control status Register (TCSR)
* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0)
*/
#define TXSTATUS_PREAMBLE_SENT BIT7
#define TXSTATUS_IDLE_SENT BIT6
#define TXSTATUS_ABORT_SENT BIT5
#define TXSTATUS_EOF BIT4
#define TXSTATUS_CRC_SENT BIT3
#define TXSTATUS_ALL_SENT BIT2
#define TXSTATUS_UNDERRUN BIT1
#define TXSTATUS_FIFO_EMPTY BIT0
#define DICR_MASTER BIT15
#define DICR_TRANSMIT BIT0
#define DICR_RECEIVE BIT1
#define usc_EnableDmaInterrupts(a,b) \
usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) | (b)) )
#define usc_DisableDmaInterrupts(a,b) \
usc_OutDmaReg( (a), DICR, (u16)(usc_InDmaReg((a),DICR) & ~(b)) )
#define usc_EnableStatusIrqs(a,b) \
usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) | (b)) )
#define usc_DisablestatusIrqs(a,b) \
usc_OutReg( (a), SICR, (u16)(usc_InReg((a),SICR) & ~(b)) )
/* Transmit status Bits in Transmit Control status Register (TCSR) */
/* and Transmit Interrupt Control Register (TICR) (except BIT2, BIT0) */
#define DISABLE_UNCONDITIONAL 0
#define DISABLE_END_OF_FRAME 1
#define ENABLE_UNCONDITIONAL 2
#define ENABLE_AUTO_CTS 3
#define ENABLE_AUTO_DCD 3
#define usc_EnableTransmitter(a,b) \
usc_OutReg( (a), TMR, (u16)((usc_InReg((a),TMR) & 0xfffc) | (b)) )
#define usc_EnableReceiver(a,b) \
usc_OutReg( (a), RMR, (u16)((usc_InReg((a),RMR) & 0xfffc) | (b)) )
static u16 usc_InDmaReg( struct mgsl_struct *info, u16 Port );
static void usc_OutDmaReg( struct mgsl_struct *info, u16 Port, u16 Value );
static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd );
static u16 usc_InReg( struct mgsl_struct *info, u16 Port );
static void usc_OutReg( struct mgsl_struct *info, u16 Port, u16 Value );
static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd );
void usc_RCmd( struct mgsl_struct *info, u16 Cmd );
void usc_TCmd( struct mgsl_struct *info, u16 Cmd );
#define usc_TCmd(a,b) usc_OutReg((a), TCSR, (u16)((a)->tcsr_value + (b)))
#define usc_RCmd(a,b) usc_OutReg((a), RCSR, (b))
#define usc_SetTransmitSyncChars(a,s0,s1) usc_OutReg((a), TSR, (u16)(((u16)s0<<8)|(u16)s1))
static void usc_process_rxoverrun_sync( struct mgsl_struct *info );
static void usc_start_receiver( struct mgsl_struct *info );
static void usc_stop_receiver( struct mgsl_struct *info );
static void usc_start_transmitter( struct mgsl_struct *info );
static void usc_stop_transmitter( struct mgsl_struct *info );
static void usc_set_txidle( struct mgsl_struct *info );
static void usc_load_txfifo( struct mgsl_struct *info );
static void usc_enable_aux_clock( struct mgsl_struct *info, u32 DataRate );
static void usc_enable_loopback( struct mgsl_struct *info, int enable );
static void usc_get_serial_signals( struct mgsl_struct *info );
static void usc_set_serial_signals( struct mgsl_struct *info );
static void usc_reset( struct mgsl_struct *info );
static void usc_set_sync_mode( struct mgsl_struct *info );
static void usc_set_sdlc_mode( struct mgsl_struct *info );
static void usc_set_async_mode( struct mgsl_struct *info );
static void usc_enable_async_clock( struct mgsl_struct *info, u32 DataRate );
static void usc_loopback_frame( struct mgsl_struct *info );
static void mgsl_tx_timeout(unsigned long context);
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
static void usc_loopmode_insert_request( struct mgsl_struct * info );
static int usc_loopmode_active( struct mgsl_struct * info);
static void usc_loopmode_send_done( struct mgsl_struct * info );
static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg);
#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
static void hdlcdev_tx_done(struct mgsl_struct *info);
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size);
static int hdlcdev_init(struct mgsl_struct *info);
static void hdlcdev_exit(struct mgsl_struct *info);
#endif
/*
* Defines a BUS descriptor value for the PCI adapter
* local bus address ranges.
*/
#define BUS_DESCRIPTOR( WrHold, WrDly, RdDly, Nwdd, Nwad, Nxda, Nrdd, Nrad ) \
(0x00400020 + \
((WrHold) << 30) + \
((WrDly) << 28) + \
((RdDly) << 26) + \
((Nwdd) << 20) + \
((Nwad) << 15) + \
((Nxda) << 13) + \
((Nrdd) << 11) + \
((Nrad) << 6) )
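/*
 * Illustrative expansion (hypothetical timing values):
 * BUS_DESCRIPTOR(1, 0, 0, 0, 0, 0, 0, 0) adds (1 << 30) to the
 * 0x00400020 base, yielding 0x40400020, i.e. one write hold cycle with
 * all other timing fields zero. The values actually programmed are
 * hardware dependent.
 */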
static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit);
/*
* Adapter diagnostic routines
*/
static bool mgsl_register_test( struct mgsl_struct *info );
static bool mgsl_irq_test( struct mgsl_struct *info );
static bool mgsl_dma_test( struct mgsl_struct *info );
static bool mgsl_memory_test( struct mgsl_struct *info );
static int mgsl_adapter_test( struct mgsl_struct *info );
/*
* device and resource management routines
*/
static int mgsl_claim_resources(struct mgsl_struct *info);
static void mgsl_release_resources(struct mgsl_struct *info);
static void mgsl_add_device(struct mgsl_struct *info);
static struct mgsl_struct* mgsl_allocate_device(void);
/*
* DMA buffer manipulation functions.
*/
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
static bool mgsl_get_rx_frame( struct mgsl_struct *info );
static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
static int num_free_tx_dma_buffers(struct mgsl_struct *info);
static void mgsl_load_tx_dma_buffer( struct mgsl_struct *info, const char *Buffer, unsigned int BufferSize);
static void mgsl_load_pci_memory(char* TargetPtr, const char* SourcePtr, unsigned short count);
/*
* DMA and Shared Memory buffer allocation and formatting
*/
static int mgsl_allocate_dma_buffers(struct mgsl_struct *info);
static void mgsl_free_dma_buffers(struct mgsl_struct *info);
static int mgsl_alloc_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList,int Buffercount);
static int mgsl_alloc_buffer_list_memory(struct mgsl_struct *info);
static void mgsl_free_buffer_list_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
/*
* Bottom half interrupt handlers
*/
static void mgsl_bh_handler(struct work_struct *work);
static void mgsl_bh_receive(struct mgsl_struct *info);
static void mgsl_bh_transmit(struct mgsl_struct *info);
static void mgsl_bh_status(struct mgsl_struct *info);
/*
* Interrupt handler routines and dispatch table.
*/
static void mgsl_isr_null( struct mgsl_struct *info );
static void mgsl_isr_transmit_data( struct mgsl_struct *info );
static void mgsl_isr_receive_data( struct mgsl_struct *info );
static void mgsl_isr_receive_status( struct mgsl_struct *info );
static void mgsl_isr_transmit_status( struct mgsl_struct *info );
static void mgsl_isr_io_pin( struct mgsl_struct *info );
static void mgsl_isr_misc( struct mgsl_struct *info );
static void mgsl_isr_receive_dma( struct mgsl_struct *info );
static void mgsl_isr_transmit_dma( struct mgsl_struct *info );
typedef void (*isr_dispatch_func)(struct mgsl_struct *);
static isr_dispatch_func UscIsrTable[7] =
{
mgsl_isr_null,
mgsl_isr_misc,
mgsl_isr_io_pin,
mgsl_isr_transmit_data,
mgsl_isr_transmit_status,
mgsl_isr_receive_data,
mgsl_isr_receive_status
};
/*
* ioctl call handlers
*/
static int tiocmget(struct tty_struct *tty);
static int tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount
__user *user_icount);
static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params);
static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params);
static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode);
static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode);
static int mgsl_txenable(struct mgsl_struct * info, int enable);
static int mgsl_txabort(struct mgsl_struct * info);
static int mgsl_rxenable(struct mgsl_struct * info, int enable);
static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
static int mgsl_loopmode_send_done( struct mgsl_struct * info );
/* set non-zero on successful registration with PCI subsystem */
static bool pci_registered;
/*
* Global linked list of SyncLink devices
*/
static struct mgsl_struct *mgsl_device_list;
static int mgsl_device_count;
/*
* Set this param to non-zero to load eax with the
* .text section address and breakpoint on module load.
* This is useful for use with gdb and add-symbol-file command.
*/
static bool break_on_load;
/*
* Driver major number, defaults to zero to get auto
* assigned major number. May be forced as module parameter.
*/
static int ttymajor;
/*
* Array of user specified options for ISA adapters.
*/
static int io[MAX_ISA_DEVICES];
static int irq[MAX_ISA_DEVICES];
static int dma[MAX_ISA_DEVICES];
static int debug_level;
static int maxframe[MAX_TOTAL_DEVICES];
static int txdmabufs[MAX_TOTAL_DEVICES];
static int txholdbufs[MAX_TOTAL_DEVICES];
module_param(break_on_load, bool, 0);
module_param(ttymajor, int, 0);
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(dma, int, NULL, 0);
module_param(debug_level, int, 0);
module_param_array(maxframe, int, NULL, 0);
module_param_array(txdmabufs, int, NULL, 0);
module_param_array(txholdbufs, int, NULL, 0);
static char *driver_name = "SyncLink serial driver";
static char *driver_version = "$Revision: 4.38 $";
static int synclink_init_one (struct pci_dev *dev,
const struct pci_device_id *ent);
static void synclink_remove_one (struct pci_dev *dev);
static struct pci_device_id synclink_pci_tbl[] = {
{ PCI_VENDOR_ID_MICROGATE, PCI_DEVICE_ID_MICROGATE_USC, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_MICROGATE, 0x0210, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
MODULE_LICENSE("GPL");
static struct pci_driver synclink_pci_driver = {
.name = "synclink",
.id_table = synclink_pci_tbl,
.probe = synclink_init_one,
.remove = synclink_remove_one,
};
static struct tty_driver *serial_driver;
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
static void mgsl_change_params(struct mgsl_struct *info);
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout);
/*
* 1st function defined in .text section. Calling this function in
* init_module() followed by a breakpoint allows a remote debugger
* (gdb) to get the .text address for the add-symbol-file command.
* This allows remote debugging of dynamically loadable modules.
*/
static void* mgsl_get_text_ptr(void)
{
return mgsl_get_text_ptr;
}
static inline int mgsl_paranoia_check(struct mgsl_struct *info,
char *name, const char *routine)
{
#ifdef MGSL_PARANOIA_CHECK
static const char *badmagic =
"Warning: bad magic number for mgsl struct (%s) in %s\n";
static const char *badinfo =
"Warning: null mgsl_struct for (%s) in %s\n";
if (!info) {
printk(badinfo, name, routine);
return 1;
}
if (info->magic != MGSL_MAGIC) {
printk(badmagic, name, routine);
return 1;
}
#else
if (!info)
return 1;
#endif
return 0;
}
/**
* line discipline callback wrappers
*
* The wrappers maintain line discipline references
* while calling into the line discipline.
*
* ldisc_receive_buf - pass receive data to line discipline
*/
static void ldisc_receive_buf(struct tty_struct *tty,
const __u8 *data, char *flags, int count)
{
struct tty_ldisc *ld;
if (!tty)
return;
ld = tty_ldisc_ref(tty);
if (ld) {
if (ld->ops->receive_buf)
ld->ops->receive_buf(tty, data, flags, count);
tty_ldisc_deref(ld);
}
}
/* mgsl_stop() throttle (stop) transmitter
*
* Arguments: tty pointer to tty info structure
* Return Value: None
*/
static void mgsl_stop(struct tty_struct *tty)
{
struct mgsl_struct *info = tty->driver_data;
unsigned long flags;
if (mgsl_paranoia_check(info, tty->name, "mgsl_stop"))
return;
if ( debug_level >= DEBUG_LEVEL_INFO )
printk("mgsl_stop(%s)\n",info->device_name);
spin_lock_irqsave(&info->irq_spinlock,flags);
if (info->tx_enabled)
usc_stop_transmitter(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
} /* end of mgsl_stop() */
/* mgsl_start() release (start) transmitter
*
* Arguments: tty pointer to tty info structure
* Return Value: None
*/
static void mgsl_start(struct tty_struct *tty)
{
struct mgsl_struct *info = tty->driver_data;
unsigned long flags;
if (mgsl_paranoia_check(info, tty->name, "mgsl_start"))
return;
if ( debug_level >= DEBUG_LEVEL_INFO )
printk("mgsl_start(%s)\n",info->device_name);
spin_lock_irqsave(&info->irq_spinlock,flags);
if (!info->tx_enabled)
usc_start_transmitter(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
} /* end of mgsl_start() */
/*
* Bottom half work queue access functions
*/
/* mgsl_bh_action() Return next bottom half action to perform.
* Return Value: BH action code or 0 if nothing to do.
*/
static int mgsl_bh_action(struct mgsl_struct *info)
{
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&info->irq_spinlock,flags);
if (info->pending_bh & BH_RECEIVE) {
info->pending_bh &= ~BH_RECEIVE;
rc = BH_RECEIVE;
} else if (info->pending_bh & BH_TRANSMIT) {
info->pending_bh &= ~BH_TRANSMIT;
rc = BH_TRANSMIT;
} else if (info->pending_bh & BH_STATUS) {
info->pending_bh &= ~BH_STATUS;
rc = BH_STATUS;
}
if (!rc) {
/* Mark BH routine as complete */
info->bh_running = false;
info->bh_requested = false;
}
spin_unlock_irqrestore(&info->irq_spinlock,flags);
return rc;
}
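/*
 * Usage note: the ISR sets bits in pending_bh and schedules the work
 * item; each call to mgsl_bh_action() then consumes a single pending
 * bit under the IRQ spinlock, so receive work is always drained before
 * transmit and status work.
 */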
/*
* Perform bottom half processing of work items queued by ISR.
*/
static void mgsl_bh_handler(struct work_struct *work)
{
struct mgsl_struct *info =
container_of(work, struct mgsl_struct, task);
int action;
if ( debug_level >= DEBUG_LEVEL_BH )
printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
__FILE__,__LINE__,info->device_name);
info->bh_running = true;
while((action = mgsl_bh_action(info)) != 0) {
/* Process work item */
if ( debug_level >= DEBUG_LEVEL_BH )
printk( "%s(%d):mgsl_bh_handler() work item action=%d\n",
__FILE__,__LINE__,action);
switch (action) {
case BH_RECEIVE:
mgsl_bh_receive(info);
break;
case BH_TRANSMIT:
mgsl_bh_transmit(info);
break;
case BH_STATUS:
mgsl_bh_status(info);
break;
default:
/* unknown work item ID */
printk("Unknown work item ID=%08X!\n", action);
break;
}
}
if ( debug_level >= DEBUG_LEVEL_BH )
printk( "%s(%d):mgsl_bh_handler(%s) exit\n",
__FILE__,__LINE__,info->device_name);
}
static void mgsl_bh_receive(struct mgsl_struct *info)
{
bool (*get_rx_frame)(struct mgsl_struct *info) =
(info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
if ( debug_level >= DEBUG_LEVEL_BH )
printk( "%s(%d):mgsl_bh_receive(%s)\n",
__FILE__,__LINE__,info->device_name);
do
{
if (info->rx_rcc_underrun) {
unsigned long flags;
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_start_receiver(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
return;
}
} while(get_rx_frame(info));
}
static void mgsl_bh_transmit(struct mgsl_struct *info)
{
struct tty_struct *tty = info->port.tty;
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_BH )
printk( "%s(%d):mgsl_bh_transmit() entry on %s\n",
__FILE__,__LINE__,info->device_name);
if (tty)
tty_wakeup(tty);
/* if transmitter idle and loopmode_send_done_requested
* then start echoing RxD to TxD
*/
spin_lock_irqsave(&info->irq_spinlock,flags);
if ( !info->tx_active && info->loopmode_send_done_requested )
usc_loopmode_send_done( info );
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
static void mgsl_bh_status(struct mgsl_struct *info)
{
if ( debug_level >= DEBUG_LEVEL_BH )
printk( "%s(%d):mgsl_bh_status() entry on %s\n",
__FILE__,__LINE__,info->device_name);
info->ri_chkcount = 0;
info->dsr_chkcount = 0;
info->dcd_chkcount = 0;
info->cts_chkcount = 0;
}
/* mgsl_isr_receive_status()
*
* Service a receive status interrupt. The type of status
* interrupt is indicated by the state of the RCSR.
* This is only used for HDLC mode.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void mgsl_isr_receive_status( struct mgsl_struct *info )
{
u16 status = usc_InReg( info, RCSR );
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
__FILE__,__LINE__,status);
if ( (status & RXSTATUS_ABORT_RECEIVED) &&
info->loopmode_insert_requested &&
usc_loopmode_active(info) )
{
++info->icount.rxabort;
info->loopmode_insert_requested = false;
/* clear CMR:13 to start echoing RxD to TxD */
info->cmr_value &= ~BIT13;
usc_OutReg(info, CMR, info->cmr_value);
/* disable received abort irq (no longer required) */
usc_OutReg(info, RICR,
(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
}
if (status & (RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED)) {
if (status & RXSTATUS_EXITED_HUNT)
info->icount.exithunt++;
if (status & RXSTATUS_IDLE_RECEIVED)
info->icount.rxidle++;
wake_up_interruptible(&info->event_wait_q);
}
if (status & RXSTATUS_OVERRUN){
info->icount.rxover++;
usc_process_rxoverrun_sync( info );
}
usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
usc_UnlatchRxstatusBits( info, status );
} /* end of mgsl_isr_receive_status() */
/* mgsl_isr_transmit_status()
*
* Service a transmit status interrupt
* HDLC mode :end of transmit frame
* Async mode:all data is sent
* transmit status is indicated by bits in the TCSR.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void mgsl_isr_transmit_status( struct mgsl_struct *info )
{
u16 status = usc_InReg( info, TCSR );
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):mgsl_isr_transmit_status status=%04X\n",
__FILE__,__LINE__,status);
usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
usc_UnlatchTxstatusBits( info, status );
if ( status & (TXSTATUS_UNDERRUN | TXSTATUS_ABORT_SENT) )
{
/* finished sending HDLC abort. This may leave */
/* the TxFifo with data from the aborted frame */
/* so purge the TxFifo. Also shutdown the DMA */
/* channel in case there is data remaining in */
/* the DMA buffer */
usc_DmaCmd( info, DmaCmd_ResetTxChannel );
usc_RTCmd( info, RTCmd_PurgeTxFifo );
}
if ( status & TXSTATUS_EOF_SENT )
info->icount.txok++;
else if ( status & TXSTATUS_UNDERRUN )
info->icount.txunder++;
else if ( status & TXSTATUS_ABORT_SENT )
info->icount.txabort++;
else
info->icount.txunder++;
info->tx_active = false;
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
del_timer(&info->tx_timer);
if ( info->drop_rts_on_tx_done ) {
usc_get_serial_signals( info );
if ( info->serial_signals & SerialSignal_RTS ) {
info->serial_signals &= ~SerialSignal_RTS;
usc_set_serial_signals( info );
}
info->drop_rts_on_tx_done = false;
}
#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_tx_done(info);
else
#endif
{
if (info->port.tty->stopped || info->port.tty->hw_stopped) {
usc_stop_transmitter(info);
return;
}
info->pending_bh |= BH_TRANSMIT;
}
} /* end of mgsl_isr_transmit_status() */
/* mgsl_isr_io_pin()
*
* Service an Input/Output pin interrupt. The type of
* interrupt is indicated by bits in the MISR
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void mgsl_isr_io_pin( struct mgsl_struct *info )
{
struct mgsl_icount *icount;
u16 status = usc_InReg( info, MISR );
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):mgsl_isr_io_pin status=%04X\n",
__FILE__,__LINE__,status);
usc_ClearIrqPendingBits( info, IO_PIN );
usc_UnlatchIostatusBits( info, status );
if (status & (MISCSTATUS_CTS_LATCHED | MISCSTATUS_DCD_LATCHED |
MISCSTATUS_DSR_LATCHED | MISCSTATUS_RI_LATCHED) ) {
icount = &info->icount;
/* update input line counters */
if (status & MISCSTATUS_RI_LATCHED) {
if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
usc_DisablestatusIrqs(info,SICR_RI);
icount->rng++;
if ( status & MISCSTATUS_RI )
info->input_signal_events.ri_up++;
else
info->input_signal_events.ri_down++;
}
if (status & MISCSTATUS_DSR_LATCHED) {
if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
usc_DisablestatusIrqs(info,SICR_DSR);
icount->dsr++;
if ( status & MISCSTATUS_DSR )
info->input_signal_events.dsr_up++;
else
info->input_signal_events.dsr_down++;
}
if (status & MISCSTATUS_DCD_LATCHED) {
if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
usc_DisablestatusIrqs(info,SICR_DCD);
icount->dcd++;
if (status & MISCSTATUS_DCD) {
info->input_signal_events.dcd_up++;
} else
info->input_signal_events.dcd_down++;
#if SYNCLINK_GENERIC_HDLC
if (info->netcount) {
if (status & MISCSTATUS_DCD)
netif_carrier_on(info->netdev);
else
netif_carrier_off(info->netdev);
}
#endif
}
if (status & MISCSTATUS_CTS_LATCHED)
{
if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT)
usc_DisablestatusIrqs(info,SICR_CTS);
icount->cts++;
if ( status & MISCSTATUS_CTS )
info->input_signal_events.cts_up++;
else
info->input_signal_events.cts_down++;
}
wake_up_interruptible(&info->status_event_wait_q);
wake_up_interruptible(&info->event_wait_q);
if ( (info->port.flags & ASYNC_CHECK_CD) &&
(status & MISCSTATUS_DCD_LATCHED) ) {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s CD now %s...", info->device_name,
(status & MISCSTATUS_DCD) ? "on" : "off");
if (status & MISCSTATUS_DCD)
wake_up_interruptible(&info->port.open_wait);
else {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("doing serial hangup...");
if (info->port.tty)
tty_hangup(info->port.tty);
}
}
if (tty_port_cts_enabled(&info->port) &&
(status & MISCSTATUS_CTS_LATCHED) ) {
if (info->port.tty->hw_stopped) {
if (status & MISCSTATUS_CTS) {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("CTS tx start...");
if (info->port.tty)
info->port.tty->hw_stopped = 0;
usc_start_transmitter(info);
info->pending_bh |= BH_TRANSMIT;
return;
}
} else {
if (!(status & MISCSTATUS_CTS)) {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("CTS tx stop...");
if (info->port.tty)
info->port.tty->hw_stopped = 1;
usc_stop_transmitter(info);
}
}
}
}
info->pending_bh |= BH_STATUS;
/* for diagnostics set IRQ flag */
if ( status & MISCSTATUS_TXC_LATCHED ){
usc_OutReg( info, SICR,
(unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
info->irq_occurred = true;
}
} /* end of mgsl_isr_io_pin() */
/* mgsl_isr_transmit_data()
*
* Service a transmit data interrupt (async mode only).
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void mgsl_isr_transmit_data( struct mgsl_struct *info )
{
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):mgsl_isr_transmit_data xmit_cnt=%d\n",
__FILE__,__LINE__,info->xmit_cnt);
usc_ClearIrqPendingBits( info, TRANSMIT_DATA );
if (info->port.tty->stopped || info->port.tty->hw_stopped) {
usc_stop_transmitter(info);
return;
}
if ( info->xmit_cnt )
usc_load_txfifo( info );
else
info->tx_active = false;
if (info->xmit_cnt < WAKEUP_CHARS)
info->pending_bh |= BH_TRANSMIT;
} /* end of mgsl_isr_transmit_data() */
/* mgsl_isr_receive_data()
*
* Service a receive data interrupt. This occurs
* when operating in asynchronous interrupt transfer mode.
* The receive data FIFO is flushed to the receive data buffers.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void mgsl_isr_receive_data( struct mgsl_struct *info )
{
int Fifocount;
u16 status;
int work = 0;
unsigned char DataByte;
struct mgsl_icount *icount = &info->icount;
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):mgsl_isr_receive_data\n",
__FILE__,__LINE__);
usc_ClearIrqPendingBits( info, RECEIVE_DATA );
/* select FIFO status for RICR readback */
usc_RCmd( info, RCmd_SelectRicrRxFifostatus );
/* clear the Wordstatus bit so that status readback */
/* only reflects the status of this byte */
usc_OutReg( info, RICR+LSBONLY, (u16)(usc_InReg(info, RICR+LSBONLY) & ~BIT3 ));
/* flush the receive FIFO */
while( (Fifocount = (usc_InReg(info,RICR) >> 8)) ) {
int flag;
/* read one byte from RxFIFO */
outw( (inw(info->io_base + CCAR) & 0x0780) | (RDR+LSBONLY),
info->io_base + CCAR );
DataByte = inb( info->io_base + CCAR );
/* get the status of the received byte */
status = usc_InReg(info, RCSR);
if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) )
usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
icount->rx++;
flag = 0;
if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) ) {
printk("rxerr=%04X\n",status);
/* update error statistics */
if ( status & RXSTATUS_BREAK_RECEIVED ) {
status &= ~(RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR);
icount->brk++;
} else if (status & RXSTATUS_PARITY_ERROR)
icount->parity++;
else if (status & RXSTATUS_FRAMING_ERROR)
icount->frame++;
else if (status & RXSTATUS_OVERRUN) {
/* must issue purge fifo cmd before */
/* 16C32 accepts more receive chars */
usc_RTCmd(info,RTCmd_PurgeRxFifo);
icount->overrun++;
}
/* discard char if tty control flags say so */
if (status & info->ignore_status_mask)
continue;
status &= info->read_status_mask;
if (status & RXSTATUS_BREAK_RECEIVED) {
flag = TTY_BREAK;
if (info->port.flags & ASYNC_SAK)
do_SAK(info->port.tty);
} else if (status & RXSTATUS_PARITY_ERROR)
flag = TTY_PARITY;
else if (status & RXSTATUS_FRAMING_ERROR)
flag = TTY_FRAME;
} /* end of if (error) */
work += tty_insert_flip_char(&info->port, DataByte, flag);
if (status & RXSTATUS_OVERRUN) {
/* Overrun is special, since it's
* reported immediately, and doesn't
* affect the current character
*/
work += tty_insert_flip_char(&info->port, 0, TTY_OVERRUN);
}
}
if ( debug_level >= DEBUG_LEVEL_ISR ) {
printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n",
__FILE__,__LINE__,icount->rx,icount->brk,
icount->parity,icount->frame,icount->overrun);
}
if(work)
tty_flip_buffer_push(&info->port);
}
/* mgsl_isr_misc()
*
* Service a miscellaneous interrupt source.
*
* Arguments: info pointer to device extension (instance data)
* Return Value: None
*/
static void mgsl_isr_misc( struct mgsl_struct *info )
{
u16 status = usc_InReg( info, MISR );
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):mgsl_isr_misc status=%04X\n",
__FILE__,__LINE__,status);
if ((status & MISCSTATUS_RCC_UNDERRUN) &&
(info->params.mode == MGSL_MODE_HDLC)) {
/* turn off receiver and rx DMA */
usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
usc_DmaCmd(info, DmaCmd_ResetRxChannel);
usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
usc_DisableInterrupts(info, RECEIVE_DATA | RECEIVE_STATUS);
/* schedule BH handler to restart receiver */
info->pending_bh |= BH_RECEIVE;
info->rx_rcc_underrun = true;
}
usc_ClearIrqPendingBits( info, MISC );
usc_UnlatchMiscstatusBits( info, status );
} /* end of mgsl_isr_misc() */
/* mgsl_isr_null()
*
* Services undefined interrupt vectors from the
* USC (hence this function should never be called).
*
* Arguments: info pointer to device extension (instance data)
* Return Value: None
*/
static void mgsl_isr_null( struct mgsl_struct *info )
{
} /* end of mgsl_isr_null() */
/* mgsl_isr_receive_dma()
*
* Service a receive DMA channel interrupt.
* For this driver there are two sources of receive DMA interrupts
* as identified in the Receive DMA Mode Register (RDMR):
*
* BIT3 EOA/EOL End of List, all receive buffers in receive
* buffer list have been filled (no more free buffers
* available). The DMA controller has shut down.
*
* BIT2 EOB End of Buffer. This interrupt occurs when a receive
* DMA buffer is terminated in response to completion
* of a good frame or a frame with errors. The status
* of the frame is stored in the buffer entry in the
* list of receive buffer entries.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void mgsl_isr_receive_dma( struct mgsl_struct *info )
{
u16 status;
/* clear interrupt pending and IUS bit for Rx DMA IRQ */
usc_OutDmaReg( info, CDIR, BIT9 | BIT1 );
/* Read the receive DMA status to identify interrupt type. */
/* This also clears the status bits. */
status = usc_InDmaReg( info, RDMR );
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):mgsl_isr_receive_dma(%s) status=%04X\n",
__FILE__,__LINE__,info->device_name,status);
info->pending_bh |= BH_RECEIVE;
if ( status & BIT3 ) {
info->rx_overflow = true;
info->icount.buf_overrun++;
}
} /* end of mgsl_isr_receive_dma() */
/* mgsl_isr_transmit_dma()
*
* This function services a transmit DMA channel interrupt.
*
* For this driver there is one source of transmit DMA interrupts
* as identified in the Transmit DMA Mode Register (TDMR):
*
* BIT2 EOB End of Buffer. This interrupt occurs when a
* transmit DMA buffer has been emptied.
*
* The driver maintains enough transmit DMA buffers to hold at least
* one max frame size transmit frame. When operating in a buffered
* transmit mode, there may be enough transmit DMA buffers to hold at
* least two or more max frame size frames. On an EOB condition,
* determine if there are any queued transmit buffers and copy into
* transmit DMA buffers if we have room.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
{
u16 status;
/* clear interrupt pending and IUS bit for Tx DMA IRQ */
usc_OutDmaReg(info, CDIR, BIT8 | BIT0 );
/* Read the transmit DMA status to identify interrupt type. */
/* This also clears the status bits. */
status = usc_InDmaReg( info, TDMR );
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):mgsl_isr_transmit_dma(%s) status=%04X\n",
__FILE__,__LINE__,info->device_name,status);
if ( status & BIT2 ) {
--info->tx_dma_buffers_used;
/* if there are transmit frames queued,
* try to load the next one
*/
if ( load_next_tx_holding_buffer(info) ) {
/* if call returns non-zero value, we have
* at least one free tx holding buffer
*/
info->pending_bh |= BH_TRANSMIT;
}
}
} /* end of mgsl_isr_transmit_dma() */
/* mgsl_interrupt()
*
* Interrupt service routine entry point.
*
* Arguments:
*
* dummy interrupt number (unused)
* dev_id device ID supplied during interrupt registration
*
* Return Value: IRQ_HANDLED
*/
static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
{
struct mgsl_struct *info = dev_id;
u16 UscVector;
u16 DmaVector;
if ( debug_level >= DEBUG_LEVEL_ISR )
printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)entry.\n",
__FILE__, __LINE__, info->irq_level);
spin_lock(&info->irq_spinlock);
for(;;) {
/* Read the interrupt vectors from hardware. */
UscVector = usc_InReg(info, IVR) >> 9;
DmaVector = usc_InDmaReg(info, DIVR);
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):%s UscVector=%08X DmaVector=%08X\n",
__FILE__,__LINE__,info->device_name,UscVector,DmaVector);
if ( !UscVector && !DmaVector )
break;
/* Dispatch interrupt vector */
if ( UscVector )
(*UscIsrTable[UscVector])(info);
else if ( (DmaVector&(BIT10|BIT9)) == BIT10)
mgsl_isr_transmit_dma(info);
else
mgsl_isr_receive_dma(info);
if ( info->isr_overflow ) {
printk(KERN_ERR "%s(%d):%s isr overflow irq=%d\n",
__FILE__, __LINE__, info->device_name, info->irq_level);
usc_DisableMasterIrqBit(info);
usc_DisableDmaInterrupts(info,DICR_MASTER);
break;
}
}
/* Request bottom half processing if there's something
* for it to do and the bh is not already running
*/
if ( info->pending_bh && !info->bh_running && !info->bh_requested ) {
if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):%s queueing bh task.\n",
__FILE__,__LINE__,info->device_name);
schedule_work(&info->task);
info->bh_requested = true;
}
spin_unlock(&info->irq_spinlock);
if ( debug_level >= DEBUG_LEVEL_ISR )
printk(KERN_DEBUG "%s(%d):mgsl_interrupt(%d)exit.\n",
__FILE__, __LINE__, info->irq_level);
return IRQ_HANDLED;
} /* end of mgsl_interrupt() */
/* startup()
*
* Initialize and start device.
*
* Arguments: info pointer to device instance data
* Return Value: 0 if success, otherwise error code
*/
static int startup(struct mgsl_struct * info)
{
int retval = 0;
if ( debug_level >= DEBUG_LEVEL_INFO )
printk("%s(%d):mgsl_startup(%s)\n",__FILE__,__LINE__,info->device_name);
if (info->port.flags & ASYNC_INITIALIZED)
return 0;
if (!info->xmit_buf) {
/* allocate a page of memory for a transmit buffer */
info->xmit_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
if (!info->xmit_buf) {
printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
__FILE__,__LINE__,info->device_name);
return -ENOMEM;
}
}
info->pending_bh = 0;
memset(&info->icount, 0, sizeof(info->icount));
setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
/* Allocate and claim adapter resources */
retval = mgsl_claim_resources(info);
/* perform existence check and diagnostics */
if ( !retval )
retval = mgsl_adapter_test(info);
if ( retval ) {
if (capable(CAP_SYS_ADMIN) && info->port.tty)
set_bit(TTY_IO_ERROR, &info->port.tty->flags);
mgsl_release_resources(info);
return retval;
}
/* program hardware for current parameters */
mgsl_change_params(info);
if (info->port.tty)
clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
info->port.flags |= ASYNC_INITIALIZED;
return 0;
} /* end of startup() */
/* shutdown()
*
* Called by mgsl_close() and mgsl_hangup() to shutdown hardware
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void shutdown(struct mgsl_struct * info)
{
unsigned long flags;
if (!(info->port.flags & ASYNC_INITIALIZED))
return;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_shutdown(%s)\n",
__FILE__,__LINE__, info->device_name );
/* clear status wait queue because status changes */
/* can't happen after shutting down the hardware */
wake_up_interruptible(&info->status_event_wait_q);
wake_up_interruptible(&info->event_wait_q);
del_timer_sync(&info->tx_timer);
if (info->xmit_buf) {
free_page((unsigned long) info->xmit_buf);
info->xmit_buf = NULL;
}
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_DisableMasterIrqBit(info);
usc_stop_receiver(info);
usc_stop_transmitter(info);
usc_DisableInterrupts(info,RECEIVE_DATA | RECEIVE_STATUS |
TRANSMIT_DATA | TRANSMIT_STATUS | IO_PIN | MISC );
usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
/* Disable DMAEN (Port 7, Bit 14) */
/* This disconnects the DMA request signal from the ISA bus */
/* on the ISA adapter. This has no effect for the PCI adapter */
usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) | BIT14));
/* Disable INTEN (Port 6, Bit12) */
/* This disconnects the IRQ request signal to the ISA bus */
/* on the ISA adapter. This has no effect for the PCI adapter */
usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) | BIT12));
if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
usc_set_serial_signals(info);
}
spin_unlock_irqrestore(&info->irq_spinlock,flags);
mgsl_release_resources(info);
if (info->port.tty)
set_bit(TTY_IO_ERROR, &info->port.tty->flags);
info->port.flags &= ~ASYNC_INITIALIZED;
} /* end of shutdown() */
static void mgsl_program_hw(struct mgsl_struct *info)
{
unsigned long flags;
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_stop_receiver(info);
usc_stop_transmitter(info);
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
if (info->params.mode == MGSL_MODE_HDLC ||
info->params.mode == MGSL_MODE_RAW ||
info->netcount)
usc_set_sync_mode(info);
else
usc_set_async_mode(info);
usc_set_serial_signals(info);
info->dcd_chkcount = 0;
info->cts_chkcount = 0;
info->ri_chkcount = 0;
info->dsr_chkcount = 0;
usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
usc_EnableInterrupts(info, IO_PIN);
usc_get_serial_signals(info);
if (info->netcount || info->port.tty->termios.c_cflag & CREAD)
usc_start_receiver(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
/* Reconfigure adapter based on new parameters
*/
static void mgsl_change_params(struct mgsl_struct *info)
{
unsigned cflag;
int bits_per_char;
if (!info->port.tty)
return;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_change_params(%s)\n",
__FILE__,__LINE__, info->device_name );
cflag = info->port.tty->termios.c_cflag;
/* if B0 rate (hangup) specified then negate RTS and DTR */
/* otherwise assert RTS and DTR */
if (cflag & CBAUD)
info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
/* byte size and parity */
switch (cflag & CSIZE) {
case CS5: info->params.data_bits = 5; break;
case CS6: info->params.data_bits = 6; break;
case CS7: info->params.data_bits = 7; break;
case CS8: info->params.data_bits = 8; break;
/* Never happens, but GCC is too dumb to figure it out */
default: info->params.data_bits = 7; break;
}
if (cflag & CSTOPB)
info->params.stop_bits = 2;
else
info->params.stop_bits = 1;
info->params.parity = ASYNC_PARITY_NONE;
if (cflag & PARENB) {
if (cflag & PARODD)
info->params.parity = ASYNC_PARITY_ODD;
else
info->params.parity = ASYNC_PARITY_EVEN;
#ifdef CMSPAR
if (cflag & CMSPAR)
info->params.parity = ASYNC_PARITY_SPACE;
#endif
}
/* calculate number of jiffies to transmit a full
* FIFO (32 bytes) at specified data rate
*/
bits_per_char = info->params.data_bits +
info->params.stop_bits + 1;
/* if port data rate is set to 460800 or less then
* allow tty settings to override, otherwise keep the
* current data rate.
*/
if (info->params.data_rate <= 460800)
info->params.data_rate = tty_get_baud_rate(info->port.tty);
if ( info->params.data_rate ) {
info->timeout = (32*HZ*bits_per_char) /
info->params.data_rate;
}
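/*
 * Worked example (illustrative): at 9600 bps with 8 data bits and
 * 1 stop bit, bits_per_char = 8 + 1 + 1 = 10, so
 * timeout = (32 * HZ * 10) / 9600, roughly HZ/30 jiffies before
 * the slop below is added.
 */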
info->timeout += HZ/50; /* Add .02 seconds of slop */
if (cflag & CRTSCTS)
info->port.flags |= ASYNC_CTS_FLOW;
else
info->port.flags &= ~ASYNC_CTS_FLOW;
if (cflag & CLOCAL)
info->port.flags &= ~ASYNC_CHECK_CD;
else
info->port.flags |= ASYNC_CHECK_CD;
/* process tty input control flags */
info->read_status_mask = RXSTATUS_OVERRUN;
if (I_INPCK(info->port.tty))
info->read_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
if (I_BRKINT(info->port.tty) || I_PARMRK(info->port.tty))
info->read_status_mask |= RXSTATUS_BREAK_RECEIVED;
if (I_IGNPAR(info->port.tty))
info->ignore_status_mask |= RXSTATUS_PARITY_ERROR | RXSTATUS_FRAMING_ERROR;
if (I_IGNBRK(info->port.tty)) {
info->ignore_status_mask |= RXSTATUS_BREAK_RECEIVED;
/* If ignoring parity and break indicators, ignore
* overruns too. (For real raw support).
*/
if (I_IGNPAR(info->port.tty))
info->ignore_status_mask |= RXSTATUS_OVERRUN;
}
mgsl_program_hw(info);
} /* end of mgsl_change_params() */
/* mgsl_put_char()
*
* Add a character to the transmit buffer.
*
* Arguments: tty pointer to tty information structure
* ch character to add to transmit buffer
*
* Return Value: 1 if the character was queued, 0 if not
*/
static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
{
struct mgsl_struct *info = tty->driver_data;
unsigned long flags;
int ret = 0;
if (debug_level >= DEBUG_LEVEL_INFO) {
printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
__FILE__, __LINE__, ch, info->device_name);
}
if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
return 0;
if (!info->xmit_buf)
return 0;
spin_lock_irqsave(&info->irq_spinlock, flags);
if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
info->xmit_buf[info->xmit_head++] = ch;
info->xmit_head &= SERIAL_XMIT_SIZE-1;
info->xmit_cnt++;
ret = 1;
}
}
spin_unlock_irqrestore(&info->irq_spinlock, flags);
return ret;
} /* end of mgsl_put_char() */
/* mgsl_flush_chars()
*
* Enable transmitter so remaining characters in the
* transmit buffer are sent.
*
* Arguments: tty pointer to tty information structure
* Return Value: None
*/
static void mgsl_flush_chars(struct tty_struct *tty)
{
struct mgsl_struct *info = tty->driver_data;
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):mgsl_flush_chars() entry on %s xmit_cnt=%d\n",
__FILE__,__LINE__,info->device_name,info->xmit_cnt);
if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_chars"))
return;
if (info->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
!info->xmit_buf)
return;
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):mgsl_flush_chars() starting transmitter on %s\n",
__FILE__,__LINE__,info->device_name );
spin_lock_irqsave(&info->irq_spinlock,flags);
if (!info->tx_active) {
if ( (info->params.mode == MGSL_MODE_HDLC ||
info->params.mode == MGSL_MODE_RAW) && info->xmit_cnt ) {
/* operating in synchronous (frame oriented) mode */
/* copy data from circular xmit_buf to */
/* transmit DMA buffer. */
mgsl_load_tx_dma_buffer(info,
info->xmit_buf,info->xmit_cnt);
}
usc_start_transmitter(info);
}
spin_unlock_irqrestore(&info->irq_spinlock,flags);
} /* end of mgsl_flush_chars() */
/* mgsl_write()
*
* Send a block of data
*
* Arguments:
*
* tty pointer to tty information structure
* buf pointer to buffer containing send data
* count size of send data in bytes
*
* Return Value: number of characters written
*/
static int mgsl_write(struct tty_struct * tty,
const unsigned char *buf, int count)
{
int c, ret = 0;
struct mgsl_struct *info = tty->driver_data;
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):mgsl_write(%s) count=%d\n",
__FILE__,__LINE__,info->device_name,count);
if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
goto cleanup;
if (!info->xmit_buf)
goto cleanup;
if ( info->params.mode == MGSL_MODE_HDLC ||
info->params.mode == MGSL_MODE_RAW ) {
/* operating in synchronous (frame oriented) mode */
if (info->tx_active) {
if ( info->params.mode == MGSL_MODE_HDLC ) {
ret = 0;
goto cleanup;
}
/* transmitter is actively sending data -
* if we have multiple transmit dma and
* holding buffers, attempt to queue this
* frame for transmission at a later time.
*/
if (info->tx_holding_count >= info->num_tx_holding_buffers ) {
/* no tx holding buffers available */
ret = 0;
goto cleanup;
}
/* queue transmit frame request */
ret = count;
save_tx_buffer_request(info,buf,count);
/* if we have sufficient tx dma buffers,
* load the next buffered tx request
*/
spin_lock_irqsave(&info->irq_spinlock,flags);
load_next_tx_holding_buffer(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
goto cleanup;
}
/* if operating in HDLC LoopMode and the adapter */
/* has yet to be inserted into the loop, we can't */
/* transmit */
if ( (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) &&
!usc_loopmode_active(info) )
{
ret = 0;
goto cleanup;
}
if ( info->xmit_cnt ) {
/* Send data accumulated from send_char() calls */
/* as a frame and wait before accepting more data. */
ret = 0;
/* copy data from circular xmit_buf to */
/* transmit DMA buffer. */
mgsl_load_tx_dma_buffer(info,
info->xmit_buf,info->xmit_cnt);
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):mgsl_write(%s) sync xmit_cnt flushing\n",
__FILE__,__LINE__,info->device_name);
} else {
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):mgsl_write(%s) sync transmit accepted\n",
__FILE__,__LINE__,info->device_name);
ret = count;
info->xmit_cnt = count;
mgsl_load_tx_dma_buffer(info,buf,count);
}
} else {
while (1) {
spin_lock_irqsave(&info->irq_spinlock,flags);
c = min_t(int, count,
min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
SERIAL_XMIT_SIZE - info->xmit_head));
if (c <= 0) {
spin_unlock_irqrestore(&info->irq_spinlock,flags);
break;
}
memcpy(info->xmit_buf + info->xmit_head, buf, c);
info->xmit_head = ((info->xmit_head + c) &
(SERIAL_XMIT_SIZE-1));
info->xmit_cnt += c;
spin_unlock_irqrestore(&info->irq_spinlock,flags);
buf += c;
count -= c;
ret += c;
}
}
if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
spin_lock_irqsave(&info->irq_spinlock,flags);
if (!info->tx_active)
usc_start_transmitter(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
cleanup:
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):mgsl_write(%s) returning=%d\n",
__FILE__,__LINE__,info->device_name,ret);
return ret;
} /* end of mgsl_write() */
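/*
 * Usage sketch (illustrative only, not part of the driver): in HDLC
 * mode each write() maps to one frame, and a return value of 0 means
 * the frame was not accepted and should be retried later. fd is an
 * open descriptor on the device node; the frame buffer and length
 * below are hypothetical.
 *
 *    #include <stdio.h>
 *    #include <unistd.h>
 *
 *    int n = write(fd, frame, frame_len);
 *    if (n == 0)
 *        fprintf(stderr, "transmitter busy, retry later\n");
 */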
/* mgsl_write_room()
*
* Return the count of free bytes in transmit buffer
*
* Arguments: tty pointer to tty info structure
* Return Value: count of free bytes in transmit buffer
*/
static int mgsl_write_room(struct tty_struct *tty)
{
struct mgsl_struct *info = tty->driver_data;
int ret;
if (mgsl_paranoia_check(info, tty->name, "mgsl_write_room"))
return 0;
ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
if (ret < 0)
ret = 0;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_write_room(%s)=%d\n",
__FILE__,__LINE__, info->device_name,ret );
if ( info->params.mode == MGSL_MODE_HDLC ||
info->params.mode == MGSL_MODE_RAW ) {
/* operating in synchronous (frame oriented) mode */
if ( info->tx_active )
return 0;
else
return HDLC_MAX_FRAME_SIZE;
}
return ret;
} /* end of mgsl_write_room() */
/* mgsl_chars_in_buffer()
*
* Return the count of bytes in transmit buffer
*
* Arguments: tty pointer to tty info structure
* Return Value: count of bytes in transmit buffer
*/
static int mgsl_chars_in_buffer(struct tty_struct *tty)
{
struct mgsl_struct *info = tty->driver_data;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_chars_in_buffer(%s)\n",
__FILE__,__LINE__, info->device_name );
if (mgsl_paranoia_check(info, tty->name, "mgsl_chars_in_buffer"))
return 0;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_chars_in_buffer(%s)=%d\n",
__FILE__,__LINE__, info->device_name,info->xmit_cnt );
if ( info->params.mode == MGSL_MODE_HDLC ||
info->params.mode == MGSL_MODE_RAW ) {
/* operating in synchronous (frame oriented) mode */
if ( info->tx_active )
return info->max_frame_size;
else
return 0;
}
return info->xmit_cnt;
} /* end of mgsl_chars_in_buffer() */
/* mgsl_flush_buffer()
*
* Discard all data in the send buffer
*
* Arguments: tty pointer to tty info structure
* Return Value: None
*/
static void mgsl_flush_buffer(struct tty_struct *tty)
{
struct mgsl_struct *info = tty->driver_data;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_flush_buffer(%s) entry\n",
__FILE__,__LINE__, info->device_name );
if (mgsl_paranoia_check(info, tty->name, "mgsl_flush_buffer"))
return;
spin_lock_irqsave(&info->irq_spinlock,flags);
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
del_timer(&info->tx_timer);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
tty_wakeup(tty);
}
/* mgsl_send_xchar()
*
* Send a high-priority XON/XOFF character
*
* Arguments: tty pointer to tty info structure
* ch character to send
* Return Value: None
*/
static void mgsl_send_xchar(struct tty_struct *tty, char ch)
{
struct mgsl_struct *info = tty->driver_data;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_send_xchar(%s,%d)\n",
__FILE__,__LINE__, info->device_name, ch );
if (mgsl_paranoia_check(info, tty->name, "mgsl_send_xchar"))
return;
info->x_char = ch;
if (ch) {
/* Make sure transmit interrupts are on */
spin_lock_irqsave(&info->irq_spinlock,flags);
if (!info->tx_enabled)
usc_start_transmitter(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
} /* end of mgsl_send_xchar() */
/* mgsl_throttle()
*
* Signal remote device to throttle send data (our receive data)
*
* Arguments: tty pointer to tty info structure
* Return Value: None
*/
static void mgsl_throttle(struct tty_struct * tty)
{
struct mgsl_struct *info = tty->driver_data;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_throttle(%s) entry\n",
__FILE__,__LINE__, info->device_name );
if (mgsl_paranoia_check(info, tty->name, "mgsl_throttle"))
return;
if (I_IXOFF(tty))
mgsl_send_xchar(tty, STOP_CHAR(tty));
if (tty->termios.c_cflag & CRTSCTS) {
spin_lock_irqsave(&info->irq_spinlock,flags);
info->serial_signals &= ~SerialSignal_RTS;
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
} /* end of mgsl_throttle() */
/* mgsl_unthrottle()
*
* Signal remote device to stop throttling send data (our receive data)
*
* Arguments: tty pointer to tty info structure
* Return Value: None
*/
static void mgsl_unthrottle(struct tty_struct * tty)
{
struct mgsl_struct *info = tty->driver_data;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_unthrottle(%s) entry\n",
__FILE__,__LINE__, info->device_name );
if (mgsl_paranoia_check(info, tty->name, "mgsl_unthrottle"))
return;
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
else
mgsl_send_xchar(tty, START_CHAR(tty));
}
if (tty->termios.c_cflag & CRTSCTS) {
spin_lock_irqsave(&info->irq_spinlock,flags);
info->serial_signals |= SerialSignal_RTS;
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
} /* end of mgsl_unthrottle() */
/* mgsl_get_stats()
*
* get the current line error/signal counters, or reset the
* counters if user_icount is NULL
*
* Arguments: info pointer to device instance data
* user_icount pointer to buffer to hold returned stats
*
* Return Value: 0 if success, otherwise error code
*/
static int mgsl_get_stats(struct mgsl_struct * info, struct mgsl_icount __user *user_icount)
{
int err;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_get_params(%s)\n",
__FILE__,__LINE__, info->device_name);
if (!user_icount) {
memset(&info->icount, 0, sizeof(info->icount));
} else {
mutex_lock(&info->port.mutex);
COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount));
mutex_unlock(&info->port.mutex);
if (err)
return -EFAULT;
}
return 0;
} /* end of mgsl_get_stats() */
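/*
 * Usage sketch (illustrative only, not part of the driver): user
 * space reads the accumulated counters with MGSL_IOCGSTATS; passing
 * a NULL pointer resets them. The device node name /dev/ttySL0 is
 * an assumption for the example.
 *
 *    #include <stdio.h>
 *    #include <fcntl.h>
 *    #include <sys/ioctl.h>
 *    #include <linux/synclink.h>
 *
 *    int fd = open("/dev/ttySL0", O_RDWR | O_NONBLOCK);
 *    struct mgsl_icount stats;
 *    if (ioctl(fd, MGSL_IOCGSTATS, &stats) == 0)
 *        printf("rx=%u tx=%u overrun=%u\n",
 *               stats.rx, stats.tx, stats.overrun);
 *    ioctl(fd, MGSL_IOCGSTATS, 0);
 */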
/* mgsl_get_params()
*
* get the current serial parameters information
*
* Arguments: info pointer to device instance data
* user_params pointer to buffer to hold returned params
*
* Return Value: 0 if success, otherwise error code
*/
static int mgsl_get_params(struct mgsl_struct * info, MGSL_PARAMS __user *user_params)
{
int err;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_get_params(%s)\n",
__FILE__,__LINE__, info->device_name);
mutex_lock(&info->port.mutex);
COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS));
mutex_unlock(&info->port.mutex);
if (err) {
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):mgsl_get_params(%s) user buffer copy failed\n",
__FILE__,__LINE__,info->device_name);
return -EFAULT;
}
return 0;
} /* end of mgsl_get_params() */
/* mgsl_set_params()
*
* set the serial parameters
*
* Arguments:
*
* info pointer to device instance data
* new_params user buffer containing new serial params
*
* Return Value: 0 if success, otherwise error code
*/
static int mgsl_set_params(struct mgsl_struct * info, MGSL_PARAMS __user *new_params)
{
unsigned long flags;
MGSL_PARAMS tmp_params;
int err;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_set_params %s\n", __FILE__,__LINE__,
info->device_name );
COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
if (err) {
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):mgsl_set_params(%s) user buffer copy failed\n",
__FILE__,__LINE__,info->device_name);
return -EFAULT;
}
mutex_lock(&info->port.mutex);
spin_lock_irqsave(&info->irq_spinlock,flags);
memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
spin_unlock_irqrestore(&info->irq_spinlock,flags);
mgsl_change_params(info);
mutex_unlock(&info->port.mutex);
return 0;
} /* end of mgsl_set_params() */
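/*
 * Usage sketch (illustrative only, not part of the driver): the
 * intended pattern is read-modify-write of the MGSL_PARAMS block so
 * unrelated settings are preserved. The device node name /dev/ttySL0
 * is an assumption.
 *
 *    #include <fcntl.h>
 *    #include <sys/ioctl.h>
 *    #include <linux/synclink.h>
 *
 *    int fd = open("/dev/ttySL0", O_RDWR | O_NONBLOCK);
 *    MGSL_PARAMS params;
 *    if (ioctl(fd, MGSL_IOCGPARAMS, &params) == 0) {
 *        params.mode = MGSL_MODE_HDLC;
 *        params.crc_type = HDLC_CRC_16_CCITT;
 *        ioctl(fd, MGSL_IOCSPARAMS, &params);
 *    }
 */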
/* mgsl_get_txidle()
*
* get the current transmit idle mode
*
* Arguments: info pointer to device instance data
* idle_mode pointer to buffer to hold returned idle mode
*
* Return Value: 0 if success, otherwise error code
*/
static int mgsl_get_txidle(struct mgsl_struct * info, int __user *idle_mode)
{
int err;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_get_txidle(%s)=%d\n",
__FILE__,__LINE__, info->device_name, info->idle_mode);
COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int));
if (err) {
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):mgsl_get_txidle(%s) user buffer copy failed\n",
__FILE__,__LINE__,info->device_name);
return -EFAULT;
}
return 0;
} /* end of mgsl_get_txidle() */
/* mgsl_set_txidle() service ioctl to set transmit idle mode
*
* Arguments: info pointer to device instance data
* idle_mode new idle mode
*
* Return Value: 0 if success, otherwise error code
*/
static int mgsl_set_txidle(struct mgsl_struct * info, int idle_mode)
{
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_set_txidle(%s,%d)\n", __FILE__,__LINE__,
info->device_name, idle_mode );
spin_lock_irqsave(&info->irq_spinlock,flags);
info->idle_mode = idle_mode;
usc_set_txidle( info );
spin_unlock_irqrestore(&info->irq_spinlock,flags);
return 0;
} /* end of mgsl_set_txidle() */
/* mgsl_txenable()
*
* enable or disable the transmitter
*
* Arguments:
*
* info pointer to device instance data
* enable 1 = enable, 0 = disable
*
* Return Value: 0 if success, otherwise error code
*/
static int mgsl_txenable(struct mgsl_struct * info, int enable)
{
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_txenable(%s,%d)\n", __FILE__,__LINE__,
info->device_name, enable);
spin_lock_irqsave(&info->irq_spinlock,flags);
if ( enable ) {
if ( !info->tx_enabled ) {
usc_start_transmitter(info);
/*--------------------------------------------------
* if HDLC/SDLC Loop mode, attempt to insert the
* station in the 'loop' by setting CMR:13. Upon
* receipt of the next GoAhead (RxAbort) sequence,
* the OnLoop indicator (CCSR:7) should go active
* to indicate that we are on the loop
*--------------------------------------------------*/
if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
usc_loopmode_insert_request( info );
}
} else {
if ( info->tx_enabled )
usc_stop_transmitter(info);
}
spin_unlock_irqrestore(&info->irq_spinlock,flags);
return 0;
} /* end of mgsl_txenable() */
/* mgsl_txabort() abort send HDLC frame
*
* Arguments: info pointer to device instance data
* Return Value: 0 if success, otherwise error code
*/
static int mgsl_txabort(struct mgsl_struct * info)
{
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_txabort(%s)\n", __FILE__,__LINE__,
info->device_name);
spin_lock_irqsave(&info->irq_spinlock,flags);
if ( info->tx_active && info->params.mode == MGSL_MODE_HDLC )
{
if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
usc_loopmode_cancel_transmit( info );
else
usc_TCmd(info,TCmd_SendAbort);
}
spin_unlock_irqrestore(&info->irq_spinlock,flags);
return 0;
} /* end of mgsl_txabort() */
/* mgsl_rxenable() enable or disable the receiver
*
* Arguments: info pointer to device instance data
* enable 1 = enable, 0 = disable
* Return Value: 0 if success, otherwise error code
*/
static int mgsl_rxenable(struct mgsl_struct * info, int enable)
{
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_rxenable(%s,%d)\n", __FILE__,__LINE__,
info->device_name, enable);
spin_lock_irqsave(&info->irq_spinlock,flags);
if ( enable ) {
if ( !info->rx_enabled )
usc_start_receiver(info);
} else {
if ( info->rx_enabled )
usc_stop_receiver(info);
}
spin_unlock_irqrestore(&info->irq_spinlock,flags);
return 0;
} /* end of mgsl_rxenable() */
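/*
 * Usage sketch (illustrative only, not part of the driver): the
 * enable ioctls take their argument by value, not by pointer, and
 * MGSL_IOCTXABORT takes no argument. fd is an open descriptor on
 * the device node.
 *
 *    ioctl(fd, MGSL_IOCRXENABLE, 1);
 *    ioctl(fd, MGSL_IOCTXENABLE, 1);
 *    ioctl(fd, MGSL_IOCTXABORT);
 */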
/* mgsl_wait_event() wait for specified event to occur
*
* Arguments: info pointer to device instance data
* mask pointer to bitmask of events to wait for
* Return Value: 0 if successful and bit mask updated with
* the events that triggered,
* otherwise error code
*/
static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
{
unsigned long flags;
int s;
int rc=0;
struct mgsl_icount cprev, cnow;
int events;
int mask;
struct _input_signal_events oldsigs, newsigs;
DECLARE_WAITQUEUE(wait, current);
COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int));
if (rc) {
return -EFAULT;
}
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_wait_event(%s,%d)\n", __FILE__,__LINE__,
info->device_name, mask);
spin_lock_irqsave(&info->irq_spinlock,flags);
/* return immediately if state matches requested events */
usc_get_serial_signals(info);
s = info->serial_signals;
events = mask &
( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) );
if (events) {
spin_unlock_irqrestore(&info->irq_spinlock,flags);
goto exit;
}
/* save current irq counts */
cprev = info->icount;
oldsigs = info->input_signal_events;
/* enable hunt and idle irqs if needed */
if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
u16 oldreg = usc_InReg(info,RICR);
u16 newreg = oldreg +
(mask & MgslEvent_ExitHuntMode ? RXSTATUS_EXITED_HUNT:0) +
(mask & MgslEvent_IdleReceived ? RXSTATUS_IDLE_RECEIVED:0);
if (oldreg != newreg)
usc_OutReg(info, RICR, newreg);
}
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&info->event_wait_q, &wait);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
for(;;) {
schedule();
if (signal_pending(current)) {
rc = -ERESTARTSYS;
break;
}
/* get current irq counts */
spin_lock_irqsave(&info->irq_spinlock,flags);
cnow = info->icount;
newsigs = info->input_signal_events;
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
/* if no change, wait aborted for some reason */
if (newsigs.dsr_up == oldsigs.dsr_up &&
newsigs.dsr_down == oldsigs.dsr_down &&
newsigs.dcd_up == oldsigs.dcd_up &&
newsigs.dcd_down == oldsigs.dcd_down &&
newsigs.cts_up == oldsigs.cts_up &&
newsigs.cts_down == oldsigs.cts_down &&
newsigs.ri_up == oldsigs.ri_up &&
newsigs.ri_down == oldsigs.ri_down &&
cnow.exithunt == cprev.exithunt &&
cnow.rxidle == cprev.rxidle) {
rc = -EIO;
break;
}
events = mask &
( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) +
(newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) +
(newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) +
(newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) +
(newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) +
(newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) +
(newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) +
(newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) +
(cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) +
(cnow.rxidle != cprev.rxidle ? MgslEvent_IdleReceived:0) );
if (events)
break;
cprev = cnow;
oldsigs = newsigs;
}
remove_wait_queue(&info->event_wait_q, &wait);
set_current_state(TASK_RUNNING);
if (mask & (MgslEvent_ExitHuntMode + MgslEvent_IdleReceived)) {
spin_lock_irqsave(&info->irq_spinlock,flags);
if (!waitqueue_active(&info->event_wait_q)) {
/* disable exit hunt mode/idle rcvd IRQs */
usc_OutReg(info, RICR, usc_InReg(info,RICR) &
~(RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED));
}
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
exit:
if ( rc == 0 )
PUT_USER(rc, events, mask_ptr);
return rc;
} /* end of mgsl_wait_event() */
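/*
 * Usage sketch (illustrative only, not part of the driver): the
 * caller passes a pointer to a bitmask of MgslEvent_* values and,
 * on success, the same word is overwritten with the events that
 * actually occurred. fd is an open descriptor on the device node.
 *
 *    #include <stdio.h>
 *    #include <sys/ioctl.h>
 *    #include <linux/synclink.h>
 *
 *    int mask = MgslEvent_DcdActive | MgslEvent_DcdInactive;
 *    if (ioctl(fd, MGSL_IOCWAITEVENT, &mask) == 0)
 *        printf("events=%#x\n", mask);
 */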
static int modem_input_wait(struct mgsl_struct *info,int arg)
{
unsigned long flags;
int rc;
struct mgsl_icount cprev, cnow;
DECLARE_WAITQUEUE(wait, current);
/* save current irq counts */
spin_lock_irqsave(&info->irq_spinlock,flags);
cprev = info->icount;
add_wait_queue(&info->status_event_wait_q, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
for(;;) {
schedule();
if (signal_pending(current)) {
rc = -ERESTARTSYS;
break;
}
/* get new irq counts */
spin_lock_irqsave(&info->irq_spinlock,flags);
cnow = info->icount;
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
/* if no change, wait aborted for some reason */
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
rc = -EIO;
break;
}
/* check for change in caller specified modem input */
if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) ||
(arg & TIOCM_DSR && cnow.dsr != cprev.dsr) ||
(arg & TIOCM_CD && cnow.dcd != cprev.dcd) ||
(arg & TIOCM_CTS && cnow.cts != cprev.cts)) {
rc = 0;
break;
}
cprev = cnow;
}
remove_wait_queue(&info->status_event_wait_q, &wait);
set_current_state(TASK_RUNNING);
return rc;
}
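/*
 * Usage sketch (illustrative only, not part of the driver): the
 * standard TIOCMIWAIT ioctl reaches modem_input_wait(); the mask of
 * inputs to watch is passed by value. fd is an open descriptor on
 * the device node.
 *
 *    #include <sys/ioctl.h>
 *    #include <termios.h>
 *
 *    if (ioctl(fd, TIOCMIWAIT, TIOCM_CD | TIOCM_CTS) == 0) {
 *        int bits;
 *        ioctl(fd, TIOCMGET, &bits);
 *    }
 */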
/* return the state of the serial control and status signals
*/
static int tiocmget(struct tty_struct *tty)
{
struct mgsl_struct *info = tty->driver_data;
unsigned int result;
unsigned long flags;
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_get_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):%s tiocmget() value=%08X\n",
__FILE__,__LINE__, info->device_name, result );
return result;
}
/* set modem control signals (DTR/RTS)
*/
static int tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct mgsl_struct *info = tty->driver_data;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):%s tiocmset(%x,%x)\n",
__FILE__,__LINE__,info->device_name, set, clear);
if (set & TIOCM_RTS)
info->serial_signals |= SerialSignal_RTS;
if (set & TIOCM_DTR)
info->serial_signals |= SerialSignal_DTR;
if (clear & TIOCM_RTS)
info->serial_signals &= ~SerialSignal_RTS;
if (clear & TIOCM_DTR)
info->serial_signals &= ~SerialSignal_DTR;
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
return 0;
}
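/*
 * Usage sketch (illustrative only, not part of the driver): these
 * handlers back the generic TIOCMGET/TIOCMSET ioctls, so signals
 * can be toggled from user space. fd is an open descriptor on the
 * device node.
 *
 *    #include <sys/ioctl.h>
 *
 *    int bits;
 *    ioctl(fd, TIOCMGET, &bits);
 *    bits |= TIOCM_RTS | TIOCM_DTR;
 *    ioctl(fd, TIOCMSET, &bits);
 */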
/* mgsl_break() Set or clear transmit break condition
*
* Arguments: tty pointer to tty instance data
* break_state -1=set break condition, 0=clear
* Return Value: error code
*/
static int mgsl_break(struct tty_struct *tty, int break_state)
{
struct mgsl_struct * info = tty->driver_data;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_break(%s,%d)\n",
__FILE__,__LINE__, info->device_name, break_state);
if (mgsl_paranoia_check(info, tty->name, "mgsl_break"))
return -EINVAL;
spin_lock_irqsave(&info->irq_spinlock,flags);
if (break_state == -1)
usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) | BIT7));
else
usc_OutReg(info,IOCR,(u16)(usc_InReg(info,IOCR) & ~BIT7));
spin_unlock_irqrestore(&info->irq_spinlock,flags);
return 0;
} /* end of mgsl_break() */
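/*
 * Usage sketch (illustrative only, not part of the driver): the tty
 * core calls mgsl_break() with -1 and then 0 around the break
 * interval when user space issues tcsendbreak() on an open
 * descriptor fd:
 *
 *    #include <termios.h>
 *
 *    tcsendbreak(fd, 0);
 */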
/*
* Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
* Return: writes the counters to the user-passed counter struct
* NB: both 1->0 and 0->1 transitions are counted except for
* RI where only 0->1 is counted.
*/
static int msgl_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct mgsl_struct * info = tty->driver_data;
struct mgsl_icount cnow; /* kernel counter temps */
unsigned long flags;
spin_lock_irqsave(&info->irq_spinlock,flags);
cnow = info->icount;
spin_unlock_irqrestore(&info->irq_spinlock,flags);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
icount->rng = cnow.rng;
icount->dcd = cnow.dcd;
icount->rx = cnow.rx;
icount->tx = cnow.tx;
icount->frame = cnow.frame;
icount->overrun = cnow.overrun;
icount->parity = cnow.parity;
icount->brk = cnow.brk;
icount->buf_overrun = cnow.buf_overrun;
return 0;
}
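/*
 * Usage sketch (illustrative only, not part of the driver): the
 * counters are read through the generic TIOCGICOUNT ioctl. fd is an
 * open descriptor on the device node.
 *
 *    #include <stdio.h>
 *    #include <sys/ioctl.h>
 *    #include <linux/serial.h>
 *
 *    struct serial_icounter_struct ic;
 *    if (ioctl(fd, TIOCGICOUNT, &ic) == 0)
 *        printf("dcd=%d cts=%d frame=%d\n", ic.dcd, ic.cts, ic.frame);
 */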
/* mgsl_ioctl() Service an IOCTL request
*
* Arguments:
*
* tty pointer to tty instance data
* cmd IOCTL command code
* arg command argument/context
*
* Return Value: 0 if success, otherwise error code
*/
static int mgsl_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct mgsl_struct * info = tty->driver_data;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
info->device_name, cmd );
if (mgsl_paranoia_check(info, tty->name, "mgsl_ioctl"))
return -ENODEV;
if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
(cmd != TIOCMIWAIT)) {
if (tty->flags & (1 << TTY_IO_ERROR))
return -EIO;
}
return mgsl_ioctl_common(info, cmd, arg);
}
static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
switch (cmd) {
case MGSL_IOCGPARAMS:
return mgsl_get_params(info, argp);
case MGSL_IOCSPARAMS:
return mgsl_set_params(info, argp);
case MGSL_IOCGTXIDLE:
return mgsl_get_txidle(info, argp);
case MGSL_IOCSTXIDLE:
return mgsl_set_txidle(info,(int)arg);
case MGSL_IOCTXENABLE:
return mgsl_txenable(info,(int)arg);
case MGSL_IOCRXENABLE:
return mgsl_rxenable(info,(int)arg);
case MGSL_IOCTXABORT:
return mgsl_txabort(info);
case MGSL_IOCGSTATS:
return mgsl_get_stats(info, argp);
case MGSL_IOCWAITEVENT:
return mgsl_wait_event(info, argp);
case MGSL_IOCLOOPTXDONE:
return mgsl_loopmode_send_done(info);
/* Wait for modem input (DCD,RI,DSR,CTS) change
* as specified by mask in arg (TIOCM_RNG/DSR/CD/CTS)
*/
case TIOCMIWAIT:
return modem_input_wait(info,(int)arg);
default:
return -ENOIOCTLCMD;
}
return 0;
}
/* mgsl_set_termios()
*
* Set new termios settings
*
* Arguments:
*
* tty pointer to tty structure
* old_termios pointer to the previous termios settings
*
* Return Value: None
*/
static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
struct mgsl_struct *info = tty->driver_data;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_set_termios %s\n", __FILE__,__LINE__,
tty->driver->name );
mgsl_change_params(info);
/* Handle transition to B0 status */
if (old_termios->c_cflag & CBAUD &&
!(tty->termios.c_cflag & CBAUD)) {
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
/* Handle transition away from B0 status */
if (!(old_termios->c_cflag & CBAUD) &&
tty->termios.c_cflag & CBAUD) {
info->serial_signals |= SerialSignal_DTR;
if (!(tty->termios.c_cflag & CRTSCTS) ||
!test_bit(TTY_THROTTLED, &tty->flags)) {
info->serial_signals |= SerialSignal_RTS;
}
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
/* Handle turning off CRTSCTS */
if (old_termios->c_cflag & CRTSCTS &&
!(tty->termios.c_cflag & CRTSCTS)) {
tty->hw_stopped = 0;
mgsl_start(tty);
}
} /* end of mgsl_set_termios() */
/* mgsl_close()
*
* Called when port is closed. Wait for remaining data to be
* sent. Disable port and free resources.
*
* Arguments:
*
* tty pointer to open tty structure
* filp pointer to open file object
*
* Return Value: None
*/
static void mgsl_close(struct tty_struct *tty, struct file * filp)
{
struct mgsl_struct * info = tty->driver_data;
if (mgsl_paranoia_check(info, tty->name, "mgsl_close"))
return;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
__FILE__,__LINE__, info->device_name, info->port.count);
if (tty_port_close_start(&info->port, tty, filp) == 0)
goto cleanup;
mutex_lock(&info->port.mutex);
if (info->port.flags & ASYNC_INITIALIZED)
mgsl_wait_until_sent(tty, info->timeout);
mgsl_flush_buffer(tty);
tty_ldisc_flush(tty);
shutdown(info);
mutex_unlock(&info->port.mutex);
tty_port_close_end(&info->port, tty);
info->port.tty = NULL;
cleanup:
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
tty->driver->name, info->port.count);
} /* end of mgsl_close() */
/* mgsl_wait_until_sent()
*
* Wait until the transmitter is empty.
*
* Arguments:
*
* tty pointer to tty info structure
* timeout time to wait for send completion
*
* Return Value: None
*/
static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct mgsl_struct * info = tty->driver_data;
unsigned long orig_jiffies, char_time;
if (!info )
return;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_wait_until_sent(%s) entry\n",
__FILE__,__LINE__, info->device_name );
if (mgsl_paranoia_check(info, tty->name, "mgsl_wait_until_sent"))
return;
if (!(info->port.flags & ASYNC_INITIALIZED))
goto exit;
orig_jiffies = jiffies;
/* Set check interval to 1/5 of estimated time to
* send a character, and make it at least 1. The check
* interval should also be less than the timeout.
* Note: use tight timings here to satisfy the NIST-PCTS.
*/
if ( info->params.data_rate ) {
char_time = info->timeout/(32 * 5);
if (!char_time)
char_time++;
} else
char_time = 1;
if (timeout)
char_time = min_t(unsigned long, char_time, timeout);
if ( info->params.mode == MGSL_MODE_HDLC ||
info->params.mode == MGSL_MODE_RAW ) {
while (info->tx_active) {
msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
break;
}
} else {
while (!(usc_InReg(info,TCSR) & TXSTATUS_ALL_SENT) &&
info->tx_enabled) {
msleep_interruptible(jiffies_to_msecs(char_time));
if (signal_pending(current))
break;
if (timeout && time_after(jiffies, orig_jiffies + timeout))
break;
}
}
exit:
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_wait_until_sent(%s) exit\n",
__FILE__,__LINE__, info->device_name );
} /* end of mgsl_wait_until_sent() */
/* mgsl_hangup()
*
* Called by tty_hangup() when a hangup is signaled.
* This is the same as closing all open files for the port.
*
* Arguments: tty pointer to associated tty object
* Return Value: None
*/
static void mgsl_hangup(struct tty_struct *tty)
{
struct mgsl_struct * info = tty->driver_data;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_hangup(%s)\n",
__FILE__,__LINE__, info->device_name );
if (mgsl_paranoia_check(info, tty->name, "mgsl_hangup"))
return;
mgsl_flush_buffer(tty);
shutdown(info);
info->port.count = 0;
info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
info->port.tty = NULL;
wake_up_interruptible(&info->port.open_wait);
} /* end of mgsl_hangup() */
/*
* carrier_raised()
*
* Return true if carrier is raised
*/
static int carrier_raised(struct tty_port *port)
{
unsigned long flags;
struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
spin_lock_irqsave(&info->irq_spinlock, flags);
usc_get_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock, flags);
return (info->serial_signals & SerialSignal_DCD) ? 1 : 0;
}
static void dtr_rts(struct tty_port *port, int on)
{
struct mgsl_struct *info = container_of(port, struct mgsl_struct, port);
unsigned long flags;
spin_lock_irqsave(&info->irq_spinlock,flags);
if (on)
info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
usc_set_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
/* block_til_ready()
*
* Block the current process until the specified port
* is ready to be opened.
*
* Arguments:
*
* tty pointer to tty info structure
* filp pointer to open file object
* info pointer to device instance data
*
* Return Value: 0 if success, otherwise error code
*/
static int block_til_ready(struct tty_struct *tty, struct file * filp,
struct mgsl_struct *info)
{
DECLARE_WAITQUEUE(wait, current);
int retval;
bool do_clocal = false;
unsigned long flags;
int dcd;
struct tty_port *port = &info->port;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):block_til_ready on %s\n",
__FILE__,__LINE__, tty->driver->name );
if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){
/* nonblock mode is set or port is not enabled */
port->flags |= ASYNC_NORMAL_ACTIVE;
return 0;
}
if (tty->termios.c_cflag & CLOCAL)
do_clocal = true;
/* Wait for carrier detect and the line to become
* free (i.e., not in use by the callout). While we are in
* this loop, port->count is dropped by one, so that
* mgsl_close() knows when to free things. We restore it upon
* exit, either normal or abnormal.
*/
retval = 0;
add_wait_queue(&port->open_wait, &wait);
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):block_til_ready before block on %s count=%d\n",
__FILE__,__LINE__, tty->driver->name, port->count );
spin_lock_irqsave(&info->irq_spinlock, flags);
port->count--;
spin_unlock_irqrestore(&info->irq_spinlock, flags);
port->blocked_open++;
while (1) {
if (C_BAUD(tty) && test_bit(ASYNCB_INITIALIZED, &port->flags))
tty_port_raise_dtr_rts(port);
set_current_state(TASK_INTERRUPTIBLE);
if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)){
retval = (port->flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS;
break;
}
dcd = tty_port_carrier_raised(&info->port);
if (!(port->flags & ASYNC_CLOSING) && (do_clocal || dcd))
break;
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):block_til_ready blocking on %s count=%d\n",
__FILE__,__LINE__, tty->driver->name, port->count );
tty_unlock(tty);
schedule();
tty_lock(tty);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&port->open_wait, &wait);
/* FIXME: Racy on hangup during close wait */
if (!tty_hung_up_p(filp))
port->count++;
port->blocked_open--;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
__FILE__,__LINE__, tty->driver->name, port->count );
if (!retval)
port->flags |= ASYNC_NORMAL_ACTIVE;
return retval;
} /* end of block_til_ready() */
static int mgsl_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct mgsl_struct *info;
int line = tty->index;
/* verify range of specified line number */
if (line >= mgsl_device_count) {
printk("%s(%d):mgsl_open with invalid line #%d.\n",
__FILE__, __LINE__, line);
return -ENODEV;
}
/* find the info structure for the specified line */
info = mgsl_device_list;
while (info && info->line != line)
info = info->next_device;
if (mgsl_paranoia_check(info, tty->name, "mgsl_open"))
return -ENODEV;
tty->driver_data = info;
return tty_port_install(&info->port, driver, tty);
}
/* mgsl_open()
*
* Called when a port is opened. Init and enable port.
* Perform serial-specific initialization for the tty structure.
*
* Arguments: tty pointer to tty info structure
* filp associated file pointer
*
* Return Value: 0 if success, otherwise error code
*/
static int mgsl_open(struct tty_struct *tty, struct file * filp)
{
struct mgsl_struct *info = tty->driver_data;
unsigned long flags;
int retval;
info->port.tty = tty;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
__FILE__,__LINE__,tty->driver->name, info->port.count);
/* If port is closing, signal caller to try again */
if (info->port.flags & ASYNC_CLOSING){
wait_event_interruptible_tty(tty, info->port.close_wait,
!(info->port.flags & ASYNC_CLOSING));
retval = ((info->port.flags & ASYNC_HUP_NOTIFY) ?
-EAGAIN : -ERESTARTSYS);
goto cleanup;
}
info->port.low_latency = (info->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
spin_lock_irqsave(&info->netlock, flags);
if (info->netcount) {
retval = -EBUSY;
spin_unlock_irqrestore(&info->netlock, flags);
goto cleanup;
}
info->port.count++;
spin_unlock_irqrestore(&info->netlock, flags);
if (info->port.count == 1) {
/* 1st open on this device, init hardware */
retval = startup(info);
if (retval < 0)
goto cleanup;
}
retval = block_til_ready(tty, filp, info);
if (retval) {
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):block_til_ready(%s) returned %d\n",
__FILE__,__LINE__, info->device_name, retval);
goto cleanup;
}
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s(%d):mgsl_open(%s) success\n",
__FILE__,__LINE__, info->device_name);
retval = 0;
cleanup:
if (retval) {
if (tty->count == 1)
info->port.tty = NULL; /* tty layer will release tty struct */
if(info->port.count)
info->port.count--;
}
return retval;
} /* end of mgsl_open() */
/*
* /proc fs routines....
*/
static inline void line_info(struct seq_file *m, struct mgsl_struct *info)
{
char stat_buf[30];
unsigned long flags;
if (info->bus_type == MGSL_BUS_TYPE_PCI) {
seq_printf(m, "%s:PCI io:%04X irq:%d mem:%08X lcr:%08X",
info->device_name, info->io_base, info->irq_level,
info->phys_memory_base, info->phys_lcr_base);
} else {
seq_printf(m, "%s:(E)ISA io:%04X irq:%d dma:%d",
info->device_name, info->io_base,
info->irq_level, info->dma_level);
}
/* output current serial signal states */
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_get_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
stat_buf[0] = 0;
stat_buf[1] = 0;
if (info->serial_signals & SerialSignal_RTS)
strcat(stat_buf, "|RTS");
if (info->serial_signals & SerialSignal_CTS)
strcat(stat_buf, "|CTS");
if (info->serial_signals & SerialSignal_DTR)
strcat(stat_buf, "|DTR");
if (info->serial_signals & SerialSignal_DSR)
strcat(stat_buf, "|DSR");
if (info->serial_signals & SerialSignal_DCD)
strcat(stat_buf, "|CD");
if (info->serial_signals & SerialSignal_RI)
strcat(stat_buf, "|RI");
if (info->params.mode == MGSL_MODE_HDLC ||
info->params.mode == MGSL_MODE_RAW ) {
seq_printf(m, " HDLC txok:%d rxok:%d",
info->icount.txok, info->icount.rxok);
if (info->icount.txunder)
seq_printf(m, " txunder:%d", info->icount.txunder);
if (info->icount.txabort)
seq_printf(m, " txabort:%d", info->icount.txabort);
if (info->icount.rxshort)
seq_printf(m, " rxshort:%d", info->icount.rxshort);
if (info->icount.rxlong)
seq_printf(m, " rxlong:%d", info->icount.rxlong);
if (info->icount.rxover)
seq_printf(m, " rxover:%d", info->icount.rxover);
if (info->icount.rxcrc)
seq_printf(m, " rxcrc:%d", info->icount.rxcrc);
} else {
seq_printf(m, " ASYNC tx:%d rx:%d",
info->icount.tx, info->icount.rx);
if (info->icount.frame)
seq_printf(m, " fe:%d", info->icount.frame);
if (info->icount.parity)
seq_printf(m, " pe:%d", info->icount.parity);
if (info->icount.brk)
seq_printf(m, " brk:%d", info->icount.brk);
if (info->icount.overrun)
seq_printf(m, " oe:%d", info->icount.overrun);
}
/* Append serial signal status to end */
seq_printf(m, " %s\n", stat_buf+1);
seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n",
info->tx_active,info->bh_requested,info->bh_running,
info->pending_bh);
spin_lock_irqsave(&info->irq_spinlock,flags);
{
u16 Tcsr = usc_InReg( info, TCSR );
u16 Tdmr = usc_InDmaReg( info, TDMR );
u16 Ticr = usc_InReg( info, TICR );
u16 Rcsr = usc_InReg( info, RCSR );
u16 Rdmr = usc_InDmaReg( info, RDMR );
u16 Ricr = usc_InReg( info, RICR );
u16 Icr = usc_InReg( info, ICR );
u16 Dccr = usc_InReg( info, DCCR );
u16 Tmr = usc_InReg( info, TMR );
u16 Tccr = usc_InReg( info, TCCR );
u16 Ccar = inw( info->io_base + CCAR );
seq_printf(m, "tcsr=%04X tdmr=%04X ticr=%04X rcsr=%04X rdmr=%04X\n"
"ricr=%04X icr =%04X dccr=%04X tmr=%04X tccr=%04X ccar=%04X\n",
Tcsr,Tdmr,Ticr,Rcsr,Rdmr,Ricr,Icr,Dccr,Tmr,Tccr,Ccar );
}
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
/* Called to print information about devices */
static int mgsl_proc_show(struct seq_file *m, void *v)
{
struct mgsl_struct *info;
seq_printf(m, "synclink driver:%s\n", driver_version);
info = mgsl_device_list;
while( info ) {
line_info(m, info);
info = info->next_device;
}
return 0;
}
static int mgsl_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, mgsl_proc_show, NULL);
}
static const struct file_operations mgsl_proc_fops = {
.owner = THIS_MODULE,
.open = mgsl_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/* mgsl_allocate_dma_buffers()
*
* Allocate and format DMA buffers (ISA adapter)
* or format shared memory buffers (PCI adapter).
*
* Arguments: info pointer to device instance data
* Return Value: 0 if success, otherwise error
*/
static int mgsl_allocate_dma_buffers(struct mgsl_struct *info)
{
unsigned short BuffersPerFrame;
info->last_mem_alloc = 0;
/* Calculate the number of DMA buffers necessary to hold the */
/* largest allowable frame size. Note: If the max frame size is */
/* not an even multiple of the DMA buffer size then we need to */
/* round the buffer count per frame up one. */
BuffersPerFrame = (unsigned short)(info->max_frame_size/DMABUFFERSIZE);
if ( info->max_frame_size % DMABUFFERSIZE )
BuffersPerFrame++;
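/*
 * Worked example (assuming the driver's 4K DMABUFFERSIZE):
 * max_frame_size = 4096 -> 4096/4096 = 1, no remainder -> 1 buffer/frame
 * max_frame_size = 4097 -> 4097/4096 = 1, remainder 1  -> 2 buffers/frame
 */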
if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
/*
* The PCI adapter has 256KBytes of shared memory to use.
* This is 64 PAGE_SIZE buffers.
*
* The first page is used for padding at this time so the
* buffer list does not begin at offset 0 of the PCI
* adapter's shared memory.
*
* The 2nd page is used for the buffer list. A 4K buffer
* list can hold 128 DMA_BUFFER structures at 32 bytes
* each.
*
* This leaves 62 4K pages.
*
* The next N pages are used for transmit frame(s). We
* reserve enough 4K page blocks to hold the required
* number of transmit dma buffers (num_tx_dma_buffers),
* each of MaxFrameSize size.
*
* Of the remaining pages (62-N), determine how many can
* be used to receive full MaxFrameSize inbound frames
*/
info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
info->rx_buffer_count = 62 - info->tx_buffer_count;
} else {
/* Calculate the number of PAGE_SIZE buffers needed for */
/* receive and transmit DMA buffers. */
/* Calculate the number of DMA buffers necessary to */
/* hold 7 max size receive frames and one max size transmit frame. */
/* The receive buffer count is bumped by one so we avoid an */
/* End of List condition if all receive buffers are used when */
/* using linked list DMA buffers. */
info->tx_buffer_count = info->num_tx_dma_buffers * BuffersPerFrame;
info->rx_buffer_count = (BuffersPerFrame * MAXRXFRAMES) + 6;
/*
* limit total TxBuffers & RxBuffers to 62 4K total
* (ala PCI Allocation)
*/
if ( (info->tx_buffer_count + info->rx_buffer_count) > 62 )
info->rx_buffer_count = 62 - info->tx_buffer_count;
}
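/*
 * Worked example of the sizing above (illustrative values): on a PCI
 * adapter with num_tx_dma_buffers = 1 and max_frame_size = 4096,
 * BuffersPerFrame = 1, so tx_buffer_count = 1 and the remaining
 * 62 - 1 = 61 shared memory pages are given to receive frames.
 */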
if ( debug_level >= DEBUG_LEVEL_INFO )
printk("%s(%d):Allocating %d TX and %d RX DMA buffers.\n",
__FILE__,__LINE__, info->tx_buffer_count,info->rx_buffer_count);
if ( mgsl_alloc_buffer_list_memory( info ) < 0 ||
mgsl_alloc_frame_memory(info, info->rx_buffer_list, info->rx_buffer_count) < 0 ||
mgsl_alloc_frame_memory(info, info->tx_buffer_list, info->tx_buffer_count) < 0 ||
mgsl_alloc_intermediate_rxbuffer_memory(info) < 0 ||
mgsl_alloc_intermediate_txbuffer_memory(info) < 0 ) {
printk("%s(%d):Can't allocate DMA buffer memory\n",__FILE__,__LINE__);
return -ENOMEM;
}
mgsl_reset_rx_dma_buffers( info );
mgsl_reset_tx_dma_buffers( info );
return 0;
} /* end of mgsl_allocate_dma_buffers() */
/*
* mgsl_alloc_buffer_list_memory()
*
* Allocate a common DMA buffer for use as the
* receive and transmit buffer lists.
*
* A buffer list is a set of buffer entries where each entry contains
* a pointer to an actual buffer and a pointer to the next buffer entry
* (plus some other info about the buffer).
*
* The buffer entries for a list are built to form a circular list so
* that when the entire list has been traversed you start back at the
* beginning.
*
* This function allocates memory for just the buffer entries.
* The links (pointer to next entry) are filled in with the physical
* address of the next entry so the adapter can navigate the list
* using bus master DMA. The pointers to the actual buffers are filled
* out later when the actual buffers are allocated.
*
* Arguments: info pointer to device instance data
* Return Value: 0 if success, otherwise error
*/
static int mgsl_alloc_buffer_list_memory( struct mgsl_struct *info )
{
unsigned int i;
if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
/* PCI adapter uses shared memory. */
info->buffer_list = info->memory_base + info->last_mem_alloc;
info->buffer_list_phys = info->last_mem_alloc;
info->last_mem_alloc += BUFFERLISTSIZE;
} else {
/* ISA adapter uses system memory. */
/* The buffer lists are allocated as a common buffer that both */
/* the processor and adapter can access. This allows the driver to */
/* inspect portions of the buffer while other portions are being */
/* updated by the adapter using Bus Master DMA. */
info->buffer_list = dma_alloc_coherent(NULL, BUFFERLISTSIZE, &info->buffer_list_dma_addr, GFP_KERNEL);
if (info->buffer_list == NULL)
return -ENOMEM;
info->buffer_list_phys = (u32)(info->buffer_list_dma_addr);
}
/* We got the memory for the buffer entry lists. */
/* Initialize the memory block to all zeros. */
memset( info->buffer_list, 0, BUFFERLISTSIZE );
/* Save virtual address pointers to the receive and */
/* transmit buffer lists. (Receive 1st). These pointers will */
/* be used by the processor to access the lists. */
info->rx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
info->tx_buffer_list = (DMABUFFERENTRY *)info->buffer_list;
info->tx_buffer_list += info->rx_buffer_count;
/*
* Build the links for the buffer entry lists such that
* two circular lists are built. (Transmit and Receive).
*
* Note: the links are physical addresses
* which are read by the adapter to determine the next
* buffer entry to use.
*/
for ( i = 0; i < info->rx_buffer_count; i++ ) {
/* calculate and store physical address of this buffer entry */
info->rx_buffer_list[i].phys_entry =
info->buffer_list_phys + (i * sizeof(DMABUFFERENTRY));
/* calculate and store physical address of */
/* next entry in circular list of entries */
info->rx_buffer_list[i].link = info->buffer_list_phys;
if ( i < info->rx_buffer_count - 1 )
info->rx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
}
for ( i = 0; i < info->tx_buffer_count; i++ ) {
/* calculate and store physical address of this buffer entry */
info->tx_buffer_list[i].phys_entry = info->buffer_list_phys +
((info->rx_buffer_count + i) * sizeof(DMABUFFERENTRY));
/* calculate and store physical address of */
/* next entry in circular list of entries */
info->tx_buffer_list[i].link = info->buffer_list_phys +
info->rx_buffer_count * sizeof(DMABUFFERENTRY);
if ( i < info->tx_buffer_count - 1 )
info->tx_buffer_list[i].link += (i + 1) * sizeof(DMABUFFERENTRY);
}
return 0;
} /* end of mgsl_alloc_buffer_list_memory() */
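/*
 * Resulting link layout, sketched for rx_buffer_count = 3:
 *
 *   rx entry[0].link -> rx entry[1]
 *   rx entry[1].link -> rx entry[2]
 *   rx entry[2].link -> rx entry[0]   (wrap to head of list)
 *
 * Each link holds the physical address buffer_list_phys +
 * (index * sizeof(DMABUFFERENTRY)) so the adapter can follow the
 * chain by bus master DMA; the transmit list is built the same way,
 * offset by rx_buffer_count entries.
 */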
/* Free DMA buffers allocated for use as the
* receive and transmit buffer lists.
* Warning:
*
* The data transfer buffers associated with the buffer list
* MUST be freed before freeing the buffer list itself because
* the buffer list contains the information necessary to free
* the individual buffers!
*/
static void mgsl_free_buffer_list_memory( struct mgsl_struct *info )
{
if (info->buffer_list && info->bus_type != MGSL_BUS_TYPE_PCI)
dma_free_coherent(NULL, BUFFERLISTSIZE, info->buffer_list, info->buffer_list_dma_addr);
info->buffer_list = NULL;
info->rx_buffer_list = NULL;
info->tx_buffer_list = NULL;
} /* end of mgsl_free_buffer_list_memory() */
/*
* mgsl_alloc_frame_memory()
*
* Allocate the frame DMA buffers used by the specified buffer list.
* Each DMA buffer will be one memory page in size. This is necessary
* because memory can fragment enough that it may be impossible
* to allocate contiguous pages.
*
* Arguments:
*
* info pointer to device instance data
* BufferList pointer to list of buffer entries
* Buffercount count of buffer entries in buffer list
*
* Return Value: 0 if success, otherwise -ENOMEM
*/
static int mgsl_alloc_frame_memory(struct mgsl_struct *info,DMABUFFERENTRY *BufferList,int Buffercount)
{
int i;
u32 phys_addr;
/* Allocate page sized buffers for the receive buffer list */
for ( i = 0; i < Buffercount; i++ ) {
if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
/* PCI adapter uses shared memory buffers. */
BufferList[i].virt_addr = info->memory_base + info->last_mem_alloc;
phys_addr = info->last_mem_alloc;
info->last_mem_alloc += DMABUFFERSIZE;
} else {
/* ISA adapter uses system memory. */
BufferList[i].virt_addr = dma_alloc_coherent(NULL, DMABUFFERSIZE, &BufferList[i].dma_addr, GFP_KERNEL);
if (BufferList[i].virt_addr == NULL)
return -ENOMEM;
phys_addr = (u32)(BufferList[i].dma_addr);
}
BufferList[i].phys_addr = phys_addr;
}
return 0;
} /* end of mgsl_alloc_frame_memory() */
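/*
 * Note on the PCI path above: the loop is a simple carve of the
 * shared memory window. Each iteration hands out the next
 * DMABUFFERSIZE bytes and advances last_mem_alloc, so buffer i sits
 * at a fixed offset i * DMABUFFERSIZE from where the carve started.
 * On ISA, each buffer is instead an independent coherent allocation
 * with its own bus address.
 */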
/*
* mgsl_free_frame_memory()
*
* Free the buffers associated with
* each buffer entry of a buffer list.
*
* Arguments:
*
* info pointer to device instance data
* BufferList pointer to list of buffer entries
* Buffercount count of buffer entries in buffer list
*
* Return Value: None
*/
static void mgsl_free_frame_memory(struct mgsl_struct *info, DMABUFFERENTRY *BufferList, int Buffercount)
{
int i;
if ( BufferList ) {
for ( i = 0 ; i < Buffercount ; i++ ) {
if ( BufferList[i].virt_addr ) {
if ( info->bus_type != MGSL_BUS_TYPE_PCI )
dma_free_coherent(NULL, DMABUFFERSIZE, BufferList[i].virt_addr, BufferList[i].dma_addr);
BufferList[i].virt_addr = NULL;
}
}
}
} /* end of mgsl_free_frame_memory() */
/* mgsl_free_dma_buffers()
*
* Free DMA buffers
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void mgsl_free_dma_buffers( struct mgsl_struct *info )
{
mgsl_free_frame_memory( info, info->rx_buffer_list, info->rx_buffer_count );
mgsl_free_frame_memory( info, info->tx_buffer_list, info->tx_buffer_count );
mgsl_free_buffer_list_memory( info );
} /* end of mgsl_free_dma_buffers() */
/*
* mgsl_alloc_intermediate_rxbuffer_memory()
*
* Allocate a buffer large enough to hold max_frame_size. This buffer
* is used to pass an assembled frame to the line discipline.
*
* Arguments:
*
* info pointer to device instance data
*
* Return Value: 0 if success, otherwise -ENOMEM
*/
static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info)
{
info->intermediate_rxbuffer = kmalloc(info->max_frame_size, GFP_KERNEL | GFP_DMA);
if ( info->intermediate_rxbuffer == NULL )
return -ENOMEM;
/* unused flag buffer to satisfy receive_buf calling interface */
info->flag_buf = kzalloc(info->max_frame_size, GFP_KERNEL);
if (!info->flag_buf) {
kfree(info->intermediate_rxbuffer);
info->intermediate_rxbuffer = NULL;
return -ENOMEM;
}
return 0;
} /* end of mgsl_alloc_intermediate_rxbuffer_memory() */
/*
* mgsl_free_intermediate_rxbuffer_memory()
*
* Free the intermediate receive buffer and the flag buffer
* allocated by mgsl_alloc_intermediate_rxbuffer_memory().
*
* Arguments:
*
* info pointer to device instance data
*
* Return Value: None
*/
static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info)
{
kfree(info->intermediate_rxbuffer);
info->intermediate_rxbuffer = NULL;
kfree(info->flag_buf);
info->flag_buf = NULL;
} /* end of mgsl_free_intermediate_rxbuffer_memory() */
/*
* mgsl_alloc_intermediate_txbuffer_memory()
*
* Allocate intermediate transmit buffer(s) large enough to hold max_frame_size.
* This buffer is used to load transmit frames into the adapter's dma transfer
* buffers when there is sufficient space.
*
* Arguments:
*
* info pointer to device instance data
*
* Return Value: 0 if success, otherwise -ENOMEM
*/
static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info)
{
int i;
if ( debug_level >= DEBUG_LEVEL_INFO )
printk("%s %s(%d) allocating %d tx holding buffers\n",
info->device_name, __FILE__,__LINE__,info->num_tx_holding_buffers);
memset(info->tx_holding_buffers,0,sizeof(info->tx_holding_buffers));
for ( i=0; i<info->num_tx_holding_buffers; ++i) {
info->tx_holding_buffers[i].buffer =
kmalloc(info->max_frame_size, GFP_KERNEL);
if (info->tx_holding_buffers[i].buffer == NULL) {
for (--i; i >= 0; i--) {
kfree(info->tx_holding_buffers[i].buffer);
info->tx_holding_buffers[i].buffer = NULL;
}
return -ENOMEM;
}
}
return 0;
} /* end of mgsl_alloc_intermediate_txbuffer_memory() */
/*
* mgsl_free_intermediate_txbuffer_memory()
*
*
* Arguments:
*
* info pointer to device instance data
*
* Return Value: None
*/
static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
{
int i;
for ( i=0; i<info->num_tx_holding_buffers; ++i ) {
kfree(info->tx_holding_buffers[i].buffer);
info->tx_holding_buffers[i].buffer = NULL;
}
info->get_tx_holding_index = 0;
info->put_tx_holding_index = 0;
info->tx_holding_count = 0;
} /* end of mgsl_free_intermediate_txbuffer_memory() */
/*
* load_next_tx_holding_buffer()
*
* attempts to load the next buffered tx request into the
* tx dma buffers
*
* Arguments:
*
* info pointer to device instance data
*
* Return Value: true if next buffered tx request loaded
* into adapter's tx dma buffer,
* false otherwise
*/
static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
{
bool ret = false;
if ( info->tx_holding_count ) {
/* determine if we have enough tx dma buffers
* to accommodate the next tx frame
*/
struct tx_holding_buffer *ptx =
&info->tx_holding_buffers[info->get_tx_holding_index];
int num_free = num_free_tx_dma_buffers(info);
int num_needed = ptx->buffer_size / DMABUFFERSIZE;
if ( ptx->buffer_size % DMABUFFERSIZE )
++num_needed;
if (num_needed <= num_free) {
info->xmit_cnt = ptx->buffer_size;
mgsl_load_tx_dma_buffer(info,ptx->buffer,ptx->buffer_size);
--info->tx_holding_count;
if ( ++info->get_tx_holding_index >= info->num_tx_holding_buffers)
info->get_tx_holding_index=0;
/* restart transmit timer */
mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
ret = true;
}
}
return ret;
}
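/*
 * Example of the sizing check above (assuming 4K DMA buffers): a
 * 5000 byte held frame needs 5000/4096 = 1 buffer plus one more for
 * the 904 byte remainder, so num_needed = 2 and the frame is loaded
 * only when at least 2 tx dma buffers are free.
 */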
/*
* save_tx_buffer_request()
*
* attempt to store transmit frame request for later transmission
*
* Arguments:
*
* info pointer to device instance data
* Buffer pointer to buffer containing frame to load
* BufferSize size in bytes of frame in Buffer
*
* Return Value: 1 if able to store, 0 otherwise
*/
static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize)
{
struct tx_holding_buffer *ptx;
if ( info->tx_holding_count >= info->num_tx_holding_buffers ) {
return 0; /* all buffers in use */
}
ptx = &info->tx_holding_buffers[info->put_tx_holding_index];
ptx->buffer_size = BufferSize;
memcpy( ptx->buffer, Buffer, BufferSize);
++info->tx_holding_count;
if ( ++info->put_tx_holding_index >= info->num_tx_holding_buffers)
info->put_tx_holding_index=0;
return 1;
}
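/*
 * save_tx_buffer_request() and load_next_tx_holding_buffer() form a
 * simple ring: the producer advances put_tx_holding_index modulo
 * num_tx_holding_buffers, the consumer advances get_tx_holding_index
 * the same way, and tx_holding_count tracks occupancy so the full
 * and empty cases are unambiguous.
 */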
static int mgsl_claim_resources(struct mgsl_struct *info)
{
if (request_region(info->io_base,info->io_addr_size,"synclink") == NULL) {
printk( "%s(%d):I/O address conflict on device %s Addr=%08X\n",
__FILE__,__LINE__,info->device_name, info->io_base);
return -ENODEV;
}
info->io_addr_requested = true;
if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
info->device_name, info ) < 0 ) {
printk( "%s(%d):Can't request interrupt on device %s IRQ=%d\n",
__FILE__,__LINE__,info->device_name, info->irq_level );
goto errout;
}
info->irq_requested = true;
if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
printk( "%s(%d):mem addr conflict device %s Addr=%08X\n",
__FILE__,__LINE__,info->device_name, info->phys_memory_base);
goto errout;
}
info->shared_mem_requested = true;
if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
__FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
goto errout;
}
info->lcr_mem_requested = true;
info->memory_base = ioremap_nocache(info->phys_memory_base,
0x40000);
if (!info->memory_base) {
printk( "%s(%d):Can't map shared memory on device %s MemAddr=%08X\n",
__FILE__,__LINE__,info->device_name, info->phys_memory_base );
goto errout;
}
if ( !mgsl_memory_test(info) ) {
printk( "%s(%d):Failed shared memory test %s MemAddr=%08X\n",
__FILE__,__LINE__,info->device_name, info->phys_memory_base );
goto errout;
}
info->lcr_base = ioremap_nocache(info->phys_lcr_base,
PAGE_SIZE);
if (!info->lcr_base) {
printk( "%s(%d):Can't map LCR memory on device %s MemAddr=%08X\n",
__FILE__,__LINE__,info->device_name, info->phys_lcr_base );
goto errout;
}
info->lcr_base += info->lcr_offset;
} else {
/* claim DMA channel */
if (request_dma(info->dma_level,info->device_name) < 0){
printk( "%s(%d):Can't request DMA channel on device %s DMA=%d\n",
__FILE__,__LINE__,info->device_name, info->dma_level );
mgsl_release_resources( info );
return -ENODEV;
}
info->dma_requested = true;
/* ISA adapter uses bus master DMA */
set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
enable_dma(info->dma_level);
}
if ( mgsl_allocate_dma_buffers(info) < 0 ) {
printk( "%s(%d):Can't allocate DMA buffers on device %s DMA=%d\n",
__FILE__,__LINE__,info->device_name, info->dma_level );
goto errout;
}
return 0;
errout:
mgsl_release_resources(info);
return -ENODEV;
} /* end of mgsl_claim_resources() */
static void mgsl_release_resources(struct mgsl_struct *info)
{
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):mgsl_release_resources(%s) entry\n",
__FILE__,__LINE__,info->device_name );
if ( info->irq_requested ) {
free_irq(info->irq_level, info);
info->irq_requested = false;
}
if ( info->dma_requested ) {
disable_dma(info->dma_level);
free_dma(info->dma_level);
info->dma_requested = false;
}
mgsl_free_dma_buffers(info);
mgsl_free_intermediate_rxbuffer_memory(info);
mgsl_free_intermediate_txbuffer_memory(info);
if ( info->io_addr_requested ) {
release_region(info->io_base,info->io_addr_size);
info->io_addr_requested = false;
}
if ( info->shared_mem_requested ) {
release_mem_region(info->phys_memory_base,0x40000);
info->shared_mem_requested = false;
}
if ( info->lcr_mem_requested ) {
release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
info->lcr_mem_requested = false;
}
if (info->memory_base){
iounmap(info->memory_base);
info->memory_base = NULL;
}
if (info->lcr_base){
iounmap(info->lcr_base - info->lcr_offset);
info->lcr_base = NULL;
}
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):mgsl_release_resources(%s) exit\n",
__FILE__,__LINE__,info->device_name );
} /* end of mgsl_release_resources() */
/* mgsl_add_device()
*
* Add the specified device instance data structure to the
* global linked list of devices and increment the device count.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void mgsl_add_device( struct mgsl_struct *info )
{
info->next_device = NULL;
info->line = mgsl_device_count;
sprintf(info->device_name,"ttySL%d",info->line);
if (info->line < MAX_TOTAL_DEVICES) {
if (maxframe[info->line])
info->max_frame_size = maxframe[info->line];
if (txdmabufs[info->line]) {
info->num_tx_dma_buffers = txdmabufs[info->line];
if (info->num_tx_dma_buffers < 1)
info->num_tx_dma_buffers = 1;
}
if (txholdbufs[info->line]) {
info->num_tx_holding_buffers = txholdbufs[info->line];
if (info->num_tx_holding_buffers < 1)
info->num_tx_holding_buffers = 1;
else if (info->num_tx_holding_buffers > MAX_TX_HOLDING_BUFFERS)
info->num_tx_holding_buffers = MAX_TX_HOLDING_BUFFERS;
}
}
mgsl_device_count++;
if ( !mgsl_device_list )
mgsl_device_list = info;
else {
struct mgsl_struct *current_dev = mgsl_device_list;
while( current_dev->next_device )
current_dev = current_dev->next_device;
current_dev->next_device = info;
}
if ( info->max_frame_size < 4096 )
info->max_frame_size = 4096;
else if ( info->max_frame_size > 65535 )
info->max_frame_size = 65535;
if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
printk( "SyncLink PCI v%d %s: IO=%04X IRQ=%d Mem=%08X,%08X MaxFrameSize=%u\n",
info->hw_version + 1, info->device_name, info->io_base, info->irq_level,
info->phys_memory_base, info->phys_lcr_base,
info->max_frame_size );
} else {
printk( "SyncLink ISA %s: IO=%04X IRQ=%d DMA=%d MaxFrameSize=%u\n",
info->device_name, info->io_base, info->irq_level, info->dma_level,
info->max_frame_size );
}
#if SYNCLINK_GENERIC_HDLC
hdlcdev_init(info);
#endif
} /* end of mgsl_add_device() */
static const struct tty_port_operations mgsl_port_ops = {
.carrier_raised = carrier_raised,
.dtr_rts = dtr_rts,
};
/* mgsl_allocate_device()
*
* Allocate and initialize a device instance structure
*
* Arguments: none
* Return Value: pointer to mgsl_struct if success, otherwise NULL
*/
static struct mgsl_struct* mgsl_allocate_device(void)
{
struct mgsl_struct *info;
info = kzalloc(sizeof(struct mgsl_struct),
GFP_KERNEL);
if (!info) {
printk("Error can't allocate device instance data\n");
} else {
tty_port_init(&info->port);
info->port.ops = &mgsl_port_ops;
info->magic = MGSL_MAGIC;
INIT_WORK(&info->task, mgsl_bh_handler);
info->max_frame_size = 4096;
info->port.close_delay = 5*HZ/10;
info->port.closing_wait = 30*HZ;
init_waitqueue_head(&info->status_event_wait_q);
init_waitqueue_head(&info->event_wait_q);
spin_lock_init(&info->irq_spinlock);
spin_lock_init(&info->netlock);
memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
info->idle_mode = HDLC_TXIDLE_FLAGS;
info->num_tx_dma_buffers = 1;
info->num_tx_holding_buffers = 0;
}
return info;
} /* end of mgsl_allocate_device()*/
static const struct tty_operations mgsl_ops = {
.install = mgsl_install,
.open = mgsl_open,
.close = mgsl_close,
.write = mgsl_write,
.put_char = mgsl_put_char,
.flush_chars = mgsl_flush_chars,
.write_room = mgsl_write_room,
.chars_in_buffer = mgsl_chars_in_buffer,
.flush_buffer = mgsl_flush_buffer,
.ioctl = mgsl_ioctl,
.throttle = mgsl_throttle,
.unthrottle = mgsl_unthrottle,
.send_xchar = mgsl_send_xchar,
.break_ctl = mgsl_break,
.wait_until_sent = mgsl_wait_until_sent,
.set_termios = mgsl_set_termios,
.stop = mgsl_stop,
.start = mgsl_start,
.hangup = mgsl_hangup,
.tiocmget = tiocmget,
.tiocmset = tiocmset,
.get_icount = msgl_get_icount,
.proc_fops = &mgsl_proc_fops,
};
/*
* perform tty device initialization
*/
static int mgsl_init_tty(void)
{
int rc;
serial_driver = alloc_tty_driver(128);
if (!serial_driver)
return -ENOMEM;
serial_driver->driver_name = "synclink";
serial_driver->name = "ttySL";
serial_driver->major = ttymajor;
serial_driver->minor_start = 64;
serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
serial_driver->subtype = SERIAL_TYPE_NORMAL;
serial_driver->init_termios = tty_std_termios;
serial_driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL;
serial_driver->init_termios.c_ispeed = 9600;
serial_driver->init_termios.c_ospeed = 9600;
serial_driver->flags = TTY_DRIVER_REAL_RAW;
tty_set_operations(serial_driver, &mgsl_ops);
if ((rc = tty_register_driver(serial_driver)) < 0) {
printk("%s(%d):Couldn't register serial driver\n",
__FILE__,__LINE__);
put_tty_driver(serial_driver);
serial_driver = NULL;
return rc;
}
printk("%s %s, tty major#%d\n",
driver_name, driver_version,
serial_driver->major);
return 0;
}
/* enumerate user specified ISA adapters
*/
static void mgsl_enum_isa_devices(void)
{
struct mgsl_struct *info;
int i;
/* Check for user specified ISA devices */
for (i=0 ;(i < MAX_ISA_DEVICES) && io[i] && irq[i]; i++){
if ( debug_level >= DEBUG_LEVEL_INFO )
printk("ISA device specified io=%04X,irq=%d,dma=%d\n",
io[i], irq[i], dma[i] );
info = mgsl_allocate_device();
if ( !info ) {
/* error allocating device instance data */
if ( debug_level >= DEBUG_LEVEL_ERROR )
printk( "can't allocate device instance data.\n");
continue;
}
/* Copy user configuration info to device instance data */
info->io_base = (unsigned int)io[i];
info->irq_level = (unsigned int)irq[i];
info->irq_level = irq_canonicalize(info->irq_level);
info->dma_level = (unsigned int)dma[i];
info->bus_type = MGSL_BUS_TYPE_ISA;
info->io_addr_size = 16;
info->irq_flags = 0;
mgsl_add_device( info );
}
}
static void synclink_cleanup(void)
{
int rc;
struct mgsl_struct *info;
struct mgsl_struct *tmp;
printk("Unloading %s: %s\n", driver_name, driver_version);
if (serial_driver) {
if ((rc = tty_unregister_driver(serial_driver)))
printk("%s(%d) failed to unregister tty driver err=%d\n",
__FILE__,__LINE__,rc);
put_tty_driver(serial_driver);
}
info = mgsl_device_list;
while(info) {
#if SYNCLINK_GENERIC_HDLC
hdlcdev_exit(info);
#endif
mgsl_release_resources(info);
tmp = info;
info = info->next_device;
tty_port_destroy(&tmp->port);
kfree(tmp);
}
if (pci_registered)
pci_unregister_driver(&synclink_pci_driver);
}
static int __init synclink_init(void)
{
int rc;
if (break_on_load) {
mgsl_get_text_ptr();
BREAKPOINT();
}
printk("%s %s\n", driver_name, driver_version);
mgsl_enum_isa_devices();
if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
else
pci_registered = true;
if ((rc = mgsl_init_tty()) < 0)
goto error;
return 0;
error:
synclink_cleanup();
return rc;
}
static void __exit synclink_exit(void)
{
synclink_cleanup();
}
module_init(synclink_init);
module_exit(synclink_exit);
/*
* usc_RTCmd()
*
* Issue a USC Receive/Transmit command to the
* Channel Command/Address Register (CCAR).
*
* Notes:
*
* The command is encoded in the most significant 5 bits <15..11>
* of the CCAR value. Bits <10..7> of the CCAR must be preserved
* and Bits <6..0> must be written as zeros.
*
* Arguments:
*
* info pointer to device information structure
* Cmd command mask (use symbolic macros)
*
* Return Value:
*
* None
*/
static void usc_RTCmd( struct mgsl_struct *info, u16 Cmd )
{
/* output command to CCAR in bits <15..11> */
/* preserve bits <10..7>, bits <6..0> must be zero */
outw( Cmd + info->loopback_bits, info->io_base + CCAR );
/* Read to flush write to CCAR */
if ( info->bus_type == MGSL_BUS_TYPE_PCI )
inw( info->io_base + CCAR );
} /* end of usc_RTCmd() */
/*
* usc_DmaCmd()
*
* Issue a DMA command to the DMA Command/Address Register (DCAR).
*
* Arguments:
*
* info pointer to device information structure
* Cmd DMA command mask (usc_DmaCmd_XX Macros)
*
* Return Value:
*
* None
*/
static void usc_DmaCmd( struct mgsl_struct *info, u16 Cmd )
{
/* write command mask to DCAR */
outw( Cmd + info->mbre_bit, info->io_base );
/* Read to flush write to DCAR */
if ( info->bus_type == MGSL_BUS_TYPE_PCI )
inw( info->io_base );
} /* end of usc_DmaCmd() */
/*
* usc_OutDmaReg()
*
* Write a 16-bit value to a USC DMA register
*
* Arguments:
*
* info pointer to device info structure
* RegAddr register address (number) for write
* RegValue 16-bit value to write to register
*
* Return Value:
*
* None
*
*/
static void usc_OutDmaReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
{
/* Note: The DCAR is located at the adapter base address */
/* Note: must preserve state of BIT8 in DCAR */
outw( RegAddr + info->mbre_bit, info->io_base );
outw( RegValue, info->io_base );
/* Read to flush write to DCAR */
if ( info->bus_type == MGSL_BUS_TYPE_PCI )
inw( info->io_base );
} /* end of usc_OutDmaReg() */
/*
* usc_InDmaReg()
*
* Read a 16-bit value from a DMA register
*
* Arguments:
*
* info pointer to device info structure
* RegAddr register address (number) to read from
*
* Return Value:
*
* The 16-bit value read from register
*
*/
static u16 usc_InDmaReg( struct mgsl_struct *info, u16 RegAddr )
{
/* Note: The DCAR is located at the adapter base address */
/* Note: must preserve state of BIT8 in DCAR */
outw( RegAddr + info->mbre_bit, info->io_base );
return inw( info->io_base );
} /* end of usc_InDmaReg() */
/*
*
* usc_OutReg()
*
* Write a 16-bit value to a USC serial channel register
*
* Arguments:
*
* info pointer to device info structure
* RegAddr register address (number) to write to
* RegValue 16-bit value to write to register
*
* Return Value:
*
* None
*
*/
static void usc_OutReg( struct mgsl_struct *info, u16 RegAddr, u16 RegValue )
{
outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
outw( RegValue, info->io_base + CCAR );
/* Read to flush write to CCAR */
if ( info->bus_type == MGSL_BUS_TYPE_PCI )
inw( info->io_base + CCAR );
} /* end of usc_OutReg() */
/*
* usc_InReg()
*
* Reads a 16-bit value from a USC serial channel register
*
* Arguments:
*
* info pointer to device extension
* RegAddr register address (number) to read from
*
* Return Value:
*
* 16-bit value read from register
*/
static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
{
outw( RegAddr + info->loopback_bits, info->io_base + CCAR );
return inw( info->io_base + CCAR );
} /* end of usc_InReg() */
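/*
 * The accessors above implement the 16C32's indirect register
 * interface: write the register number to CCAR (or to DCAR for DMA
 * registers), then read or write the data word at the same I/O
 * address. The usual read-modify-write idiom in this driver is:
 *
 *   usc_OutReg(info, SICR, (u16)(usc_InReg(info, SICR) | BIT3));
 */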
/* usc_set_sdlc_mode()
*
* Set up the adapter for SDLC DMA communications.
*
* Arguments: info pointer to device instance data
* Return Value: NONE
*/
static void usc_set_sdlc_mode( struct mgsl_struct *info )
{
u16 RegValue;
bool PreSL1660;
/*
* determine if the IUSC on the adapter is pre-SL1660. If
* not, take advantage of the UnderWait feature of more
* modern chips. If an underrun occurs and this bit is set,
* the transmitter will idle the programmed idle pattern
* until the driver has time to service the underrun. Otherwise,
* the dma controller may get the cycles previously requested
* and begin transmitting queued tx data.
*/
usc_OutReg(info,TMCR,0x1f);
RegValue=usc_InReg(info,TMDR);
PreSL1660 = (RegValue == IUSC_PRE_SL1660);
if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
{
/*
** Channel Mode Register (CMR)
**
** <15..14> 10 Tx Sub Modes, Send Flag on Underrun
** <13> 0 0 = Transmit Disabled (initially)
** <12> 0 1 = Consecutive Idles share common 0
** <11..8> 1110 Transmitter Mode = HDLC/SDLC Loop
** <7..4> 0000 Rx Sub Modes, addr/ctrl field handling
** <3..0> 0110 Receiver Mode = HDLC/SDLC
**
** 1000 1110 0000 0110 = 0x8e06
*/
RegValue = 0x8e06;
/*--------------------------------------------------
* ignore user options for UnderRun Actions and
* preambles
*--------------------------------------------------*/
}
else
{
/* Channel mode Register (CMR)
*
* <15..14> 00 Tx Sub modes, Underrun Action
* <13> 0 1 = Send Preamble before opening flag
* <12> 0 1 = Consecutive Idles share common 0
* <11..8> 0110 Transmitter mode = HDLC/SDLC
* <7..4> 0000 Rx Sub modes, addr/ctrl field handling
* <3..0> 0110 Receiver mode = HDLC/SDLC
*
* 0000 0110 0000 0110 = 0x0606
*/
if (info->params.mode == MGSL_MODE_RAW) {
RegValue = 0x0001; /* Set Receive mode = external sync */
usc_OutReg( info, IOCR, /* Set IOCR DCD is RxSync Detect Input */
(unsigned short)((usc_InReg(info, IOCR) & ~(BIT13|BIT12)) | BIT12));
/*
* TxSubMode:
* CMR <15> 0 Don't send CRC on Tx Underrun
* CMR <14> x undefined
* CMR <13> 0 Send preamble before opening sync
* CMR <12> 0 Send 8-bit syncs, 1=send Syncs per TxLength
*
* TxMode:
* CMR <11..8> 0100 MonoSync
*
* 00 0100 xxxx xxxx = 0x04xx
*/
RegValue |= 0x0400;
}
else {
RegValue = 0x0606;
if ( info->params.flags & HDLC_FLAG_UNDERRUN_ABORT15 )
RegValue |= BIT14;
else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
RegValue |= BIT15;
else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
RegValue |= BIT15 | BIT14;
}
if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
RegValue |= BIT13;
}
if ( info->params.mode == MGSL_MODE_HDLC &&
(info->params.flags & HDLC_FLAG_SHARE_ZERO) )
RegValue |= BIT12;
if ( info->params.addr_filter != 0xff )
{
/* set up receive address filtering */
usc_OutReg( info, RSR, info->params.addr_filter );
RegValue |= BIT4;
}
usc_OutReg( info, CMR, RegValue );
info->cmr_value = RegValue;
/* Receiver mode Register (RMR)
*
* <15..13> 000 encoding
* <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
* <10> 1 1 = Set CRC to all 1s (use for SDLC/HDLC)
* <9> 0 1 = Include Receive chars in CRC
* <8> 1 1 = Use Abort/PE bit as abort indicator
* <7..6> 00 Even parity
* <5> 0 parity disabled
* <4..2> 000 Receive Char Length = 8 bits
* <1..0> 00 Disable Receiver
*
* 0000 0101 0000 0000 = 0x0500
*/
RegValue = 0x0500;
switch ( info->params.encoding ) {
case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
}
if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
RegValue |= BIT9;
else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
RegValue |= ( BIT12 | BIT10 | BIT9 );
usc_OutReg( info, RMR, RegValue );
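/*
 * Example RMR composition (illustrative): NRZI-space encoding with
 * 16-bit CRC-CCITT yields 0x0500 | BIT14 | BIT13 | BIT9 = 0x6700.
 */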
/* Set the Receive count Limit Register (RCLR) to 0xffff. */
/* When an opening flag of an SDLC frame is recognized the */
/* Receive Character count (RCC) is loaded with the value in */
/* RCLR. The RCC is decremented for each received byte. The */
/* value of RCC is stored after the closing flag of the frame */
/* allowing the frame size to be computed. */
usc_OutReg( info, RCLR, RCLRVALUE );
usc_RCmd( info, RCmd_SelectRicrdma_level );
/* Receive Interrupt Control Register (RICR)
*
* <15..8> ? RxFIFO DMA Request Level
* <7> 0 Exited Hunt IA (Interrupt Arm)
* <6> 0 Idle Received IA
* <5> 0 Break/Abort IA
* <4> 0 Rx Bound IA
* <3> 1 Queued status reflects oldest 2 bytes in FIFO
* <2> 0 Abort/PE IA
* <1> 1 Rx Overrun IA
* <0> 0 Select TC0 value for readback
*
* 0000 0000 0000 1010 = 0x000a
*/
/* Carry over the Exit Hunt and Idle Received bits */
/* in case they have been armed by usc_ArmEvents. */
RegValue = usc_InReg( info, RICR ) & 0xc0;
if ( info->bus_type == MGSL_BUS_TYPE_PCI )
usc_OutReg( info, RICR, (u16)(0x030a | RegValue) );
else
usc_OutReg( info, RICR, (u16)(0x140a | RegValue) );
/* Unlatch all Rx status bits and clear Rx status IRQ Pending */
usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
/* Transmit mode Register (TMR)
*
* <15..13> 000 encoding
* <12..11> 00 FCS = 16bit CRC CCITT (x15 + x12 + x5 + 1)
* <10> 1 1 = Start CRC as all 1s (use for SDLC/HDLC)
* <9> 0 1 = Tx CRC Enabled
* <8> 0 1 = Append CRC to end of transmit frame
* <7..6> 00 Transmit parity Even
* <5> 0 Transmit parity Disabled
* <4..2> 000 Tx Char Length = 8 bits
* <1..0> 00 Disable Transmitter
*
* 0000 0100 0000 0000 = 0x0400
*/
RegValue = 0x0400;
switch ( info->params.encoding ) {
case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
}
if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
RegValue |= BIT9 | BIT8;
else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
usc_OutReg( info, TMR, RegValue );
usc_set_txidle( info );
usc_TCmd( info, TCmd_SelectTicrdma_level );
/* Transmit Interrupt Control Register (TICR)
*
* <15..8> ? Transmit FIFO DMA Level
* <7> 0 Present IA (Interrupt Arm)
* <6> 0 Idle Sent IA
* <5> 1 Abort Sent IA
* <4> 1 EOF/EOM Sent IA
* <3> 0 CRC Sent IA
* <2> 1 1 = Wait for SW Trigger to Start Frame
* <1> 1 Tx Underrun IA
* <0> 0 TC0 constant on read back
*
* 0000 0000 0011 0110 = 0x0036
*/
if ( info->bus_type == MGSL_BUS_TYPE_PCI )
usc_OutReg( info, TICR, 0x0736 );
else
usc_OutReg( info, TICR, 0x1436 );
usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
/*
** Transmit Command/Status Register (TCSR)
**
** <15..12> 0000 TCmd
** <11> 0/1 UnderWait
** <10..08> 000 TxIdle
** <7> x PreSent
** <6> x IdleSent
** <5> x AbortSent
** <4> x EOF/EOM Sent
** <3> x CRC Sent
** <2> x All Sent
** <1> x TxUnder
** <0> x TxEmpty
**
** 0000 0000 0000 0000 = 0x0000
*/
info->tcsr_value = 0;
if ( !PreSL1660 )
info->tcsr_value |= TCSR_UNDERWAIT;
usc_OutReg( info, TCSR, info->tcsr_value );
/* Clock mode Control Register (CMCR)
*
* <15..14> 00 counter 1 Source = Disabled
* <13..12> 00 counter 0 Source = Disabled
* <11..10> 11 BRG1 Input is TxC Pin
* <9..8> 11 BRG0 Input is TxC Pin
* <7..6> 01 DPLL Input is BRG1 Output
* <5..3> XXX TxCLK comes from Port 0
* <2..0> XXX RxCLK comes from Port 1
*
* 0000 1111 0111 0111 = 0x0f77
*/
RegValue = 0x0f40;
if ( info->params.flags & HDLC_FLAG_RXC_DPLL )
RegValue |= 0x0003; /* RxCLK from DPLL */
else if ( info->params.flags & HDLC_FLAG_RXC_BRG )
RegValue |= 0x0004; /* RxCLK from BRG0 */
else if ( info->params.flags & HDLC_FLAG_RXC_TXCPIN)
RegValue |= 0x0006; /* RxCLK from TXC Input */
else
RegValue |= 0x0007; /* RxCLK from Port1 */
if ( info->params.flags & HDLC_FLAG_TXC_DPLL )
RegValue |= 0x0018; /* TxCLK from DPLL */
else if ( info->params.flags & HDLC_FLAG_TXC_BRG )
RegValue |= 0x0020; /* TxCLK from BRG0 */
else if ( info->params.flags & HDLC_FLAG_TXC_RXCPIN)
RegValue |= 0x0038; /* TxCLK from RxC Input */
else
RegValue |= 0x0030; /* TxCLK from Port0 */
usc_OutReg( info, CMCR, RegValue );
/* Hardware Configuration Register (HCR)
*
* <15..14> 00 CTR0 Divisor:00=32,01=16,10=8,11=4
* <13> 0 CTR1DSel:0=CTR0Div determines CTR0Div
* <12> 0 CVOK:0=report code violation in biphase
* <11..10> 00 DPLL Divisor:00=32,01=16,10=8,11=4
* <9..8> XX DPLL mode:00=disable,01=NRZ,10=Biphase,11=Biphase Level
* <7..6> 00 reserved
* <5> 0 BRG1 mode:0=continuous,1=single cycle
* <4> X BRG1 Enable
* <3..2> 00 reserved
* <1> 0 BRG0 mode:0=continuous,1=single cycle
* <0> 0 BRG0 Enable
*/
RegValue = 0x0000;
if ( info->params.flags & (HDLC_FLAG_RXC_DPLL | HDLC_FLAG_TXC_DPLL) ) {
u32 XtalSpeed;
u32 DpllDivisor;
u16 Tc;
/* DPLL is enabled. Use BRG1 to provide continuous reference clock */
/* for DPLL. DPLL mode in HCR is dependent on the encoding used. */
if ( info->bus_type == MGSL_BUS_TYPE_PCI )
XtalSpeed = 11059200;
else
XtalSpeed = 14745600;
if ( info->params.flags & HDLC_FLAG_DPLL_DIV16 ) {
DpllDivisor = 16;
RegValue |= BIT10;
}
else if ( info->params.flags & HDLC_FLAG_DPLL_DIV8 ) {
DpllDivisor = 8;
RegValue |= BIT11;
}
else
DpllDivisor = 32;
/* Tc = (Xtal/Speed) - 1 */
/* If twice the remainder of (Xtal/Speed) is greater than Speed */
/* then rounding up gives a more precise time constant. Instead */
/* of rounding up and then subtracting 1 we just don't subtract */
/* the one in this case. */
/*--------------------------------------------------
* ejz: for DPLL mode, application should use the
* same clock speed as the partner system, even
* though clocking is derived from the input RxData.
* In case the user uses a 0 for the clock speed,
* default to the maximum time constant (0xffff,
* since Tc is a u16) and don't try to divide by zero
*--------------------------------------------------*/
if ( info->params.clock_speed )
{
Tc = (u16)((XtalSpeed/DpllDivisor)/info->params.clock_speed);
if ( !((((XtalSpeed/DpllDivisor) % info->params.clock_speed) * 2)
/ info->params.clock_speed) )
Tc--;
}
else
Tc = -1;
/* Write 16-bit Time Constant for BRG1 */
usc_OutReg( info, TC1R, Tc );
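/*
 * Worked example of the time constant math above: with the ISA
 * 14745600 Hz reference, DpllDivisor = 16 and clock_speed = 9600,
 * (14745600/16)/9600 = 96 with remainder 0, so Tc is decremented
 * to 95 and BRG1 divides the reference by Tc + 1 = 96.
 */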
RegValue |= BIT4; /* enable BRG1 */
switch ( info->params.encoding ) {
case HDLC_ENCODING_NRZ:
case HDLC_ENCODING_NRZB:
case HDLC_ENCODING_NRZI_MARK:
case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT8; break;
case HDLC_ENCODING_BIPHASE_MARK:
case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
case HDLC_ENCODING_BIPHASE_LEVEL:
case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 | BIT8; break;
}
}
usc_OutReg( info, HCR, RegValue );
/* Channel Control/status Register (CCSR)
*
* <15> X RCC FIFO Overflow status (RO)
* <14> X RCC FIFO Not Empty status (RO)
* <13> 0 1 = Clear RCC FIFO (WO)
* <12> X DPLL Sync (RW)
* <11> X DPLL 2 Missed Clocks status (RO)
* <10> X DPLL 1 Missed Clock status (RO)
* <9..8> 00 DPLL Resync on rising and falling edges (RW)
* <7> X SDLC Loop On status (RO)
* <6> X SDLC Loop Send status (RO)
* <5> 1 Bypass counters for TxClk and RxClk (RW)
* <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
* <1..0> 00 reserved
*
* 0001 0000 0010 0000 = 0x1020 (<12> is also written as 1 below)
*/
usc_OutReg( info, CCSR, 0x1020 );
if ( info->params.flags & HDLC_FLAG_AUTO_CTS ) {
usc_OutReg( info, SICR,
(u16)(usc_InReg(info,SICR) | SICR_CTS_INACTIVE) );
}
/* enable Master Interrupt Enable bit (MIE) */
usc_EnableMasterIrqBit( info );
usc_ClearIrqPendingBits( info, RECEIVE_STATUS | RECEIVE_DATA |
TRANSMIT_STATUS | TRANSMIT_DATA | MISC);
/* arm RCC underflow interrupt */
usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
usc_EnableInterrupts(info, MISC);
info->mbre_bit = 0;
outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
info->mbre_bit = BIT8;
outw( BIT8, info->io_base ); /* set Master Bus Enable (DCAR) */
if (info->bus_type == MGSL_BUS_TYPE_ISA) {
/* Enable DMAEN (Port 7, Bit 14) */
/* This connects the DMA request signal to the ISA bus */
usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT15) & ~BIT14));
}
/* DMA Control Register (DCR)
*
* <15..14> 10 Priority mode = Alternating Tx/Rx
* 01 Rx has priority
* 00 Tx has priority
*
* <13> 1 Enable Priority Preempt per DCR<15..14>
* (WARNING DCR<11..10> must be 00 when this is 1)
* 0 Choose activate channel per DCR<11..10>
*
* <12> 0 Little Endian for Array/List
* <11..10> 00 Both Channels can use each bus grant
* <9..6> 0000 reserved
* <5> 0 7 CLK - Minimum Bus Re-request Interval
* <4> 0 1 = drive D/C and S/D pins
* <3> 1 1 = Add one wait state to all DMA cycles.
* <2> 0 1 = Strobe /UAS on every transfer.
* <1..0> 11 Addr incrementing only affects LS24 bits
*
* 0110 0000 0000 1011 = 0x600b
*/
if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
/* PCI adapter does not need DMA wait state */
usc_OutDmaReg( info, DCR, 0xa00b );
}
else
usc_OutDmaReg( info, DCR, 0x800b );
/* Receive DMA mode Register (RDMR)
*
* <15..14> 11 DMA mode = Linked List Buffer mode
* <13> 1 RSBinA/L = store Rx status Block in Array/List entry
* <12> 1 Clear count of List Entry after fetching
* <11..10> 00 Address mode = Increment
* <9> 1 Terminate Buffer on RxBound
* <8> 0 Bus Width = 16bits
* <7..0> ? status Bits (write as 0s)
*
* 1111 0010 0000 0000 = 0xf200
*/
usc_OutDmaReg( info, RDMR, 0xf200 );
/* Transmit DMA mode Register (TDMR)
*
* <15..14> 11 DMA mode = Linked List Buffer mode
* <13> 1 TCBinA/L = fetch Tx Control Block from List entry
* <12> 1 Clear count of List Entry after fetching
* <11..10> 00 Address mode = Increment
* <9> 1 Terminate Buffer on end of frame
* <8> 0 Bus Width = 16bits
* <7..0> ? status Bits (Read Only so write as 0)
*
* 1111 0010 0000 0000 = 0xf200
*/
usc_OutDmaReg( info, TDMR, 0xf200 );
/* DMA Interrupt Control Register (DICR)
*
* Free the transmit holding buffers and reset the
* holding buffer ring indexes and count.
*
* <14> 0 1 = Disable IEO from USC
* <13> 0 1 = Don't provide vector during IntAck
* <12> 1 1 = Include status in Vector
* <10..2> 0 reserved, Must be 0s
* <1> 0 1 = Rx DMA Interrupt Enabled
* <0> 0 1 = Tx DMA Interrupt Enabled
*
* 1001 0000 0000 0000 = 0x9000
*/
usc_OutDmaReg( info, DICR, 0x9000 );
usc_InDmaReg( info, RDMR ); /* clear pending receive DMA IRQ bits */
usc_InDmaReg( info, TDMR ); /* clear pending transmit DMA IRQ bits */
usc_OutDmaReg( info, CDIR, 0x0303 ); /* clear IUS and Pending for Tx and Rx */
/* Channel Control Register (CCR)
*
* <15..14> 10 Use 32-bit Tx Control Blocks (TCBs)
* <13> 0 Trigger Tx on SW Command Disabled
* <12> 0 Flag Preamble Disabled
* <11..10> 00 Preamble Length
* <9..8> 00 Preamble Pattern
* <7..6> 10 Use 32-bit Rx status Blocks (RSBs)
* <5> 0 Trigger Rx on SW Command Disabled
* <4..0> 0 reserved
*
* 1000 0000 1000 0000 = 0x8080
*/
RegValue = 0x8080;
switch ( info->params.preamble_length ) {
case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 | BIT10; break;
}
switch ( info->params.preamble ) {
case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 | BIT12; break;
case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 | BIT8; break;
}
usc_OutReg( info, CCR, RegValue );
/*
* Burst/Dwell Control Register
*
* <15..8> 0x20 Maximum number of transfers per bus grant
* <7..0> 0x00 Maximum number of clock cycles per bus grant
*/
if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
/* don't limit bus occupancy on PCI adapter */
usc_OutDmaReg( info, BDCR, 0x0000 );
}
else
usc_OutDmaReg( info, BDCR, 0x2000 );
usc_stop_transmitter(info);
usc_stop_receiver(info);
} /* end of usc_set_sdlc_mode() */
/* usc_enable_loopback()
*
* Set the 16C32 for internal loopback mode.
* The TxCLK and RxCLK signals are generated from the BRG0 and
* the TxD is looped back to the RxD internally.
*
* Arguments: info pointer to device instance data
* enable 1 = enable loopback, 0 = disable
* Return Value: None
*/
static void usc_enable_loopback(struct mgsl_struct *info, int enable)
{
if (enable) {
/* blank external TXD output */
usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7 | BIT6));
/* Clock mode Control Register (CMCR)
*
* <15..14> 00 counter 1 Disabled
* <13..12> 00 counter 0 Disabled
* <11..10> 11 BRG1 Input is TxC Pin
* <9..8> 11 BRG0 Input is TxC Pin
* <7..6> 01 DPLL Input is BRG1 Output
* <5..3> 100 TxCLK comes from BRG0
* <2..0> 100 RxCLK comes from BRG0
*
* 0000 1111 0110 0100 = 0x0f64
*/
usc_OutReg( info, CMCR, 0x0f64 );
/* Write 16-bit Time Constant for BRG0 */
/* use clock speed if available, otherwise use 8 for diagnostics */
if (info->params.clock_speed) {
if (info->bus_type == MGSL_BUS_TYPE_PCI)
usc_OutReg(info, TC0R, (u16)((11059200/info->params.clock_speed)-1));
else
usc_OutReg(info, TC0R, (u16)((14745600/info->params.clock_speed)-1));
} else
usc_OutReg(info, TC0R, (u16)8);
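/*
 * Example (illustrative): a PCI adapter (11059200 Hz reference)
 * looped back at clock_speed = 9600 writes TC0R =
 * 11059200/9600 - 1 = 1151.
 */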
/* Hardware Configuration Register (HCR) Clear Bit 1, BRG0
mode = Continuous Set Bit 0 to enable BRG0. */
usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
usc_OutReg(info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004));
/* set Internal Data loopback mode */
info->loopback_bits = 0x300;
outw( 0x0300, info->io_base + CCAR );
} else {
/* enable external TXD output */
usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7 | BIT6));
/* clear Internal Data loopback mode */
info->loopback_bits = 0;
outw( 0,info->io_base + CCAR );
}
} /* end of usc_enable_loopback() */
/* usc_enable_aux_clock()
*
* Enable the AUX clock output at the specified frequency.
*
* Arguments:
*
* info pointer to device extension
* data_rate data rate of clock in bits per second
* A data rate of 0 disables the AUX clock.
*
* Return Value: None
*/
static void usc_enable_aux_clock( struct mgsl_struct *info, u32 data_rate )
{
u32 XtalSpeed;
u16 Tc;
if ( data_rate ) {
if ( info->bus_type == MGSL_BUS_TYPE_PCI )
XtalSpeed = 11059200;
else
XtalSpeed = 14745600;
/* Tc = (Xtal/Speed) - 1 */
/* If twice the remainder of (Xtal/Speed) is greater than Speed */
/* then rounding up gives a more precise time constant. Instead */
/* of rounding up and then subtracting 1 we just don't subtract */
/* the one in this case. */
Tc = (u16)(XtalSpeed/data_rate);
if ( !(((XtalSpeed % data_rate) * 2) / data_rate) )
Tc--;
/* Write 16-bit Time Constant for BRG0 */
usc_OutReg( info, TC0R, Tc );
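/*
 * Example: with the ISA 14745600 Hz reference and data_rate = 38400,
 * 14745600/38400 = 384 with remainder 0, so Tc becomes 383 and BRG0
 * outputs 14745600/(383 + 1) = 38400 bits per second.
 */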
/*
* Hardware Configuration Register (HCR)
* Clear Bit 1, BRG0 mode = Continuous
* Set Bit 0 to enable BRG0.
*/
usc_OutReg( info, HCR, (u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
usc_OutReg( info, IOCR, (u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
} else {
/* data rate == 0 so turn off BRG0 */
usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
}
} /* end of usc_enable_aux_clock() */
/*
*
* usc_process_rxoverrun_sync()
*
* This function processes a receive overrun by resetting the
* receive DMA buffers and issuing a Purge Rx FIFO command
* to allow the receiver to continue receiving.
*
* Arguments:
*
* info pointer to device extension
*
* Return Value: None
*/
static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
{
int start_index;
int end_index;
int frame_start_index;
bool start_of_frame_found = false;
bool end_of_frame_found = false;
bool reprogram_dma = false;
DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
u32 phys_addr;
usc_DmaCmd( info, DmaCmd_PauseRxChannel );
usc_RCmd( info, RCmd_EnterHuntmode );
usc_RTCmd( info, RTCmd_PurgeRxFifo );
/* CurrentRxBuffer points to the 1st buffer of the next */
/* possibly available receive frame. */
frame_start_index = start_index = end_index = info->current_rx_buffer;
/* Search for an unfinished string of buffers. This means */
/* that a receive frame started (at least one buffer with */
/* count set to zero) but there is no terminating buffer */
/* (status set to non-zero). */
while( !buffer_list[end_index].count )
{
/* Count field has been reset to zero by 16C32. */
/* This buffer is currently in use. */
if ( !start_of_frame_found )
{
start_of_frame_found = true;
frame_start_index = end_index;
end_of_frame_found = false;
}
if ( buffer_list[end_index].status )
{
/* Status field has been set by 16C32. */
/* This is the last buffer of a received frame. */
/* We want to leave the buffers for this frame intact. */
/* Move on to next possible frame. */
start_of_frame_found = false;
end_of_frame_found = true;
}
/* advance to next buffer entry in linked list */
end_index++;
if ( end_index == info->rx_buffer_count )
end_index = 0;
if ( start_index == end_index )
{
/* The entire list has been searched with all Counts == 0 and */
/* all Status == 0. The receive buffers are */
/* completely screwed, reset all receive buffers! */
mgsl_reset_rx_dma_buffers( info );
frame_start_index = 0;
start_of_frame_found = false;
reprogram_dma = true;
break;
}
}
if ( start_of_frame_found && !end_of_frame_found )
{
/* There is an unfinished string of receive DMA buffers */
/* as a result of the receiver overrun. */
/* Reset the buffers for the unfinished frame */
/* and reprogram the receive DMA controller to start */
/* at the 1st buffer of unfinished frame. */
start_index = frame_start_index;
do
{
*((unsigned long *)&(info->rx_buffer_list[start_index++].count)) = DMABUFFERSIZE;
/* Adjust index for wrap around. */
if ( start_index == info->rx_buffer_count )
start_index = 0;
} while( start_index != end_index );
reprogram_dma = true;
}
if ( reprogram_dma )
{
usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
usc_ClearIrqPendingBits(info, RECEIVE_DATA|RECEIVE_STATUS);
usc_UnlatchRxstatusBits(info, RECEIVE_DATA|RECEIVE_STATUS);
usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
/* This empties the receive FIFO and loads the RCC with RCLR */
usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
/* program 16C32 with physical address of 1st DMA buffer entry */
phys_addr = info->rx_buffer_list[frame_start_index].phys_entry;
usc_OutDmaReg( info, NRARL, (u16)phys_addr );
usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
usc_EnableInterrupts( info, RECEIVE_STATUS );
/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
usc_DmaCmd( info, DmaCmd_InitRxChannel );
if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
usc_EnableReceiver(info,ENABLE_AUTO_DCD);
else
usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
}
else
{
/* This empties the receive FIFO and loads the RCC with RCLR */
usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
usc_RTCmd( info, RTCmd_PurgeRxFifo );
}
} /* end of usc_process_rxoverrun_sync() */
/* usc_stop_receiver()
*
* Disable USC receiver
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void usc_stop_receiver( struct mgsl_struct *info )
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):usc_stop_receiver(%s)\n",
__FILE__,__LINE__, info->device_name );
/* Disable receive DMA channel. */
/* This also disables receive DMA channel interrupts */
usc_DmaCmd( info, DmaCmd_ResetRxChannel );
usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
usc_DisableInterrupts( info, RECEIVE_DATA | RECEIVE_STATUS );
usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
/* This empties the receive FIFO and loads the RCC with RCLR */
usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
usc_RTCmd( info, RTCmd_PurgeRxFifo );
info->rx_enabled = false;
info->rx_overflow = false;
info->rx_rcc_underrun = false;
} /* end of usc_stop_receiver() */
/* usc_start_receiver()
*
* Enable the USC receiver
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void usc_start_receiver( struct mgsl_struct *info )
{
u32 phys_addr;
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):usc_start_receiver(%s)\n",
__FILE__,__LINE__, info->device_name );
mgsl_reset_rx_dma_buffers( info );
usc_stop_receiver( info );
usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
usc_RTCmd( info, RTCmd_PurgeRxFifo );
if ( info->params.mode == MGSL_MODE_HDLC ||
info->params.mode == MGSL_MODE_RAW ) {
/* DMA mode Transfers */
/* Program the DMA controller. */
/* Enable the DMA controller end of buffer interrupt. */
/* program 16C32 with physical address of 1st DMA buffer entry */
phys_addr = info->rx_buffer_list[0].phys_entry;
usc_OutDmaReg( info, NRARL, (u16)phys_addr );
usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
usc_EnableInterrupts( info, RECEIVE_STATUS );
/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
usc_DmaCmd( info, DmaCmd_InitRxChannel );
if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
usc_EnableReceiver(info,ENABLE_AUTO_DCD);
else
usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
} else {
usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
usc_EnableInterrupts(info, RECEIVE_DATA);
usc_RTCmd( info, RTCmd_PurgeRxFifo );
usc_RCmd( info, RCmd_EnterHuntmode );
usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
}
usc_OutReg( info, CCSR, 0x1020 );
info->rx_enabled = true;
} /* end of usc_start_receiver() */
/* usc_start_transmitter()
*
* Enable the USC transmitter and send a transmit frame if
* one is loaded in the DMA buffers.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void usc_start_transmitter( struct mgsl_struct *info )
{
u32 phys_addr;
unsigned int FrameSize;
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):usc_start_transmitter(%s)\n",
__FILE__,__LINE__, info->device_name );
if ( info->xmit_cnt ) {
/* If auto RTS enabled and RTS is inactive, then assert */
/* RTS and set a flag indicating that the driver should */
/* negate RTS when the transmission completes. */
info->drop_rts_on_tx_done = false;
if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
usc_get_serial_signals( info );
if ( !(info->serial_signals & SerialSignal_RTS) ) {
info->serial_signals |= SerialSignal_RTS;
usc_set_serial_signals( info );
info->drop_rts_on_tx_done = true;
}
}
if ( info->params.mode == MGSL_MODE_ASYNC ) {
if ( !info->tx_active ) {
usc_UnlatchTxstatusBits(info, TXSTATUS_ALL);
usc_ClearIrqPendingBits(info, TRANSMIT_STATUS + TRANSMIT_DATA);
usc_EnableInterrupts(info, TRANSMIT_DATA);
usc_load_txfifo(info);
}
} else {
/* Disable transmit DMA controller while programming. */
usc_DmaCmd( info, DmaCmd_ResetTxChannel );
/* Transmit DMA buffer is loaded, so program USC */
/* to send the frame contained in the buffers. */
FrameSize = info->tx_buffer_list[info->start_tx_dma_buffer].rcc;
/* if operating in Raw sync mode, reset the rcc component
* of the tx dma buffer entry, otherwise, the serial controller
* will send a closing sync char after this count.
*/
if ( info->params.mode == MGSL_MODE_RAW )
info->tx_buffer_list[info->start_tx_dma_buffer].rcc = 0;
/* Program the Transmit Character Length Register (TCLR) */
/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
usc_OutReg( info, TCLR, (u16)FrameSize );
usc_RTCmd( info, RTCmd_PurgeTxFifo );
/* Program the address of the 1st DMA Buffer Entry in linked list */
phys_addr = info->tx_buffer_list[info->start_tx_dma_buffer].phys_entry;
usc_OutDmaReg( info, NTARL, (u16)phys_addr );
usc_OutDmaReg( info, NTARU, (u16)(phys_addr >> 16) );
usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
usc_EnableInterrupts( info, TRANSMIT_STATUS );
if ( info->params.mode == MGSL_MODE_RAW &&
info->num_tx_dma_buffers > 1 ) {
/* When running external sync mode, attempt to 'stream' transmit */
/* by filling tx dma buffers as they become available. To do this */
/* we need to enable Tx DMA EOB Status interrupts : */
/* */
/* 1. Arm End of Buffer (EOB) Transmit DMA Interrupt (BIT2 of TDIAR) */
/* 2. Enable Transmit DMA Interrupts (BIT0 of DICR) */
usc_OutDmaReg( info, TDIAR, BIT2|BIT3 );
usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT0) );
}
/* Initialize Transmit DMA Channel */
usc_DmaCmd( info, DmaCmd_InitTxChannel );
usc_TCmd( info, TCmd_SendFrame );
mod_timer(&info->tx_timer, jiffies +
msecs_to_jiffies(5000));
}
info->tx_active = true;
}
if ( !info->tx_enabled ) {
info->tx_enabled = true;
if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
else
usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
}
} /* end of usc_start_transmitter() */
/* usc_stop_transmitter()
*
* Stops the transmitter and DMA
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void usc_stop_transmitter( struct mgsl_struct *info )
{
if (debug_level >= DEBUG_LEVEL_ISR)
printk("%s(%d):usc_stop_transmitter(%s)\n",
__FILE__,__LINE__, info->device_name );
del_timer(&info->tx_timer);
usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA );
usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA );
usc_EnableTransmitter(info,DISABLE_UNCONDITIONAL);
usc_DmaCmd( info, DmaCmd_ResetTxChannel );
usc_RTCmd( info, RTCmd_PurgeTxFifo );
info->tx_enabled = false;
info->tx_active = false;
} /* end of usc_stop_transmitter() */
/* usc_load_txfifo()
*
* Fill the transmit FIFO until the FIFO is full or
* there is no more data to load.
*
* Arguments: info pointer to device extension (instance data)
* Return Value: None
*/
static void usc_load_txfifo( struct mgsl_struct *info )
{
int Fifocount;
u8 TwoBytes[2];
if ( !info->xmit_cnt && !info->x_char )
return;
/* Select transmit FIFO status readback in TICR */
usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
/* load the Transmit FIFO until FIFOs full or all data sent */
while( (Fifocount = usc_InReg(info, TICR) >> 8) && info->xmit_cnt ) {
/* there is more space in the transmit FIFO and */
/* there is more data in transmit buffer */
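/* Note: xmit_tail wraps via the bitmask below, which assumes */
/* SERIAL_XMIT_SIZE is a power of two. */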
if ( (info->xmit_cnt > 1) && (Fifocount > 1) && !info->x_char ) {
/* write a 16-bit word from transmit buffer to 16C32 */
TwoBytes[0] = info->xmit_buf[info->xmit_tail++];
info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
TwoBytes[1] = info->xmit_buf[info->xmit_tail++];
info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
outw( *((u16 *)TwoBytes), info->io_base + DATAREG);
info->xmit_cnt -= 2;
info->icount.tx += 2;
} else {
/* only 1 byte left to transmit or 1 FIFO slot left */
outw( (inw( info->io_base + CCAR) & 0x0780) | (TDR+LSBONLY),
info->io_base + CCAR );
if (info->x_char) {
/* transmit pending high priority char */
outw( info->x_char,info->io_base + CCAR );
info->x_char = 0;
} else {
outw( info->xmit_buf[info->xmit_tail++],info->io_base + CCAR );
info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
info->xmit_cnt--;
}
info->icount.tx++;
}
}
} /* end of usc_load_txfifo() */
/* usc_reset()
*
* Reset the adapter to a known state and prepare it for further use.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void usc_reset( struct mgsl_struct *info )
{
if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
int i;
u32 readval;
/* Set BIT30 of Misc Control Register */
/* (Local Control Register 0x50) to force reset of USC. */
volatile u32 *MiscCtrl = (u32 *)(info->lcr_base + 0x50);
u32 *LCR0BRDR = (u32 *)(info->lcr_base + 0x28);
info->misc_ctrl_value |= BIT30;
*MiscCtrl = info->misc_ctrl_value;
/*
* Force at least 170ns delay before clearing
* reset bit. Each read from LCR takes at least
* 30ns, so ten reads give at least 300ns to be safe.
*/
for(i=0;i<10;i++)
readval = *MiscCtrl;
info->misc_ctrl_value &= ~BIT30;
*MiscCtrl = info->misc_ctrl_value;
*LCR0BRDR = BUS_DESCRIPTOR(
1, // Write Strobe Hold (0-3)
2, // Write Strobe Delay (0-3)
2, // Read Strobe Delay (0-3)
0, // NWDD (Write data-data) (0-3)
4, // NWAD (Write Addr-data) (0-31)
0, // NXDA (Read/Write Data-Addr) (0-3)
0, // NRDD (Read Data-Data) (0-3)
5 // NRAD (Read Addr-Data) (0-31)
);
} else {
/* do HW reset */
outb( 0,info->io_base + 8 );
}
info->mbre_bit = 0;
info->loopback_bits = 0;
info->usc_idle_mode = 0;
/*
* Program the Bus Configuration Register (BCR)
*
* <15> 0 Don't use separate address
* <14..6> 0 reserved
* <5..4> 00 IAckmode = Default, don't care
* <3> 1 Bus Request Totem Pole output
* <2> 1 Use 16 Bit data bus
* <1> 0 IRQ Totem Pole output
* <0> 0 Don't Shift Right Addr
*
* 0000 0000 0000 1100 = 0x000c
*
* By writing to io_base + SDPIN the Wait/Ack pin is
* programmed to work as a Wait pin.
*/
outw( 0x000c,info->io_base + SDPIN );
outw( 0,info->io_base );
outw( 0,info->io_base + CCAR );
/* select little endian byte ordering */
usc_RTCmd( info, RTCmd_SelectLittleEndian );
/* Port Control Register (PCR)
*
* <15..14> 11 Port 7 is Output (~DMAEN, Bit 14 : 0 = Enabled)
* <13..12> 11 Port 6 is Output (~INTEN, Bit 12 : 0 = Enabled)
* <11..10> 00 Port 5 is Input (No Connect, Don't Care)
* <9..8> 00 Port 4 is Input (No Connect, Don't Care)
* <7..6> 11 Port 3 is Output (~RTS, Bit 6 : 0 = Enabled )
* <5..4> 11 Port 2 is Output (~DTR, Bit 4 : 0 = Enabled )
* <3..2> 01 Port 1 is Input (Dedicated RxC)
* <1..0> 01 Port 0 is Input (Dedicated TxC)
*
* 1111 0000 1111 0101 = 0xf0f5
*/
usc_OutReg( info, PCR, 0xf0f5 );
/*
* Input/Output Control Register
*
* <15..14> 00 CTS is active low input
* <13..12> 00 DCD is active low input
* <11..10> 00 TxREQ pin is input (DSR)
* <9..8> 00 RxREQ pin is input (RI)
* <7..6> 00 TxD is output (Transmit Data)
* <5..3> 000 TxC Pin is Input (14.7456MHz Clock)
* <2..0> 100 RxC is Output (drive with BRG0)
*
* 0000 0000 0000 0100 = 0x0004
*/
usc_OutReg( info, IOCR, 0x0004 );
} /* end of usc_reset() */
/* usc_set_async_mode()
*
* Program adapter for asynchronous communications.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void usc_set_async_mode( struct mgsl_struct *info )
{
u16 RegValue;
/* disable interrupts while programming USC */
usc_DisableMasterIrqBit( info );
outw( 0, info->io_base ); /* clear Master Bus Enable (DCAR) */
usc_DmaCmd( info, DmaCmd_ResetAllChannels ); /* disable both DMA channels */
usc_loopback_frame( info );
/* Channel mode Register (CMR)
*
* <15..14> 00 Tx Sub modes, 00 = 1 Stop Bit
* <13..12> 00 00 = 16X Clock
* <11..8> 0000 Transmitter mode = Asynchronous
* <7..6> 00 reserved?
* <5..4> 00 Rx Sub modes, 00 = 16X Clock
* <3..0> 0000 Receiver mode = Asynchronous
*
* 0000 0000 0000 0000 = 0x0
*/
RegValue = 0;
if ( info->params.stop_bits != 1 )
RegValue |= BIT14;
usc_OutReg( info, CMR, RegValue );
/* Receiver mode Register (RMR)
*
* <15..13> 000 encoding = None
* <12..08> 00000 reserved (Sync Only)
* <7..6> 00 Even parity
* <5> 0 parity disabled
* <4..2> 000 Receive Char Length = 8 bits
* <1..0> 00 Disable Receiver
*
* 0000 0000 0000 0000 = 0x0
*/
RegValue = 0;
if ( info->params.data_bits != 8 )
RegValue |= BIT4 | BIT3 | BIT2;
if ( info->params.parity != ASYNC_PARITY_NONE ) {
RegValue |= BIT5;
if ( info->params.parity != ASYNC_PARITY_ODD )
RegValue |= BIT6;
}
usc_OutReg( info, RMR, RegValue );
/* Set IRQ trigger level */
usc_RCmd( info, RCmd_SelectRicrIntLevel );
/* Receive Interrupt Control Register (RICR)
*
* <15..8> ? RxFIFO IRQ Request Level
*
* Note: For async mode the receive FIFO level must be set
* to 0 to avoid the situation where the FIFO contains fewer bytes
* than the trigger level and no more data is expected.
*
* <7> 0 Exited Hunt IA (Interrupt Arm)
* <6> 0 Idle Received IA
* <5> 0 Break/Abort IA
* <4> 0 Rx Bound IA
* <3> 0 Queued status reflects oldest byte in FIFO
* <2> 0 Abort/PE IA
* <1> 0 Rx Overrun IA
* <0> 0 Select TC0 value for readback
*
* 0000 0000 0000 0000 = 0x0000 + (FIFOLEVEL in MSB)
*/
usc_OutReg( info, RICR, 0x0000 );
usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
usc_ClearIrqPendingBits( info, RECEIVE_STATUS );
/* Transmit mode Register (TMR)
*
* <15..13> 000 encoding = None
* <12..08> 00000 reserved (Sync Only)
* <7..6> 00 Transmit parity Even
* <5> 0 Transmit parity Disabled
* <4..2> 000 Tx Char Length = 8 bits
* <1..0> 00 Disable Transmitter
*
* 0000 0000 0000 0000 = 0x0
*/
RegValue = 0;
if ( info->params.data_bits != 8 )
RegValue |= BIT4 | BIT3 | BIT2;
if ( info->params.parity != ASYNC_PARITY_NONE ) {
RegValue |= BIT5;
if ( info->params.parity != ASYNC_PARITY_ODD )
RegValue |= BIT6;
}
usc_OutReg( info, TMR, RegValue );
usc_set_txidle( info );
/* Set IRQ trigger level */
usc_TCmd( info, TCmd_SelectTicrIntLevel );
/* Transmit Interrupt Control Register (TICR)
*
* <15..8> ? Transmit FIFO IRQ Level
* <7> 0 Present IA (Interrupt Arm)
* <6> 1 Idle Sent IA
* <5> 0 Abort Sent IA
* <4> 0 EOF/EOM Sent IA
* <3> 0 CRC Sent IA
* <2> 0 1 = Wait for SW Trigger to Start Frame
* <1> 0 Tx Underrun IA
* <0> 0 TC0 constant on read back
*
* 0000 0000 0100 0000 = 0x0040 (low byte; the Tx FIFO IRQ level
* 0x1f in the MSB gives the 0x1f40 written below)
*/
usc_OutReg( info, TICR, 0x1f40 );
usc_UnlatchTxstatusBits( info, TXSTATUS_ALL );
usc_ClearIrqPendingBits( info, TRANSMIT_STATUS );
usc_enable_async_clock( info, info->params.data_rate );
/* Channel Control/status Register (CCSR)
*
* <15> X RCC FIFO Overflow status (RO)
* <14> X RCC FIFO Not Empty status (RO)
* <13> 0 1 = Clear RCC FIFO (WO)
* <12> X DPLL in Sync status (RO)
* <11> X DPLL 2 Missed Clocks status (RO)
* <10> X DPLL 1 Missed Clock status (RO)
* <9..8> 00 DPLL Resync on rising and falling edges (RW)
* <7> X SDLC Loop On status (RO)
* <6> X SDLC Loop Send status (RO)
* <5> 1 Bypass counters for TxClk and RxClk (RW)
* <4..2> 000 Last Char of SDLC frame has 8 bits (RW)
* <1..0> 00 reserved
*
* 0000 0000 0010 0000 = 0x0020
*/
usc_OutReg( info, CCSR, 0x0020 );
usc_DisableInterrupts( info, TRANSMIT_STATUS + TRANSMIT_DATA +
RECEIVE_DATA + RECEIVE_STATUS );
usc_ClearIrqPendingBits( info, TRANSMIT_STATUS + TRANSMIT_DATA +
RECEIVE_DATA + RECEIVE_STATUS );
usc_EnableMasterIrqBit( info );
if (info->bus_type == MGSL_BUS_TYPE_ISA) {
/* Enable INTEN (Port 6, Bit12) */
/* This connects the IRQ request signal to the ISA bus */
usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
}
if (info->params.loopback) {
info->loopback_bits = 0x300;
outw(0x0300, info->io_base + CCAR);
}
} /* end of usc_set_async_mode() */
/* usc_loopback_frame()
*
* Loop back a small (2 byte) dummy SDLC frame.
* Interrupts and DMA are NOT used. The purpose of this is to
* clear any 'stale' status info left over from running in async mode.
*
* The 16C32 shows the strange behaviour of marking the 1st
* received SDLC frame with a CRC error even when there is no
* CRC error. To get around this, a small dummy frame of 2 bytes
* is looped back when switching from async to sync mode.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void usc_loopback_frame( struct mgsl_struct *info )
{
int i;
unsigned long oldmode = info->params.mode;
info->params.mode = MGSL_MODE_HDLC;
usc_DisableMasterIrqBit( info );
usc_set_sdlc_mode( info );
usc_enable_loopback( info, 1 );
/* Write 16-bit Time Constant for BRG0 */
usc_OutReg( info, TC0R, 0 );
/* Channel Control Register (CCR)
*
* <15..14> 00 Don't use 32-bit Tx Control Blocks (TCBs)
* <13> 0 Trigger Tx on SW Command Disabled
* <12> 0 Flag Preamble Disabled
* <11..10> 00 Preamble Length = 8-Bits
* <9..8> 01 Preamble Pattern = flags
* <7..6> 00 Don't use 32-bit Rx status Blocks (RSBs)
* <5> 0 Trigger Rx on SW Command Disabled
* <4..0> 0 reserved
*
* 0000 0001 0000 0000 = 0x0100
*/
usc_OutReg( info, CCR, 0x0100 );
/* SETUP RECEIVER */
usc_RTCmd( info, RTCmd_PurgeRxFifo );
usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
/* SETUP TRANSMITTER */
/* Program the Transmit Character Length Register (TCLR) */
/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
usc_OutReg( info, TCLR, 2 );
usc_RTCmd( info, RTCmd_PurgeTxFifo );
/* unlatch Tx status bits, and start transmit channel. */
usc_UnlatchTxstatusBits(info,TXSTATUS_ALL);
outw(0,info->io_base + DATAREG);
/* ENABLE TRANSMITTER */
usc_TCmd( info, TCmd_SendFrame );
usc_EnableTransmitter(info,ENABLE_UNCONDITIONAL);
/* WAIT FOR RECEIVE COMPLETE */
for (i=0 ; i<1000 ; i++)
if (usc_InReg( info, RCSR ) & (BIT8 | BIT4 | BIT3 | BIT1))
break;
/* clear Internal Data loopback mode */
usc_enable_loopback(info, 0);
usc_EnableMasterIrqBit(info);
info->params.mode = oldmode;
} /* end of usc_loopback_frame() */
/* usc_set_sync_mode() Programs the USC for SDLC communications.
*
* Arguments: info pointer to adapter info structure
* Return Value: None
*/
static void usc_set_sync_mode( struct mgsl_struct *info )
{
usc_loopback_frame( info );
usc_set_sdlc_mode( info );
if (info->bus_type == MGSL_BUS_TYPE_ISA) {
/* Enable INTEN (Port 6, Bit12) */
/* This connects the IRQ request signal to the ISA bus */
usc_OutReg(info, PCR, (u16)((usc_InReg(info, PCR) | BIT13) & ~BIT12));
}
usc_enable_aux_clock(info, info->params.clock_speed);
if (info->params.loopback)
usc_enable_loopback(info,1);
} /* end of usc_set_sync_mode() */
/* usc_set_txidle() Set the HDLC idle mode for the transmitter.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void usc_set_txidle( struct mgsl_struct *info )
{
u16 usc_idle_mode = IDLEMODE_FLAGS;
/* Map API idle mode to USC register bits */
switch( info->idle_mode ){
case HDLC_TXIDLE_FLAGS: usc_idle_mode = IDLEMODE_FLAGS; break;
case HDLC_TXIDLE_ALT_ZEROS_ONES: usc_idle_mode = IDLEMODE_ALT_ONE_ZERO; break;
case HDLC_TXIDLE_ZEROS: usc_idle_mode = IDLEMODE_ZERO; break;
case HDLC_TXIDLE_ONES: usc_idle_mode = IDLEMODE_ONE; break;
case HDLC_TXIDLE_ALT_MARK_SPACE: usc_idle_mode = IDLEMODE_ALT_MARK_SPACE; break;
case HDLC_TXIDLE_SPACE: usc_idle_mode = IDLEMODE_SPACE; break;
case HDLC_TXIDLE_MARK: usc_idle_mode = IDLEMODE_MARK; break;
}
info->usc_idle_mode = usc_idle_mode;
//usc_OutReg(info, TCSR, usc_idle_mode);
info->tcsr_value &= ~IDLEMODE_MASK; /* clear idle mode bits */
info->tcsr_value += usc_idle_mode;
usc_OutReg(info, TCSR, info->tcsr_value);
/*
* if SyncLink WAN adapter is running in external sync mode, the
* transmitter has been set to Monosync in order to try to mimic
* a true raw outbound bit stream. Monosync still sends an open/close
* sync char at the start/end of a frame. Try to match those sync
* patterns to the idle mode set here
*/
if ( info->params.mode == MGSL_MODE_RAW ) {
unsigned char syncpat = 0;
switch( info->idle_mode ) {
case HDLC_TXIDLE_FLAGS:
syncpat = 0x7e;
break;
case HDLC_TXIDLE_ALT_ZEROS_ONES:
syncpat = 0x55;
break;
case HDLC_TXIDLE_ZEROS:
case HDLC_TXIDLE_SPACE:
syncpat = 0x00;
break;
case HDLC_TXIDLE_ONES:
case HDLC_TXIDLE_MARK:
syncpat = 0xff;
break;
case HDLC_TXIDLE_ALT_MARK_SPACE:
syncpat = 0xaa;
break;
}
usc_SetTransmitSyncChars(info,syncpat,syncpat);
}
} /* end of usc_set_txidle() */
/* usc_get_serial_signals()
*
* Query the adapter for the state of the V24 status (input) signals.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void usc_get_serial_signals( struct mgsl_struct *info )
{
u16 status;
/* clear all serial signals except RTS and DTR */
info->serial_signals &= SerialSignal_RTS | SerialSignal_DTR;
/* Read the Misc Interrupt status Register (MISR) to get */
/* the V24 status signals. */
status = usc_InReg( info, MISR );
/* set serial signal bits to reflect MISR */
if ( status & MISCSTATUS_CTS )
info->serial_signals |= SerialSignal_CTS;
if ( status & MISCSTATUS_DCD )
info->serial_signals |= SerialSignal_DCD;
if ( status & MISCSTATUS_RI )
info->serial_signals |= SerialSignal_RI;
if ( status & MISCSTATUS_DSR )
info->serial_signals |= SerialSignal_DSR;
} /* end of usc_get_serial_signals() */
/* usc_set_serial_signals()
*
* Set the state of RTS and DTR based on contents of
* serial_signals member of device extension.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void usc_set_serial_signals( struct mgsl_struct *info )
{
u16 Control;
unsigned char V24Out = info->serial_signals;
/* get the current value of the Port Control Register (PCR) */
Control = usc_InReg( info, PCR );
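/* Note: the PCR outputs for RTS (port 3, bit 6) and DTR (port 2, */
/* bit 4) are active low per the PCR setup in usc_reset(), so */
/* asserting a signal means clearing its bit. */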
if ( V24Out & SerialSignal_RTS )
Control &= ~(BIT6);
else
Control |= BIT6;
if ( V24Out & SerialSignal_DTR )
Control &= ~(BIT4);
else
Control |= BIT4;
usc_OutReg( info, PCR, Control );
} /* end of usc_set_serial_signals() */
/* usc_enable_async_clock()
*
* Enable the async clock at the specified frequency.
*
* Arguments: info pointer to device instance data
* data_rate data rate of clock in bps
* 0 disables the AUX clock.
* Return Value: None
*/
static void usc_enable_async_clock( struct mgsl_struct *info, u32 data_rate )
{
if ( data_rate ) {
/*
* Clock mode Control Register (CMCR)
*
* <15..14> 00 counter 1 Disabled
* <13..12> 00 counter 0 Disabled
* <11..10> 11 BRG1 Input is TxC Pin
* <9..8> 11 BRG0 Input is TxC Pin
* <7..6> 01 DPLL Input is BRG1 Output
* <5..3> 100 TxCLK comes from BRG0
* <2..0> 100 RxCLK comes from BRG0
*
* 0000 1111 0110 0100 = 0x0f64
*/
usc_OutReg( info, CMCR, 0x0f64 );
/*
* Write 16-bit Time Constant for BRG0
* Time Constant = (ClkSpeed / data_rate) - 1
* ClkSpeed = 921600 (ISA), 691200 (PCI)
*/
if ( info->bus_type == MGSL_BUS_TYPE_PCI )
usc_OutReg( info, TC0R, (u16)((691200/data_rate) - 1) );
else
usc_OutReg( info, TC0R, (u16)((921600/data_rate) - 1) );
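/*
* Worked example (illustrative arithmetic only): for a 9600 bps
* async rate the time constant is 921600/9600 - 1 = 95 (0x5f) on
* ISA, or 691200/9600 - 1 = 71 (0x47) on PCI.
*/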
/*
* Hardware Configuration Register (HCR)
* Clear Bit 1, BRG0 mode = Continuous
* Set Bit 0 to enable BRG0.
*/
usc_OutReg( info, HCR,
(u16)((usc_InReg( info, HCR ) & ~BIT1) | BIT0) );
/* Input/Output Control Reg, <2..0> = 100, Drive RxC pin with BRG0 */
usc_OutReg( info, IOCR,
(u16)((usc_InReg(info, IOCR) & 0xfff8) | 0x0004) );
} else {
/* data rate == 0 so turn off BRG0 */
usc_OutReg( info, HCR, (u16)(usc_InReg( info, HCR ) & ~BIT0) );
}
} /* end of usc_enable_async_clock() */
/*
* Buffer Structures:
*
* Normal memory access uses virtual addresses that can make discontiguous
* physical memory pages appear to be contiguous in the virtual address
* space (the processor's memory mapping handles the conversions).
*
* DMA transfers require physically contiguous memory. This is because
* the DMA system controller and DMA bus masters deal with memory using
* only physical addresses.
*
* This causes a problem under Windows NT when large DMA buffers are
* needed. Fragmentation of the nonpaged pool prevents allocations of
* physically contiguous buffers larger than the PAGE_SIZE.
*
* However the 16C32 supports Bus Master Scatter/Gather DMA which
* allows DMA transfers to physically discontiguous buffers. Information
* about each data transfer buffer is contained in a memory structure
* called a 'buffer entry'. A list of buffer entries is maintained
* to track and control the use of the data transfer buffers.
*
* To support this strategy we will allocate sufficient PAGE_SIZE
* contiguous memory buffers to allow for the total required buffer
* space.
*
* The 16C32 accesses the list of buffer entries using Bus Master
* DMA. Control information is read from the buffer entries by the
* 16C32 to control data transfers. Status information is written to
* the buffer entries by the 16C32 to indicate the status of completed
* transfers.
*
* The CPU writes control information to the buffer entries to control
* the 16C32 and reads status information from the buffer entries to
* determine information about received and transmitted frames.
*
* Because the CPU and 16C32 (adapter) both need simultaneous access
* to the buffer entries, the buffer entry memory is allocated with
* HalAllocateCommonBuffer(). This restricts the size of the buffer
* entry list to PAGE_SIZE.
*
* The actual data buffers on the other hand will only be accessed
* by the CPU or the adapter but not by both simultaneously. This allows
* Scatter/Gather packet based DMA procedures for using physically
* discontiguous pages.
*/
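/*
* Quick reference (a simplified reminder only; the authoritative
* DMABUFFERENTRY definition appears earlier in this driver) for the
* buffer entry fields as used below:
*
* count      byte count for the entry; the 16C32 clears it when it
*            starts using the buffer
* status     completion status; written by the 16C32 when a receive
*            frame completes (zero = in progress or unused)
* rcc        residual character count, used to derive frame size
* virt_addr  CPU (virtual) address of the entry's data buffer
* phys_entry physical address of the entry, programmed into the DMA
*            controller via NRARL/NRARU (rx) or NTARL/NTARU (tx)
*/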
/*
* mgsl_reset_tx_dma_buffers()
*
* Set the count for all transmit buffers to 0 to indicate the
* buffer is available for use and set the current buffer to the
* first buffer. This effectively makes all buffers free and
* discards any data in buffers.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info )
{
unsigned int i;
for ( i = 0; i < info->tx_buffer_count; i++ ) {
*((unsigned long *)&(info->tx_buffer_list[i].count)) = 0;
}
info->current_tx_buffer = 0;
info->start_tx_dma_buffer = 0;
info->tx_dma_buffers_used = 0;
info->get_tx_holding_index = 0;
info->put_tx_holding_index = 0;
info->tx_holding_count = 0;
} /* end of mgsl_reset_tx_dma_buffers() */
/*
* num_free_tx_dma_buffers()
*
* returns the number of free tx dma buffers available
*
* Arguments: info pointer to device instance data
* Return Value: number of free tx dma buffers
*/
static int num_free_tx_dma_buffers(struct mgsl_struct *info)
{
return info->tx_buffer_count - info->tx_dma_buffers_used;
}
/*
* mgsl_reset_rx_dma_buffers()
*
* Set the count for all receive buffers to DMABUFFERSIZE
* and set the current buffer to the first buffer. This effectively
* makes all buffers free and discards any data in buffers.
*
* Arguments: info pointer to device instance data
* Return Value: None
*/
static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
{
unsigned int i;
for ( i = 0; i < info->rx_buffer_count; i++ ) {
*((unsigned long *)&(info->rx_buffer_list[i].count)) = DMABUFFERSIZE;
// info->rx_buffer_list[i].count = DMABUFFERSIZE;
// info->rx_buffer_list[i].status = 0;
}
info->current_rx_buffer = 0;
} /* end of mgsl_reset_rx_dma_buffers() */
/*
* mgsl_free_rx_frame_buffers()
*
* Free the receive buffers used by a received SDLC
* frame such that the buffers can be reused.
*
* Arguments:
*
* info pointer to device instance data
* StartIndex index of 1st receive buffer of frame
* EndIndex index of last receive buffer of frame
*
* Return Value: None
*/
static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
{
bool Done = false;
DMABUFFERENTRY *pBufEntry;
unsigned int Index;
/* Starting with 1st buffer entry of the frame clear the status */
/* field and set the count field to DMA Buffer Size. */
Index = StartIndex;
while( !Done ) {
pBufEntry = &(info->rx_buffer_list[Index]);
if ( Index == EndIndex ) {
/* This is the last buffer of the frame! */
Done = true;
}
/* reset current buffer for reuse */
// pBufEntry->status = 0;
// pBufEntry->count = DMABUFFERSIZE;
*((unsigned long *)&(pBufEntry->count)) = DMABUFFERSIZE;
/* advance to next buffer entry in linked list */
Index++;
if ( Index == info->rx_buffer_count )
Index = 0;
}
/* set current buffer to next buffer after last buffer of frame */
info->current_rx_buffer = Index;
} /* end of mgsl_free_rx_frame_buffers() */
/* mgsl_get_rx_frame()
*
* This function attempts to return a received SDLC frame from the
* receive DMA buffers. Only frames received without errors are returned.
*
* Arguments: info pointer to device extension
* Return Value: true if frame returned, otherwise false
*/
static bool mgsl_get_rx_frame(struct mgsl_struct *info)
{
unsigned int StartIndex, EndIndex; /* index of 1st and last buffers of Rx frame */
unsigned short status;
DMABUFFERENTRY *pBufEntry;
unsigned int framesize = 0;
bool ReturnCode = false;
unsigned long flags;
struct tty_struct *tty = info->port.tty;
bool return_frame = false;
/*
* current_rx_buffer points to the 1st buffer of the next available
* receive frame. To find the last buffer of the frame look for
* a non-zero status field in the buffer entries. (The status
* field is set by the 16C32 after completing a receive frame.)
*/
StartIndex = EndIndex = info->current_rx_buffer;
while( !info->rx_buffer_list[EndIndex].status ) {
/*
* If the count field of the buffer entry is non-zero then
* this buffer has not been used. (The 16C32 clears the count
* field when it starts using the buffer.) If an unused buffer
* is encountered then there are no frames available.
*/
if ( info->rx_buffer_list[EndIndex].count )
goto Cleanup;
/* advance to next buffer entry in linked list */
EndIndex++;
if ( EndIndex == info->rx_buffer_count )
EndIndex = 0;
/* if entire list searched then no frame available */
if ( EndIndex == StartIndex ) {
/* If this occurs then something bad happened,
* all buffers have been 'used' but none mark
* the end of a frame. Reset buffers and receiver.
*/
if ( info->rx_enabled ){
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_start_receiver(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
goto Cleanup;
}
}
/* check status of receive frame */
status = info->rx_buffer_list[EndIndex].status;
if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
if ( status & RXSTATUS_SHORT_FRAME )
info->icount.rxshort++;
else if ( status & RXSTATUS_ABORT )
info->icount.rxabort++;
else if ( status & RXSTATUS_OVERRUN )
info->icount.rxover++;
else {
info->icount.rxcrc++;
if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
return_frame = true;
}
framesize = 0;
#if SYNCLINK_GENERIC_HDLC
{
info->netdev->stats.rx_errors++;
info->netdev->stats.rx_frame_errors++;
}
#endif
} else
return_frame = true;
if ( return_frame ) {
/* receive frame has no errors, get frame size.
* The frame size is the starting value of the RCC (which was
* set to 0xffff) minus the ending value of the RCC (decremented
* once for each receive character) minus 2 for the 16-bit CRC.
*/
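/* Worked example (illustrative): an ending RCC of 0xffd0 means
* 0xffff - 0xffd0 = 47 bytes were received; with a 16-bit CRC the
* size reported to the user is 45.
*/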
framesize = RCLRVALUE - info->rx_buffer_list[EndIndex].rcc;
/* adjust frame size for CRC if any */
if ( info->params.crc_type == HDLC_CRC_16_CCITT )
framesize -= 2;
else if ( info->params.crc_type == HDLC_CRC_32_CCITT )
framesize -= 4;
}
if ( debug_level >= DEBUG_LEVEL_BH )
printk("%s(%d):mgsl_get_rx_frame(%s) status=%04X size=%d\n",
__FILE__,__LINE__,info->device_name,status,framesize);
if ( debug_level >= DEBUG_LEVEL_DATA )
mgsl_trace_block(info,info->rx_buffer_list[StartIndex].virt_addr,
min_t(int, framesize, DMABUFFERSIZE),0);
if (framesize) {
if ( ( (info->params.crc_type & HDLC_CRC_RETURN_EX) &&
((framesize+1) > info->max_frame_size) ) ||
(framesize > info->max_frame_size) )
info->icount.rxlong++;
else {
/* copy dma buffer(s) to contiguous intermediate buffer */
int copy_count = framesize;
int index = StartIndex;
unsigned char *ptmp = info->intermediate_rxbuffer;
if ( !(status & RXSTATUS_CRC_ERROR))
info->icount.rxok++;
while(copy_count) {
int partial_count;
if ( copy_count > DMABUFFERSIZE )
partial_count = DMABUFFERSIZE;
else
partial_count = copy_count;
pBufEntry = &(info->rx_buffer_list[index]);
memcpy( ptmp, pBufEntry->virt_addr, partial_count );
ptmp += partial_count;
copy_count -= partial_count;
if ( ++index == info->rx_buffer_count )
index = 0;
}
if ( info->params.crc_type & HDLC_CRC_RETURN_EX ) {
++framesize;
*ptmp = (status & RXSTATUS_CRC_ERROR ?
RX_CRC_ERROR :
RX_OK);
if ( debug_level >= DEBUG_LEVEL_DATA )
printk("%s(%d):mgsl_get_rx_frame(%s) rx frame status=%d\n",
__FILE__,__LINE__,info->device_name,
*ptmp);
}
#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_rx(info,info->intermediate_rxbuffer,framesize);
else
#endif
ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
}
}
/* Free the buffers used by this frame. */
mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
ReturnCode = true;
Cleanup:
if ( info->rx_enabled && info->rx_overflow ) {
/* The receiver needs to be restarted because of
* a receive overflow (buffer or FIFO). If the
* receive buffers are now empty, then restart receiver.
*/
if ( !info->rx_buffer_list[EndIndex].status &&
info->rx_buffer_list[EndIndex].count ) {
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_start_receiver(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
}
return ReturnCode;
} /* end of mgsl_get_rx_frame() */
/* mgsl_get_raw_rx_frame()
*
* This function attempts to return a received frame from the
* receive DMA buffers when running in external loop mode. In this mode,
* we will return at most one DMABUFFERSIZE frame to the application.
* The USC receiver is triggering off of DCD going active to start a new
* frame, and DCD going inactive to terminate the frame (similar to
* processing a closing flag character).
*
* In this routine, we will return DMABUFFERSIZE "chunks" at a time.
* If DCD goes inactive, the last Rx DMA Buffer will have a non-zero
* status field and the RCC field will indicate the length of the
* entire received frame. We take this RCC field and get the modulus
* of RCC and DMABUFFERSIZE to determine the number of bytes in the
* last Rx DMA buffer and return that last portion of the frame.
*
* Arguments: info pointer to device extension
* Return Value: true if frame returned, otherwise false
*/
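/*
* Worked example (illustrative numbers, DMABUFFERSIZE = 4096): for a
* 10000 byte frame the first two buffers were already returned as
* full 4096 byte chunks, and the final buffer holds
* 10000 % 4096 = 1808 bytes, which is what this routine returns.
*/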
static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
{
unsigned int CurrentIndex, NextIndex;
unsigned short status;
DMABUFFERENTRY *pBufEntry;
unsigned int framesize = 0;
bool ReturnCode = false;
unsigned long flags;
struct tty_struct *tty = info->port.tty;
/*
* current_rx_buffer points to the 1st buffer of the next available
* receive frame. The status field is set by the 16C32 after
* completing a receive frame. If the status field of this buffer
* is zero, either the USC is still filling this buffer or this
* is one of a series of buffers making up a received frame.
*
* If the count field of this buffer is zero, the USC is either
* using this buffer or has used this buffer. Look at the count
* field of the next buffer. If that next buffer's count is
* non-zero, the USC is still actively using the current buffer.
* Otherwise, if the next buffer's count field is zero, the
* current buffer is complete and the USC is using the next
* buffer.
*/
CurrentIndex = NextIndex = info->current_rx_buffer;
++NextIndex;
if ( NextIndex == info->rx_buffer_count )
NextIndex = 0;
if ( info->rx_buffer_list[CurrentIndex].status != 0 ||
(info->rx_buffer_list[CurrentIndex].count == 0 &&
info->rx_buffer_list[NextIndex].count == 0)) {
/*
* Either the status field of this dma buffer is non-zero
* (indicating the last buffer of a receive frame) or the next
* buffer is marked as in use -- implying this buffer is complete
* and is an intermediate buffer for this received frame.
*/
status = info->rx_buffer_list[CurrentIndex].status;
if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
if ( status & RXSTATUS_SHORT_FRAME )
info->icount.rxshort++;
else if ( status & RXSTATUS_ABORT )
info->icount.rxabort++;
else if ( status & RXSTATUS_OVERRUN )
info->icount.rxover++;
else
info->icount.rxcrc++;
framesize = 0;
} else {
/*
* A receive frame is available, get frame size and status.
*
* The frame size is the starting value of the RCC (which was
* set to 0xffff) minus the ending value of the RCC (decremented
* once for each receive character) minus 2 or 4 for the 16-bit
* or 32-bit CRC.
*
* If the status field is zero, this is an intermediate buffer.
* Its size is 4K.
*
* If the DMA Buffer Entry's Status field is non-zero, the
* receive operation completed normally (ie: DCD dropped). The
* RCC field is valid and holds the received frame size.
* It is possible that the RCC field will be zero on a DMA buffer
* entry with a non-zero status. This can occur if the total
* frame size (number of bytes between the time DCD goes active
* to the time DCD goes inactive) exceeds 65535 bytes. In this
* case the 16C32 has underrun the RCC count and appears to
* stop updating this counter, so the actual received frame
* size is unknown. If this happens (non-zero status and zero RCC),
* simply return the entire RxDMA Buffer
*/
if ( status ) {
/*
* In the event that the final RxDMA Buffer is
* terminated with a non-zero status and the RCC
* field is zero, we interpret this as the RCC
* having underflowed (received frame > 65535 bytes).
*
* Signal the event to the user by passing back
* a status of RxStatus_CrcError, returning the full
* buffer, and letting the app figure out what data is
* actually valid.
*/
if ( info->rx_buffer_list[CurrentIndex].rcc )
framesize = RCLRVALUE - info->rx_buffer_list[CurrentIndex].rcc;
else
framesize = DMABUFFERSIZE;
}
else
framesize = DMABUFFERSIZE;
}
if ( framesize > DMABUFFERSIZE ) {
/*
* if running in raw sync mode, ISR handler for
* End Of Buffer events terminates all buffers at 4K.
* If the reported frame size is >4K, get the
* actual number of bytes of the frame in this buffer.
*/
framesize = framesize % DMABUFFERSIZE;
}
if ( debug_level >= DEBUG_LEVEL_BH )
printk("%s(%d):mgsl_get_raw_rx_frame(%s) status=%04X size=%d\n",
__FILE__,__LINE__,info->device_name,status,framesize);
if ( debug_level >= DEBUG_LEVEL_DATA )
mgsl_trace_block(info,info->rx_buffer_list[CurrentIndex].virt_addr,
min_t(int, framesize, DMABUFFERSIZE),0);
if (framesize) {
/* copy dma buffer(s) to contiguous intermediate buffer */
/* NOTE: we never copy more than DMABUFFERSIZE bytes */
pBufEntry = &(info->rx_buffer_list[CurrentIndex]);
memcpy( info->intermediate_rxbuffer, pBufEntry->virt_addr, framesize);
info->icount.rxok++;
ldisc_receive_buf(tty, info->intermediate_rxbuffer, info->flag_buf, framesize);
}
/* Free the buffers used by this frame. */
mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
ReturnCode = true;
}
if ( info->rx_enabled && info->rx_overflow ) {
/* The receiver needs to be restarted because of
* a receive overflow (buffer or FIFO). If the
* receive buffers are now empty, then restart receiver.
*/
if ( !info->rx_buffer_list[CurrentIndex].status &&
info->rx_buffer_list[CurrentIndex].count ) {
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_start_receiver(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
}
return ReturnCode;
} /* end of mgsl_get_raw_rx_frame() */
/* mgsl_load_tx_dma_buffer()
*
* Load the transmit DMA buffer with the specified data.
*
* Arguments:
*
* info pointer to device extension
* Buffer pointer to buffer containing frame to load
* BufferSize size in bytes of frame in Buffer
*
* Return Value: None
*/
static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
const char *Buffer, unsigned int BufferSize)
{
unsigned short Copycount;
unsigned int i = 0;
DMABUFFERENTRY *pBufEntry;
if ( debug_level >= DEBUG_LEVEL_DATA )
mgsl_trace_block(info,Buffer, min_t(int, BufferSize, DMABUFFERSIZE), 1);
if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
/* set CMR:13 to start transmit when
* next GoAhead (abort) is received
*/
info->cmr_value |= BIT13;
}
/* begin loading the frame in the next available tx dma
* buffer, remember it's starting location for setting
* up tx dma operation
*/
i = info->current_tx_buffer;
info->start_tx_dma_buffer = i;
/* Setup the status and RCC (Frame Size) fields of the 1st */
/* buffer entry in the transmit DMA buffer list. */
info->tx_buffer_list[i].status = info->cmr_value & 0xf000;
info->tx_buffer_list[i].rcc = BufferSize;
info->tx_buffer_list[i].count = BufferSize;
/* Copy frame data from 1st source buffer to the DMA buffers. */
/* The frame data may span multiple DMA buffers. */
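/* Example (illustrative): a 10000 byte frame with 4K DMA buffers */
/* spans three entries holding 4096, 4096 and 1808 bytes. */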
while( BufferSize ){
/* Get a pointer to next DMA buffer entry. */
pBufEntry = &info->tx_buffer_list[i++];
if ( i == info->tx_buffer_count )
i=0;
/* Calculate the number of bytes that can be copied from */
/* the source buffer to this DMA buffer. */
if ( BufferSize > DMABUFFERSIZE )
Copycount = DMABUFFERSIZE;
else
Copycount = BufferSize;
/* Actually copy data from source buffer to DMA buffer. */
/* Also set the data count for this individual DMA buffer. */
if ( info->bus_type == MGSL_BUS_TYPE_PCI )
mgsl_load_pci_memory(pBufEntry->virt_addr, Buffer,Copycount);
else
memcpy(pBufEntry->virt_addr, Buffer, Copycount);
pBufEntry->count = Copycount;
/* Advance source pointer and reduce remaining data count. */
Buffer += Copycount;
BufferSize -= Copycount;
++info->tx_dma_buffers_used;
}
/* remember next available tx dma buffer */
info->current_tx_buffer = i;
} /* end of mgsl_load_tx_dma_buffer() */
/*
* mgsl_register_test()
*
* Performs a register test of the 16C32.
*
* Arguments: info pointer to device instance data
* Return Value: true if test passed, otherwise false
*/
static bool mgsl_register_test( struct mgsl_struct *info )
{
static unsigned short BitPatterns[] =
{ 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
unsigned int i;
bool rc = true;
unsigned long flags;
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_reset(info);
/* Verify the reset state of some registers. */
if ( (usc_InReg( info, SICR ) != 0) ||
(usc_InReg( info, IVR ) != 0) ||
(usc_InDmaReg( info, DIVR ) != 0) ){
rc = false;
}
if ( rc ){
/* Write bit patterns to various registers, offsetting the */
/* pattern index per register, then read back and verify values. */
for ( i = 0 ; i < Patterncount ; i++ ) {
usc_OutReg( info, TC0R, BitPatterns[i] );
usc_OutReg( info, TC1R, BitPatterns[(i+1)%Patterncount] );
usc_OutReg( info, TCLR, BitPatterns[(i+2)%Patterncount] );
usc_OutReg( info, RCLR, BitPatterns[(i+3)%Patterncount] );
usc_OutReg( info, RSR, BitPatterns[(i+4)%Patterncount] );
usc_OutDmaReg( info, TBCR, BitPatterns[(i+5)%Patterncount] );
if ( (usc_InReg( info, TC0R ) != BitPatterns[i]) ||
(usc_InReg( info, TC1R ) != BitPatterns[(i+1)%Patterncount]) ||
(usc_InReg( info, TCLR ) != BitPatterns[(i+2)%Patterncount]) ||
(usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
(usc_InReg( info, RSR ) != BitPatterns[(i+4)%Patterncount]) ||
(usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
rc = false;
break;
}
}
}
usc_reset(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
return rc;
} /* end of mgsl_register_test() */
/* mgsl_irq_test() Perform interrupt test of the 16C32.
*
* Arguments: info pointer to device instance data
* Return Value: true if test passed, otherwise false
*/
static bool mgsl_irq_test( struct mgsl_struct *info )
{
unsigned long EndTime;
unsigned long flags;
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_reset(info);
/*
* Setup 16C32 to interrupt on TxC pin (14MHz clock) transition.
* The ISR sets irq_occurred to true.
*/
info->irq_occurred = false;
/* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
/* This connects the IRQ request signal to the ISA bus */
/* on the ISA adapter. This has no effect for the PCI adapter */
usc_OutReg( info, PCR, (unsigned short)((usc_InReg(info, PCR) | BIT13) & ~BIT12) );
usc_EnableMasterIrqBit(info);
usc_EnableInterrupts(info, IO_PIN);
usc_ClearIrqPendingBits(info, IO_PIN);
usc_UnlatchIostatusBits(info, MISCSTATUS_TXC_LATCHED);
usc_EnableStatusIrqs(info, SICR_TXC_ACTIVE + SICR_TXC_INACTIVE);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
EndTime=100;
while( EndTime-- && !info->irq_occurred ) {
msleep_interruptible(10);
}
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_reset(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
return info->irq_occurred;
} /* end of mgsl_irq_test() */
/* mgsl_dma_test()
*
* Perform a DMA test of the 16C32. A small frame is
* transmitted via DMA from a transmit buffer to a receive buffer
* using single buffer DMA mode.
*
* Arguments: info pointer to device instance data
* Return Value: true if test passed, otherwise false
*/
static bool mgsl_dma_test( struct mgsl_struct *info )
{
unsigned short FifoLevel;
unsigned long phys_addr;
unsigned int FrameSize;
unsigned int i;
char *TmpPtr;
bool rc = true;
unsigned short status=0;
unsigned long EndTime;
unsigned long flags;
MGSL_PARAMS tmp_params;
/* save current port options */
memcpy(&tmp_params,&info->params,sizeof(MGSL_PARAMS));
/* load default port options */
memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
#define TESTFRAMESIZE 40
spin_lock_irqsave(&info->irq_spinlock,flags);
/* setup 16C32 for SDLC DMA transfer mode */
usc_reset(info);
usc_set_sdlc_mode(info);
usc_enable_loopback(info,1);
/* Reprogram the RDMR so that the 16C32 does NOT clear the count
* field of the buffer entry after fetching buffer address. This
* way we can detect a DMA failure for a DMA read (which should be
* non-destructive to system memory) before we try and write to
* memory (where a failure could corrupt system memory).
*/
/* Receive DMA mode Register (RDMR)
*
* <15..14> 11 DMA mode = Linked List Buffer mode
* <13> 1 RSBinA/L = store Rx status Block in List entry
* <12> 0 1 = Clear count of List Entry after fetching
* <11..10> 00 Address mode = Increment
* <9> 1 Terminate Buffer on RxBound
* <8> 0 Bus Width = 16bits
* <7..0> ? status Bits (write as 0s)
*
* 1110 0010 0000 0000 = 0xe200
*/
usc_OutDmaReg( info, RDMR, 0xe200 );
spin_unlock_irqrestore(&info->irq_spinlock,flags);
/* SETUP TRANSMIT AND RECEIVE DMA BUFFERS */
FrameSize = TESTFRAMESIZE;
/* setup 1st transmit buffer entry: */
/* with frame size and transmit control word */
info->tx_buffer_list[0].count = FrameSize;
info->tx_buffer_list[0].rcc = FrameSize;
info->tx_buffer_list[0].status = 0x4000;
/* build a transmit frame in 1st transmit DMA buffer */
TmpPtr = info->tx_buffer_list[0].virt_addr;
for (i = 0; i < FrameSize; i++ )
*TmpPtr++ = i;
/* setup 1st receive buffer entry: */
/* clear status, set max receive buffer size */
info->rx_buffer_list[0].status = 0;
info->rx_buffer_list[0].count = FrameSize + 4;
/* zero out the 1st receive buffer */
memset( info->rx_buffer_list[0].virt_addr, 0, FrameSize + 4 );
/* Set count field of next buffer entries to prevent */
/* 16C32 from using buffers after the 1st one. */
info->tx_buffer_list[1].count = 0;
info->rx_buffer_list[1].count = 0;
/***************************/
/* Program 16C32 receiver. */
/***************************/
spin_lock_irqsave(&info->irq_spinlock,flags);
/* setup DMA transfers */
usc_RTCmd( info, RTCmd_PurgeRxFifo );
/* program 16C32 receiver with physical address of 1st DMA buffer entry */
phys_addr = info->rx_buffer_list[0].phys_entry;
usc_OutDmaReg( info, NRARL, (unsigned short)phys_addr );
usc_OutDmaReg( info, NRARU, (unsigned short)(phys_addr >> 16) );
/* Clear the Rx DMA status bits (read RDMR) and start channel */
usc_InDmaReg( info, RDMR );
usc_DmaCmd( info, DmaCmd_InitRxChannel );
/* Enable Receiver (RMR <1..0> = 10) */
usc_OutReg( info, RMR, (unsigned short)((usc_InReg(info, RMR) & 0xfffc) | 0x0002) );
spin_unlock_irqrestore(&info->irq_spinlock,flags);
/*************************************************************/
/* WAIT FOR RECEIVER TO DMA ALL PARAMETERS FROM BUFFER ENTRY */
/*************************************************************/
/* Wait 100ms for interrupt. */
EndTime = jiffies + msecs_to_jiffies(100);
for(;;) {
if (time_after(jiffies, EndTime)) {
rc = false;
break;
}
spin_lock_irqsave(&info->irq_spinlock,flags);
status = usc_InDmaReg( info, RDMR );
spin_unlock_irqrestore(&info->irq_spinlock,flags);
if ( !(status & BIT4) && (status & BIT5) ) {
/* INITG (BIT 4) is inactive (no entry read in progress) AND */
/* BUSY (BIT 5) is active (channel still active). */
/* This means the buffer entry read has completed. */
break;
}
}
/******************************/
/* Program 16C32 transmitter. */
/******************************/
spin_lock_irqsave(&info->irq_spinlock,flags);
/* Program the Transmit Character Length Register (TCLR) */
/* and clear FIFO (TCC is loaded with TCLR on FIFO clear) */
usc_OutReg( info, TCLR, (unsigned short)info->tx_buffer_list[0].count );
usc_RTCmd( info, RTCmd_PurgeTxFifo );
/* Program the address of the 1st DMA Buffer Entry in linked list */
phys_addr = info->tx_buffer_list[0].phys_entry;
usc_OutDmaReg( info, NTARL, (unsigned short)phys_addr );
usc_OutDmaReg( info, NTARU, (unsigned short)(phys_addr >> 16) );
/* unlatch Tx status bits, and start transmit channel. */
usc_OutReg( info, TCSR, (unsigned short)(( usc_InReg(info, TCSR) & 0x0f00) | 0xfa) );
usc_DmaCmd( info, DmaCmd_InitTxChannel );
/* wait for DMA controller to fill transmit FIFO */
usc_TCmd( info, TCmd_SelectTicrTxFifostatus );
spin_unlock_irqrestore(&info->irq_spinlock,flags);
/**********************************/
/* WAIT FOR TRANSMIT FIFO TO FILL */
/**********************************/
/* Wait 100ms */
EndTime = jiffies + msecs_to_jiffies(100);
for(;;) {
if (time_after(jiffies, EndTime)) {
rc = false;
break;
}
spin_lock_irqsave(&info->irq_spinlock,flags);
FifoLevel = usc_InReg(info, TICR) >> 8;
spin_unlock_irqrestore(&info->irq_spinlock,flags);
if ( FifoLevel < 16 )
break;
else
if ( FrameSize < 32 ) {
/* This frame is smaller than the entire transmit FIFO */
/* so wait for the entire frame to be loaded. */
if ( FifoLevel <= (32 - FrameSize) )
break;
}
}
if ( rc )
{
/* Enable 16C32 transmitter. */
spin_lock_irqsave(&info->irq_spinlock,flags);
/* Transmit mode Register (TMR), <1..0> = 10, Enable Transmitter */
usc_TCmd( info, TCmd_SendFrame );
usc_OutReg( info, TMR, (unsigned short)((usc_InReg(info, TMR) & 0xfffc) | 0x0002) );
spin_unlock_irqrestore(&info->irq_spinlock,flags);
/******************************/
/* WAIT FOR TRANSMIT COMPLETE */
/******************************/
/* Wait 100ms */
EndTime = jiffies + msecs_to_jiffies(100);
/* While timer not expired wait for transmit complete */
spin_lock_irqsave(&info->irq_spinlock,flags);
status = usc_InReg( info, TCSR );
spin_unlock_irqrestore(&info->irq_spinlock,flags);
while ( !(status & (BIT6 | BIT5 | BIT4 | BIT2 | BIT1)) ) {
if (time_after(jiffies, EndTime)) {
rc = false;
break;
}
spin_lock_irqsave(&info->irq_spinlock,flags);
status = usc_InReg( info, TCSR );
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
}
if ( rc ){
/* CHECK FOR TRANSMIT ERRORS */
if ( status & (BIT5 | BIT1) )
rc = false;
}
if ( rc ) {
/* WAIT FOR RECEIVE COMPLETE */
/* Wait 100ms */
EndTime = jiffies + msecs_to_jiffies(100);
/* Wait for 16C32 to write receive status to buffer entry. */
status=info->rx_buffer_list[0].status;
while ( status == 0 ) {
if (time_after(jiffies, EndTime)) {
rc = false;
break;
}
status=info->rx_buffer_list[0].status;
}
}
if ( rc ) {
/* CHECK FOR RECEIVE ERRORS */
status = info->rx_buffer_list[0].status;
if ( status & (BIT8 | BIT3 | BIT1) ) {
/* receive error has occurred */
rc = false;
} else {
if ( memcmp( info->tx_buffer_list[0].virt_addr ,
info->rx_buffer_list[0].virt_addr, FrameSize ) ){
rc = false;
}
}
}
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_reset( info );
spin_unlock_irqrestore(&info->irq_spinlock,flags);
/* restore current port options */
memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
return rc;
} /* end of mgsl_dma_test() */
/* mgsl_adapter_test()
*
* Perform the register, IRQ, and DMA tests for the 16C32.
*
* Arguments: info pointer to device instance data
* Return Value: 0 if success, otherwise -ENODEV
*/
static int mgsl_adapter_test( struct mgsl_struct *info )
{
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):Testing device %s\n",
__FILE__,__LINE__,info->device_name );
if ( !mgsl_register_test( info ) ) {
info->init_error = DiagStatus_AddressFailure;
printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
__FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
return -ENODEV;
}
if ( !mgsl_irq_test( info ) ) {
info->init_error = DiagStatus_IrqFailure;
printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
__FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
return -ENODEV;
}
if ( !mgsl_dma_test( info ) ) {
info->init_error = DiagStatus_DmaFailure;
printk( "%s(%d):DMA test failure for device %s DMA=%d\n",
__FILE__,__LINE__,info->device_name, (unsigned short)(info->dma_level) );
return -ENODEV;
}
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):device %s passed diagnostics\n",
__FILE__,__LINE__,info->device_name );
return 0;
} /* end of mgsl_adapter_test() */
/* mgsl_memory_test()
*
* Test the shared memory on a PCI adapter.
*
* Arguments: info pointer to device instance data
* Return Value: true if test passed, otherwise false
*/
static bool mgsl_memory_test( struct mgsl_struct *info )
{
static unsigned long BitPatterns[] =
{ 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
unsigned long Patterncount = ARRAY_SIZE(BitPatterns);
unsigned long i;
unsigned long TestLimit = SHARED_MEM_ADDRESS_SIZE/sizeof(unsigned long);
unsigned long * TestAddr;
if ( info->bus_type != MGSL_BUS_TYPE_PCI )
return true;
TestAddr = (unsigned long *)info->memory_base;
/* Test data lines with test pattern at one location. */
for ( i = 0 ; i < Patterncount ; i++ ) {
*TestAddr = BitPatterns[i];
if ( *TestAddr != BitPatterns[i] )
return false;
}
/* Test address lines with incrementing pattern over */
/* entire address range. */
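/* Each longword gets its own byte offset (i * 4) as its data, so a */
/* stuck or shorted address line makes two locations alias and the */
/* readback below catches the mismatch. */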
for ( i = 0 ; i < TestLimit ; i++ ) {
*TestAddr = i * 4;
TestAddr++;
}
TestAddr = (unsigned long *)info->memory_base;
for ( i = 0 ; i < TestLimit ; i++ ) {
if ( *TestAddr != i * 4 )
return false;
TestAddr++;
}
memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
return true;
} /* End Of mgsl_memory_test() */
/* mgsl_load_pci_memory()
*
* Load a large block of data into the PCI shared memory.
* Use this instead of memcpy() or memmove() to move data
* into the PCI shared memory.
*
* Notes:
*
* This function prevents the PCI9050 interface chip from hogging
* the adapter local bus, which can starve the 16C32 by preventing
* 16C32 bus master cycles.
*
* The PCI9050 documentation says that the 9050 will always release
* control of the local bus after completing the current read
* or write operation.
*
* It appears that as long as the PCI9050 write FIFO is full, the
* PCI9050 treats all of the writes as a single burst transaction
* and will not release the bus. This causes DMA latency problems
* at high speeds when copying large data blocks to the shared
* memory.
*
* This function, in effect, breaks a large shared memory write
* into multiple transactions by interleaving a shared memory read,
* which flushes the write FIFO and 'completes' the write
* transaction. This allows any pending DMA request to gain control
* of the local bus in a timely fashion.
*
* Arguments:
*
* TargetPtr pointer to target address in PCI shared memory
* SourcePtr pointer to source buffer for data
* count count in bytes of data to copy
*
* Return Value: None
*/
static void mgsl_load_pci_memory( char* TargetPtr, const char* SourcePtr,
unsigned short count )
{
/* 16 32-bit writes @ 60ns each = 960ns max latency on local bus */
#define PCI_LOAD_INTERVAL 64
unsigned short Intervalcount = count / PCI_LOAD_INTERVAL;
unsigned short Index;
unsigned long Dummy;
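/*
* Illustrative trace for a hypothetical count of 200 bytes:
* Intervalcount = 200/64 = 3, so the loop performs three 64 byte
* copies, each followed by a dummy readback that flushes the PCI9050
* write FIFO; the final memcpy moves the remaining 200 % 64 = 8 bytes.
*/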
for ( Index = 0 ; Index < Intervalcount ; Index++ )
{
memcpy(TargetPtr, SourcePtr, PCI_LOAD_INTERVAL);
Dummy = *((volatile unsigned long *)TargetPtr);
TargetPtr += PCI_LOAD_INTERVAL;
SourcePtr += PCI_LOAD_INTERVAL;
}
memcpy( TargetPtr, SourcePtr, count % PCI_LOAD_INTERVAL );
} /* End Of mgsl_load_pci_memory() */
static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int count, int xmit)
{
int i;
int linecount;
if (xmit)
printk("%s tx data:\n",info->device_name);
else
printk("%s rx data:\n",info->device_name);
while(count) {
if (count > 16)
linecount = 16;
else
linecount = count;
for(i=0;i<linecount;i++)
printk("%02X ",(unsigned char)data[i]);
for(;i<17;i++)
printk(" ");
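/* ASCII column: octal 040-0176 is the printable range (' ' to '~') */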
for(i=0;i<linecount;i++) {
if (data[i]>=040 && data[i]<=0176)
printk("%c",data[i]);
else
printk(".");
}
printk("\n");
data += linecount;
count -= linecount;
}
} /* end of mgsl_trace_block() */
/* mgsl_tx_timeout()
*
* called when HDLC frame times out
* update stats and do tx completion processing
*
* Arguments: context pointer to device instance data
* Return Value: None
*/
static void mgsl_tx_timeout(unsigned long context)
{
struct mgsl_struct *info = (struct mgsl_struct*)context;
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_INFO )
printk( "%s(%d):mgsl_tx_timeout(%s)\n",
__FILE__,__LINE__,info->device_name);
if(info->tx_active &&
(info->params.mode == MGSL_MODE_HDLC ||
info->params.mode == MGSL_MODE_RAW) ) {
info->icount.txtimeout++;
}
spin_lock_irqsave(&info->irq_spinlock,flags);
info->tx_active = false;
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
usc_loopmode_cancel_transmit( info );
spin_unlock_irqrestore(&info->irq_spinlock,flags);
#if SYNCLINK_GENERIC_HDLC
if (info->netcount)
hdlcdev_tx_done(info);
else
#endif
mgsl_bh_transmit(info);
} /* end of mgsl_tx_timeout() */
/* signal that there are no more frames to send, so that
* line is 'released' by echoing RxD to TxD when current
* transmission is complete (or immediately if no tx in progress).
*/
static int mgsl_loopmode_send_done( struct mgsl_struct * info )
{
unsigned long flags;
spin_lock_irqsave(&info->irq_spinlock,flags);
if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
if (info->tx_active)
info->loopmode_send_done_requested = true;
else
usc_loopmode_send_done(info);
}
spin_unlock_irqrestore(&info->irq_spinlock,flags);
return 0;
}
/* release the line by echoing RxD to TxD
* upon completion of a transmit frame
*/
static void usc_loopmode_send_done( struct mgsl_struct * info )
{
info->loopmode_send_done_requested = false;
/* clear CMR:13 to 0 to start echoing RxData to TxData */
info->cmr_value &= ~BIT13;
usc_OutReg(info, CMR, info->cmr_value);
}
/* abort a transmit in progress while in HDLC LoopMode
*/
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
{
/* reset tx dma channel and purge TxFifo */
usc_RTCmd( info, RTCmd_PurgeTxFifo );
usc_DmaCmd( info, DmaCmd_ResetTxChannel );
usc_loopmode_send_done( info );
}
/* for HDLC/SDLC LoopMode, setting CMR:13 after the transmitter is enabled
* is an Insert Into Loop action. Upon receipt of a GoAhead sequence (RxAbort)
* we must clear CMR:13 to begin repeating TxData to RxData
*/
static void usc_loopmode_insert_request( struct mgsl_struct * info )
{
info->loopmode_insert_requested = true;
/* enable RxAbort irq. On next RxAbort, clear CMR:13 to
* begin repeating TxData on RxData (complete insertion)
*/
usc_OutReg( info, RICR,
(usc_InReg( info, RICR ) | RXSTATUS_ABORT_RECEIVED ) );
/* set CMR:13 to insert into loop on next GoAhead (RxAbort) */
info->cmr_value |= BIT13;
usc_OutReg(info, CMR, info->cmr_value);
}
/* return 1 if station is inserted into the loop, otherwise 0
*/
static int usc_loopmode_active( struct mgsl_struct * info)
{
return usc_InReg( info, CCSR ) & BIT7 ? 1 : 0 ;
}
#if SYNCLINK_GENERIC_HDLC
/**
* called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
* set encoding and frame check sequence (FCS) options
*
* dev pointer to network device structure
* encoding serial encoding setting
* parity FCS setting
*
* returns 0 if success, otherwise error code
*/
static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
struct mgsl_struct *info = dev_to_port(dev);
unsigned char new_encoding;
unsigned short new_crctype;
/* return error if TTY interface open */
if (info->port.count)
return -EBUSY;
switch (encoding)
{
case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break;
case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break;
case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break;
case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break;
case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break;
default: return -EINVAL;
}
switch (parity)
{
case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break;
case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break;
case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break;
default: return -EINVAL;
}
info->params.encoding = new_encoding;
info->params.crc_type = new_crctype;
/* if network interface up, reprogram hardware */
if (info->netcount)
mgsl_program_hw(info);
return 0;
}
/**
* called by generic HDLC layer to send frame
*
* skb socket buffer containing HDLC frame
* dev pointer to network device structure
*/
static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct mgsl_struct *info = dev_to_port(dev);
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
/* stop sending until this frame completes */
netif_stop_queue(dev);
/* copy data to device buffers */
info->xmit_cnt = skb->len;
mgsl_load_tx_dma_buffer(info, skb->data, skb->len);
/* update network statistics */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
/* done with socket buffer, so free it */
dev_kfree_skb(skb);
/* save start time for transmit timeout detection */
dev->trans_start = jiffies;
/* start hardware transmitter if necessary */
spin_lock_irqsave(&info->irq_spinlock,flags);
if (!info->tx_active)
usc_start_transmitter(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
return NETDEV_TX_OK;
}
/**
* called by network layer when interface enabled
* claim resources and initialize hardware
*
* dev pointer to network device structure
*
* returns 0 if success, otherwise error code
*/
static int hdlcdev_open(struct net_device *dev)
{
struct mgsl_struct *info = dev_to_port(dev);
int rc;
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
/* generic HDLC layer open processing */
if ((rc = hdlc_open(dev)))
return rc;
/* arbitrate between network and tty opens */
spin_lock_irqsave(&info->netlock, flags);
if (info->port.count != 0 || info->netcount != 0) {
printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
spin_unlock_irqrestore(&info->netlock, flags);
return -EBUSY;
}
info->netcount=1;
spin_unlock_irqrestore(&info->netlock, flags);
/* claim resources and init adapter */
if ((rc = startup(info)) != 0) {
spin_lock_irqsave(&info->netlock, flags);
info->netcount=0;
spin_unlock_irqrestore(&info->netlock, flags);
return rc;
}
/* assert RTS and DTR, apply hardware settings */
info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
mgsl_program_hw(info);
/* enable network layer transmit */
dev->trans_start = jiffies;
netif_start_queue(dev);
/* inform generic HDLC layer of current DCD status */
spin_lock_irqsave(&info->irq_spinlock, flags);
usc_get_serial_signals(info);
spin_unlock_irqrestore(&info->irq_spinlock, flags);
if (info->serial_signals & SerialSignal_DCD)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
return 0;
}
/**
 * hdlcdev_close - called by network layer when interface is disabled
 * @dev: pointer to network device structure
 *
 * Shutdown hardware and release resources.
 *
 * Returns 0 if success, otherwise error code.
 */
static int hdlcdev_close(struct net_device *dev)
{
struct mgsl_struct *info = dev_to_port(dev);
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
netif_stop_queue(dev);
/* shutdown adapter and release resources */
shutdown(info);
hdlc_close(dev);
spin_lock_irqsave(&info->netlock, flags);
info->netcount=0;
spin_unlock_irqrestore(&info->netlock, flags);
return 0;
}
/**
 * hdlcdev_ioctl - called by network layer to process IOCTL call to network device
 * @dev: pointer to network device structure
 * @ifr: pointer to network interface request structure
 * @cmd: IOCTL command code
 *
 * Returns 0 if success, otherwise error code.
 */
static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync;
struct mgsl_struct *info = dev_to_port(dev);
unsigned int flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
/* return error if TTY interface open */
if (info->port.count)
return -EBUSY;
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
switch(ifr->ifr_settings.type) {
case IF_GET_IFACE: /* return current sync_serial_settings */
ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
if (ifr->ifr_settings.size < size) {
ifr->ifr_settings.size = size; /* data size wanted */
return -ENOBUFS;
}
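/* derive the generic clock_type from the device clock source flags */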
flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
memset(&new_line, 0, sizeof(new_line));
switch (flags){
case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break;
case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break;
case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break;
case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break;
default: new_line.clock_type = CLOCK_DEFAULT;
}
new_line.clock_rate = info->params.clock_speed;
new_line.loopback = info->params.loopback ? 1:0;
if (copy_to_user(line, &new_line, size))
return -EFAULT;
return 0;
case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */
if(!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&new_line, line, size))
return -EFAULT;
switch (new_line.clock_type)
{
case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break;
case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break;
case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break;
case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break;
case CLOCK_DEFAULT: flags = info->params.flags &
(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break;
default: return -EINVAL;
}
if (new_line.loopback != 0 && new_line.loopback != 1)
return -EINVAL;
info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL |
HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN |
HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL |
HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN);
info->params.flags |= flags;
info->params.loopback = new_line.loopback;
if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG))
info->params.clock_speed = new_line.clock_rate;
else
info->params.clock_speed = 0;
/* if network interface up, reprogram hardware */
if (info->netcount)
mgsl_program_hw(info);
return 0;
default:
return hdlc_ioctl(dev, ifr, cmd);
}
}
/**
 * hdlcdev_tx_timeout - called by network layer when transmit timeout is detected
 * @dev: pointer to network device structure
 */
static void hdlcdev_tx_timeout(struct net_device *dev)
{
struct mgsl_struct *info = dev_to_port(dev);
unsigned long flags;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("hdlcdev_tx_timeout(%s)\n",dev->name);
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
spin_lock_irqsave(&info->irq_spinlock,flags);
usc_stop_transmitter(info);
spin_unlock_irqrestore(&info->irq_spinlock,flags);
netif_wake_queue(dev);
}
/**
 * hdlcdev_tx_done - called by device driver when transmit completes
 * @info: pointer to device instance information
 *
 * Reenable network layer transmit if stopped.
 */
static void hdlcdev_tx_done(struct mgsl_struct *info)
{
if (netif_queue_stopped(info->netdev))
netif_wake_queue(info->netdev);
}
/**
 * hdlcdev_rx - called by device driver when frame received
 * @info: pointer to device instance information
 * @buf: pointer to buffer containing frame data
 * @size: count of data bytes in buf
 *
 * Pass frame to network layer.
 */
static void hdlcdev_rx(struct mgsl_struct *info, char *buf, int size)
{
struct sk_buff *skb = dev_alloc_skb(size);
struct net_device *dev = info->netdev;
if (debug_level >= DEBUG_LEVEL_INFO)
printk("hdlcdev_rx(%s)\n", dev->name);
if (skb == NULL) {
printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n",
dev->name);
dev->stats.rx_dropped++;
return;
}
memcpy(skb_put(skb, size), buf, size);
skb->protocol = hdlc_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += size;
netif_rx(skb);
}
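/* Note: .ndo_start_xmit is the generic hdlc_start_xmit(), which
 * dispatches through the attached HDLC protocol and typically ends
 * up in the hdlc->xmit hook (hdlcdev_xmit above).
 */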
static const struct net_device_ops hdlcdev_ops = {
.ndo_open = hdlcdev_open,
.ndo_stop = hdlcdev_close,
.ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = hdlcdev_ioctl,
.ndo_tx_timeout = hdlcdev_tx_timeout,
};
/**
 * hdlcdev_init - called by device driver when adding device instance
 * @info: pointer to device instance information
 *
 * Do generic HDLC initialization.
 *
 * Returns 0 if success, otherwise error code.
 */
static int hdlcdev_init(struct mgsl_struct *info)
{
int rc;
struct net_device *dev;
hdlc_device *hdlc;
/* allocate and initialize network and HDLC layer objects */
if (!(dev = alloc_hdlcdev(info))) {
printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
return -ENOMEM;
}
/* for network layer reporting purposes only */
dev->base_addr = info->io_base;
dev->irq = info->irq_level;
dev->dma = info->dma_level;
/* network layer callbacks and settings */
dev->netdev_ops = &hdlcdev_ops;
dev->watchdog_timeo = 10 * HZ;
dev->tx_queue_len = 50;
/* generic HDLC layer callbacks and settings */
hdlc = dev_to_hdlc(dev);
hdlc->attach = hdlcdev_attach;
hdlc->xmit = hdlcdev_xmit;
/* register objects with HDLC layer */
if ((rc = register_hdlc_device(dev))) {
printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
free_netdev(dev);
return rc;
}
info->netdev = dev;
return 0;
}
/**
 * hdlcdev_exit - called by device driver when removing device instance
 * @info: pointer to device instance information
 *
 * Do generic HDLC cleanup.
 */
static void hdlcdev_exit(struct mgsl_struct *info)
{
unregister_hdlc_device(info->netdev);
free_netdev(info->netdev);
info->netdev = NULL;
}
#endif /* CONFIG_HDLC */
static int synclink_init_one (struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct mgsl_struct *info;
if (pci_enable_device(dev)) {
printk("error enabling pci device %p\n", dev);
return -EIO;
}
if (!(info = mgsl_allocate_device())) {
printk("can't allocate device instance data.\n");
return -EIO;
}
/* Copy user configuration info to device instance data */
info->io_base = pci_resource_start(dev, 2);
info->irq_level = dev->irq;
info->phys_memory_base = pci_resource_start(dev, 3);
/* Because ioremap only works on page boundaries we must map
 * a larger area than is actually implemented for the LCR
 * memory range. We map a full page starting at the page boundary.
 */
info->phys_lcr_base = pci_resource_start(dev, 0);
info->lcr_offset = info->phys_lcr_base & (PAGE_SIZE-1);
info->phys_lcr_base &= ~(PAGE_SIZE-1);
info->bus_type = MGSL_BUS_TYPE_PCI;
info->io_addr_size = 8;
info->irq_flags = IRQF_SHARED;
if (dev->device == 0x0210) {
/* Version 1 PCI9030 based universal PCI adapter */
info->misc_ctrl_value = 0x007c4080;
info->hw_version = 1;
} else {
/* Version 0 PCI9050 based 5V PCI adapter
* A PCI9050 bug prevents reading LCR registers if
* LCR base address bit 7 is set. Maintain shadow
* value so we can write to LCR misc control reg.
*/
info->misc_ctrl_value = 0x087e4546;
info->hw_version = 0;
}
mgsl_add_device(info);
return 0;
}
static void synclink_remove_one (struct pci_dev *dev)
{
}
| gpl-2.0 |
firefoxu8833/huawei-kernel-test | drivers/gpu/drm/nouveau/nouveau_fence.c | 1358 | 14542 | /*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_dma.h"
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
struct nouveau_fence {
struct nouveau_channel *channel;
struct kref refcount;
struct list_head entry;
uint32_t sequence;
bool signalled;
void (*work)(void *priv, bool signalled);
void *priv;
};
struct nouveau_semaphore {
struct kref ref;
struct drm_device *dev;
struct drm_mm_node *mem;
};
static inline struct nouveau_fence *
nouveau_fence(void *sync_obj)
{
return (struct nouveau_fence *)sync_obj;
}
static void
nouveau_fence_del(struct kref *ref)
{
struct nouveau_fence *fence =
container_of(ref, struct nouveau_fence, refcount);
nouveau_channel_ref(NULL, &fence->channel);
kfree(fence);
}
void
nouveau_fence_update(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct nouveau_fence *tmp, *fence;
uint32_t sequence;
spin_lock(&chan->fence.lock);
/* Fetch the last sequence if the channel is still up and running */
if (likely(!list_empty(&chan->fence.pending))) {
if (USE_REFCNT(dev))
sequence = nvchan_rd32(chan, 0x48);
else
sequence = atomic_read(&chan->fence.last_sequence_irq);
if (chan->fence.sequence_ack == sequence)
goto out;
chan->fence.sequence_ack = sequence;
}
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
sequence = fence->sequence;
fence->signalled = true;
list_del(&fence->entry);
if (unlikely(fence->work))
fence->work(fence->priv, true);
kref_put(&fence->refcount, nouveau_fence_del);
if (sequence == chan->fence.sequence_ack)
break;
}
out:
spin_unlock(&chan->fence.lock);
}
int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
bool emit)
{
struct nouveau_fence *fence;
int ret = 0;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
kref_init(&fence->refcount);
nouveau_channel_ref(chan, &fence->channel);
if (emit)
ret = nouveau_fence_emit(fence);
if (ret)
nouveau_fence_unref(&fence);
*pfence = fence;
return ret;
}
struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
}
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = fence->channel;
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
nouveau_fence_update(chan);
BUG_ON(chan->fence.sequence ==
chan->fence.sequence_ack - 1);
}
fence->sequence = ++chan->fence.sequence;
kref_get(&fence->refcount);
spin_lock(&chan->fence.lock);
list_add_tail(&fence->entry, &chan->fence.pending);
spin_unlock(&chan->fence.lock);
if (USE_REFCNT(dev)) {
if (dev_priv->card_type < NV_C0)
BEGIN_RING(chan, NvSubSw, 0x0050, 1);
else
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
} else {
BEGIN_RING(chan, NvSubSw, 0x0150, 1);
}
OUT_RING (chan, fence->sequence);
FIRE_RING(chan);
return 0;
}
void
nouveau_fence_work(struct nouveau_fence *fence,
void (*work)(void *priv, bool signalled),
void *priv)
{
BUG_ON(fence->work);
spin_lock(&fence->channel->fence.lock);
if (fence->signalled) {
work(priv, true);
} else {
fence->work = work;
fence->priv = priv;
}
spin_unlock(&fence->channel->fence.lock);
}
void
__nouveau_fence_unref(void **sync_obj)
{
struct nouveau_fence *fence = nouveau_fence(*sync_obj);
if (fence)
kref_put(&fence->refcount, nouveau_fence_del);
*sync_obj = NULL;
}
void *
__nouveau_fence_ref(void *sync_obj)
{
struct nouveau_fence *fence = nouveau_fence(sync_obj);
kref_get(&fence->refcount);
return sync_obj;
}
bool
__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
struct nouveau_fence *fence = nouveau_fence(sync_obj);
struct nouveau_channel *chan = fence->channel;
if (fence->signalled)
return true;
nouveau_fence_update(chan);
return fence->signalled;
}
int
__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
unsigned long timeout = jiffies + (3 * DRM_HZ);
unsigned long sleep_time = NSEC_PER_MSEC / 1000;
ktime_t t;
int ret = 0;
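/* Poll for completion; when "lazy", sleep with exponential backoff
 * starting at 1us and capped at 1ms per iteration, otherwise
 * busy-wait. Give up after 3 seconds.
 */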
while (1) {
if (__nouveau_fence_signalled(sync_obj, sync_arg))
break;
if (time_after_eq(jiffies, timeout)) {
ret = -EBUSY;
break;
}
__set_current_state(intr ? TASK_INTERRUPTIBLE
: TASK_UNINTERRUPTIBLE);
if (lazy) {
t = ktime_set(0, sleep_time);
schedule_hrtimeout(&t, HRTIMER_MODE_REL);
sleep_time *= 2;
if (sleep_time > NSEC_PER_MSEC)
sleep_time = NSEC_PER_MSEC;
}
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
}
__set_current_state(TASK_RUNNING);
return ret;
}
static struct nouveau_semaphore *
semaphore_alloc(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_semaphore *sema;
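/* pre-NV84 semaphores are a single 32-bit word, NV84+ use 16 bytes */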
int size = (dev_priv->chipset < 0x84) ? 4 : 16;
int ret, i;
if (!USE_SEMA(dev))
return NULL;
sema = kmalloc(sizeof(*sema), GFP_KERNEL);
if (!sema)
goto fail;
ret = drm_mm_pre_get(&dev_priv->fence.heap);
if (ret)
goto fail;
spin_lock(&dev_priv->fence.lock);
sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
if (sema->mem)
sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
spin_unlock(&dev_priv->fence.lock);
if (!sema->mem)
goto fail;
kref_init(&sema->ref);
sema->dev = dev;
for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
return sema;
fail:
kfree(sema);
return NULL;
}
static void
semaphore_free(struct kref *ref)
{
struct nouveau_semaphore *sema =
container_of(ref, struct nouveau_semaphore, ref);
struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
spin_lock(&dev_priv->fence.lock);
drm_mm_put_block(sema->mem);
spin_unlock(&dev_priv->fence.lock);
kfree(sema);
}
static void
semaphore_work(void *priv, bool signalled)
{
struct nouveau_semaphore *sema = priv;
struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
if (unlikely(!signalled))
nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
kref_put(&sema->ref, semaphore_free);
}
static int
semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
int ret;
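/* Three hardware generations: pre-NV84 goes through software (NV_SW)
 * methods on the NvSema DMA object, NV84+ uses semaphore methods
 * addressed by 64-bit virtual offset, and NVC0+ uses the Fermi
 * equivalents of the same.
 */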
if (dev_priv->chipset < 0x84) {
ret = RING_SPACE(chan, 4);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
OUT_RING (chan, NvSema);
OUT_RING (chan, sema->mem->start);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
OUT_RING (chan, chan->vram_handle);
BEGIN_RING(chan, NvSubSw, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 1); /* ACQUIRE_EQ */
} else {
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
}
/* Delay semaphore destruction until its work is done */
ret = nouveau_fence_new(chan, &fence, true);
if (ret)
return ret;
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
nouveau_fence_unref(&fence);
return 0;
}
static int
semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_fence *fence = NULL;
int ret;
if (dev_priv->chipset < 0x84) {
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
OUT_RING (chan, NvSema);
OUT_RING (chan, sema->mem->start);
BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
OUT_RING (chan, 1);
} else
if (dev_priv->chipset < 0xc0) {
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
ret = RING_SPACE(chan, 7);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
OUT_RING (chan, chan->vram_handle);
BEGIN_RING(chan, NvSubSw, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 2); /* RELEASE */
} else {
struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
u64 offset = vma->offset + sema->mem->start;
ret = RING_SPACE(chan, 5);
if (ret)
return ret;
BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
OUT_RING (chan, upper_32_bits(offset));
OUT_RING (chan, lower_32_bits(offset));
OUT_RING (chan, 1);
OUT_RING (chan, 0x1002); /* RELEASE */
}
/* Delay semaphore destruction until its work is done */
ret = nouveau_fence_new(chan, &fence, true);
if (ret)
return ret;
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
nouveau_fence_unref(&fence);
return 0;
}
int
nouveau_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *wchan)
{
struct nouveau_channel *chan = nouveau_fence_channel(fence);
struct drm_device *dev = wchan->dev;
struct nouveau_semaphore *sema;
int ret = 0;
if (likely(!chan || chan == wchan ||
nouveau_fence_signalled(fence)))
goto out;
sema = semaphore_alloc(dev);
if (!sema) {
/* Early card or broken userspace, fall back to
* software sync. */
ret = nouveau_fence_wait(fence, true, false);
goto out;
}
/* Try to take chan's mutex; if we can't take it right away
 * we have to fall back to software sync to prevent lock
 * ordering issues.
 */
if (!mutex_trylock(&chan->mutex)) {
ret = nouveau_fence_wait(fence, true, false);
goto out_unref;
}
/* Make wchan wait until it gets signalled */
ret = semaphore_acquire(wchan, sema);
if (ret)
goto out_unlock;
/* Signal the semaphore from chan */
ret = semaphore_release(chan, sema);
out_unlock:
mutex_unlock(&chan->mutex);
out_unref:
kref_put(&sema->ref, semaphore_free);
out:
if (chan)
nouveau_channel_put_unlocked(&chan);
return ret;
}
int
__nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
return 0;
}
int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *obj = NULL;
int ret;
if (dev_priv->card_type < NV_C0) {
/* Create an NV_SW object for various sync purposes */
ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
if (ret)
return ret;
ret = RING_SPACE(chan, 2);
if (ret)
return ret;
BEGIN_RING(chan, NvSubSw, 0, 1);
OUT_RING (chan, NvSw);
FIRE_RING (chan);
}
/* Setup area of memory shared between all channels for x-chan sync */
if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
mem->start << PAGE_SHIFT,
mem->size, NV_MEM_ACCESS_RW,
NV_MEM_TARGET_VRAM, &obj);
if (ret)
return ret;
ret = nouveau_ramht_insert(chan, NvSema, obj);
nouveau_gpuobj_ref(NULL, &obj);
if (ret)
return ret;
}
atomic_set(&chan->fence.last_sequence_irq, 0);
return 0;
}
void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
struct nouveau_fence *tmp, *fence;
spin_lock(&chan->fence.lock);
list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
fence->signalled = true;
list_del(&fence->entry);
if (unlikely(fence->work))
fence->work(fence->priv, false);
kref_put(&fence->refcount, nouveau_fence_del);
}
spin_unlock(&chan->fence.lock);
}
int
nouveau_fence_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
int ret;
/* Create a shared VRAM heap for cross-channel sync. */
if (USE_SEMA(dev)) {
ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
0, 0, &dev_priv->fence.bo);
if (ret)
return ret;
ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
if (ret)
goto fail;
ret = nouveau_bo_map(dev_priv->fence.bo);
if (ret)
goto fail;
ret = drm_mm_init(&dev_priv->fence.heap, 0,
dev_priv->fence.bo->bo.mem.size);
if (ret)
goto fail;
spin_lock_init(&dev_priv->fence.lock);
}
return 0;
fail:
nouveau_bo_unmap(dev_priv->fence.bo);
nouveau_bo_ref(NULL, &dev_priv->fence.bo);
return ret;
}
void
nouveau_fence_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
if (USE_SEMA(dev)) {
drm_mm_takedown(&dev_priv->fence.heap);
nouveau_bo_unmap(dev_priv->fence.bo);
nouveau_bo_unpin(dev_priv->fence.bo);
nouveau_bo_ref(NULL, &dev_priv->fence.bo);
}
}
| gpl-2.0 |
bm371613/zso3-kernel | net/ipv4/netfilter/ipt_rpfilter.c | 1614 | 3695 | /*
* Copyright (c) 2011 Florian Westphal <fw@strlen.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* based on fib_frontend.c; Author: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/route.h>
#include <linux/netfilter/xt_rpfilter.h>
#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_DESCRIPTION("iptables: ipv4 reverse path filter match");
/* don't try to find route from mcast/bcast/zeronet */
static __be32 rpfilter_get_saddr(__be32 addr)
{
if (ipv4_is_multicast(addr) || ipv4_is_lbcast(addr) ||
ipv4_is_zeronet(addr))
return 0;
return addr;
}
static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
const struct net_device *dev, u8 flags)
{
struct fib_result res;
bool dev_match;
struct net *net = dev_net(dev);
int ret __maybe_unused;
if (fib_lookup(net, fl4, &res))
return false;
if (res.type != RTN_UNICAST) {
if (res.type != RTN_LOCAL || !(flags & XT_RPFILTER_ACCEPT_LOCAL))
return false;
}
dev_match = false;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
for (ret = 0; ret < res.fi->fib_nhs; ret++) {
struct fib_nh *nh = &res.fi->fib_nh[ret];
if (nh->nh_dev == dev) {
dev_match = true;
break;
}
}
#else
if (FIB_RES_DEV(res) == dev)
dev_match = true;
#endif
if (dev_match || flags & XT_RPFILTER_LOOSE)
return FIB_RES_NH(res).nh_scope <= RT_SCOPE_HOST;
return dev_match;
}
static bool rpfilter_is_local(const struct sk_buff *skb)
{
const struct rtable *rt = skb_rtable(skb);
return rt && (rt->rt_flags & RTCF_LOCAL);
}
static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
const struct xt_rpfilter_info *info;
const struct iphdr *iph;
struct flowi4 flow;
bool invert;
info = par->matchinfo;
invert = info->flags & XT_RPFILTER_INVERT;
if (rpfilter_is_local(skb))
return true ^ invert;
iph = ip_hdr(skb);
if (ipv4_is_multicast(iph->daddr)) {
if (ipv4_is_zeronet(iph->saddr))
return ipv4_is_local_multicast(iph->daddr) ^ invert;
}
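/* Build the reverse flow: look up a route back toward the packet's
 * source, as if replying to it, and verify it leaves via the device
 * the packet arrived on.
 */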
flow.flowi4_iif = LOOPBACK_IFINDEX;
flow.daddr = iph->saddr;
flow.saddr = rpfilter_get_saddr(iph->daddr);
flow.flowi4_oif = 0;
flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
flow.flowi4_tos = RT_TOS(iph->tos);
flow.flowi4_scope = RT_SCOPE_UNIVERSE;
return rpfilter_lookup_reverse(&flow, par->in, info->flags) ^ invert;
}
static int rpfilter_check(const struct xt_mtchk_param *par)
{
const struct xt_rpfilter_info *info = par->matchinfo;
unsigned int options = ~XT_RPFILTER_OPTION_MASK;
if (info->flags & options) {
pr_info("unknown options encountered");
return -EINVAL;
}
if (strcmp(par->table, "mangle") != 0 &&
strcmp(par->table, "raw") != 0) {
pr_info("match only valid in the \'raw\' "
"or \'mangle\' tables, not \'%s\'.\n", par->table);
return -EINVAL;
}
return 0;
}
static struct xt_match rpfilter_mt_reg __read_mostly = {
.name = "rpfilter",
.family = NFPROTO_IPV4,
.checkentry = rpfilter_check,
.match = rpfilter_mt,
.matchsize = sizeof(struct xt_rpfilter_info),
.hooks = (1 << NF_INET_PRE_ROUTING),
.me = THIS_MODULE
};
static int __init rpfilter_mt_init(void)
{
return xt_register_match(&rpfilter_mt_reg);
}
static void __exit rpfilter_mt_exit(void)
{
xt_unregister_match(&rpfilter_mt_reg);
}
module_init(rpfilter_mt_init);
module_exit(rpfilter_mt_exit);
| gpl-2.0 |
cholokei/msm8660_test_kernel-1 | security/security.c | 1614 | 33297 | /*
* Security plug functions
*
* Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com>
* Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/ima.h>
/* Boot-time LSM user choice */
static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
CONFIG_DEFAULT_SECURITY;
/* things that live in capability.c */
extern void __init security_fixup_ops(struct security_operations *ops);
static struct security_operations *security_ops;
static struct security_operations default_security_ops = {
.name = "default",
};
static inline int __init verify(struct security_operations *ops)
{
/* verify the security_operations structure exists */
if (!ops)
return -EINVAL;
security_fixup_ops(ops);
return 0;
}
static void __init do_security_initcalls(void)
{
initcall_t *call;
call = __security_initcall_start;
while (call < __security_initcall_end) {
(*call) ();
call++;
}
}
/**
* security_init - initializes the security framework
*
* This should be called early in the kernel initialization sequence.
*/
int __init security_init(void)
{
printk(KERN_INFO "Security Framework initialized\n");
security_fixup_ops(&default_security_ops);
security_ops = &default_security_ops;
do_security_initcalls();
return 0;
}
void reset_security_ops(void)
{
security_ops = &default_security_ops;
}
/* Save user chosen LSM */
static int __init choose_lsm(char *str)
{
strncpy(chosen_lsm, str, SECURITY_NAME_MAX);
return 1;
}
__setup("security=", choose_lsm);
/**
* security_module_enable - Load given security module on boot ?
* @ops: a pointer to the struct security_operations that is to be checked.
*
* Each LSM must pass this method before registering its own operations
* to avoid security registration races. This method may also be used
* to check if your LSM is currently loaded during kernel initialization.
*
* Return true if:
* -The passed LSM is the one chosen by user at boot time,
* -or the passed LSM is configured as the default and the user did not
* choose an alternate LSM at boot time.
* Otherwise, return false.
*/
int __init security_module_enable(struct security_operations *ops)
{
return !strcmp(ops->name, chosen_lsm);
}
/**
* register_security - registers a security framework with the kernel
* @ops: a pointer to the struct security_options that is to be registered
*
* This function allows a security module to register itself with the
* kernel security subsystem. Some rudimentary checking is done on the @ops
* value passed to this function. You'll need to check first if your LSM
* is allowed to register its @ops by calling security_module_enable(@ops).
*
* If there is already a security module registered with the kernel,
* an error will be returned. Otherwise %0 is returned on success.
*/
int __init register_security(struct security_operations *ops)
{
if (verify(ops)) {
printk(KERN_DEBUG "%s could not verify "
"security_operations structure.\n", __func__);
return -EINVAL;
}
if (security_ops != &default_security_ops)
return -EAGAIN;
security_ops = ops;
return 0;
}
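/*
 * Illustrative sketch (hypothetical LSM, not part of this file):
 * a minimal module would combine the two calls above from its own
 * security initcall, e.g.:
 *
 *	static __init int mylsm_init(void)
 *	{
 *		if (!security_module_enable(&mylsm_ops))
 *			return 0;
 *		return register_security(&mylsm_ops);
 *	}
 *	security_initcall(mylsm_init);
 */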
/* Security operations */
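/* Each hook below delegates to the single registered security_ops;
 * this kernel does not stack LSMs, so exactly one module is active.
 */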
int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
return security_ops->ptrace_access_check(child, mode);
}
int security_ptrace_traceme(struct task_struct *parent)
{
return security_ops->ptrace_traceme(parent);
}
int security_capget(struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
{
return security_ops->capget(target, effective, inheritable, permitted);
}
int security_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted)
{
return security_ops->capset(new, old,
effective, inheritable, permitted);
}
int security_capable(struct user_namespace *ns, const struct cred *cred,
int cap)
{
return security_ops->capable(current, cred, ns, cap,
SECURITY_CAP_AUDIT);
}
int security_real_capable(struct task_struct *tsk, struct user_namespace *ns,
int cap)
{
const struct cred *cred;
int ret;
cred = get_task_cred(tsk);
ret = security_ops->capable(tsk, cred, ns, cap, SECURITY_CAP_AUDIT);
put_cred(cred);
return ret;
}
int security_real_capable_noaudit(struct task_struct *tsk,
struct user_namespace *ns, int cap)
{
const struct cred *cred;
int ret;
cred = get_task_cred(tsk);
ret = security_ops->capable(tsk, cred, ns, cap, SECURITY_CAP_NOAUDIT);
put_cred(cred);
return ret;
}
int security_quotactl(int cmds, int type, int id, struct super_block *sb)
{
return security_ops->quotactl(cmds, type, id, sb);
}
int security_quota_on(struct dentry *dentry)
{
return security_ops->quota_on(dentry);
}
int security_syslog(int type)
{
return security_ops->syslog(type);
}
int security_settime(const struct timespec *ts, const struct timezone *tz)
{
return security_ops->settime(ts, tz);
}
int security_vm_enough_memory(long pages)
{
WARN_ON(current->mm == NULL);
return security_ops->vm_enough_memory(current->mm, pages);
}
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
WARN_ON(mm == NULL);
return security_ops->vm_enough_memory(mm, pages);
}
int security_vm_enough_memory_kern(long pages)
{
/* If current is a kernel thread, current->mm is NULL and we
	   pass NULL; for this specific case that is fine */
return security_ops->vm_enough_memory(current->mm, pages);
}
int security_bprm_set_creds(struct linux_binprm *bprm)
{
return security_ops->bprm_set_creds(bprm);
}
int security_bprm_check(struct linux_binprm *bprm)
{
int ret;
ret = security_ops->bprm_check_security(bprm);
if (ret)
return ret;
return ima_bprm_check(bprm);
}
void security_bprm_committing_creds(struct linux_binprm *bprm)
{
security_ops->bprm_committing_creds(bprm);
}
void security_bprm_committed_creds(struct linux_binprm *bprm)
{
security_ops->bprm_committed_creds(bprm);
}
int security_bprm_secureexec(struct linux_binprm *bprm)
{
return security_ops->bprm_secureexec(bprm);
}
int security_sb_alloc(struct super_block *sb)
{
return security_ops->sb_alloc_security(sb);
}
void security_sb_free(struct super_block *sb)
{
security_ops->sb_free_security(sb);
}
int security_sb_copy_data(char *orig, char *copy)
{
return security_ops->sb_copy_data(orig, copy);
}
EXPORT_SYMBOL(security_sb_copy_data);
int security_sb_remount(struct super_block *sb, void *data)
{
return security_ops->sb_remount(sb, data);
}
int security_sb_kern_mount(struct super_block *sb, int flags, void *data)
{
return security_ops->sb_kern_mount(sb, flags, data);
}
int security_sb_show_options(struct seq_file *m, struct super_block *sb)
{
return security_ops->sb_show_options(m, sb);
}
int security_sb_statfs(struct dentry *dentry)
{
return security_ops->sb_statfs(dentry);
}
int security_sb_mount(char *dev_name, struct path *path,
char *type, unsigned long flags, void *data)
{
return security_ops->sb_mount(dev_name, path, type, flags, data);
}
int security_sb_umount(struct vfsmount *mnt, int flags)
{
return security_ops->sb_umount(mnt, flags);
}
int security_sb_pivotroot(struct path *old_path, struct path *new_path)
{
return security_ops->sb_pivotroot(old_path, new_path);
}
int security_sb_set_mnt_opts(struct super_block *sb,
struct security_mnt_opts *opts)
{
return security_ops->sb_set_mnt_opts(sb, opts);
}
EXPORT_SYMBOL(security_sb_set_mnt_opts);
void security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb)
{
security_ops->sb_clone_mnt_opts(oldsb, newsb);
}
EXPORT_SYMBOL(security_sb_clone_mnt_opts);
int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
{
return security_ops->sb_parse_opts_str(options, opts);
}
EXPORT_SYMBOL(security_sb_parse_opts_str);
int security_inode_alloc(struct inode *inode)
{
inode->i_security = NULL;
return security_ops->inode_alloc_security(inode);
}
void security_inode_free(struct inode *inode)
{
ima_inode_free(inode);
security_ops->inode_free_security(inode);
}
int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, char **name,
void **value, size_t *len)
{
if (unlikely(IS_PRIVATE(inode)))
return -EOPNOTSUPP;
return security_ops->inode_init_security(inode, dir, qstr, name, value,
len);
}
EXPORT_SYMBOL(security_inode_init_security);
#ifdef CONFIG_SECURITY_PATH
int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
unsigned int dev)
{
if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
return security_ops->path_mknod(dir, dentry, mode, dev);
}
EXPORT_SYMBOL(security_path_mknod);
int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
{
if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
return security_ops->path_mkdir(dir, dentry, mode);
}
EXPORT_SYMBOL(security_path_mkdir);
int security_path_rmdir(struct path *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
return security_ops->path_rmdir(dir, dentry);
}
int security_path_unlink(struct path *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
return security_ops->path_unlink(dir, dentry);
}
EXPORT_SYMBOL(security_path_unlink);
int security_path_symlink(struct path *dir, struct dentry *dentry,
const char *old_name)
{
if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
return security_ops->path_symlink(dir, dentry, old_name);
}
int security_path_link(struct dentry *old_dentry, struct path *new_dir,
struct dentry *new_dentry)
{
if (unlikely(IS_PRIVATE(old_dentry->d_inode)))
return 0;
return security_ops->path_link(old_dentry, new_dir, new_dentry);
}
int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
struct path *new_dir, struct dentry *new_dentry)
{
if (unlikely(IS_PRIVATE(old_dentry->d_inode) ||
(new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode))))
return 0;
return security_ops->path_rename(old_dir, old_dentry, new_dir,
new_dentry);
}
EXPORT_SYMBOL(security_path_rename);
int security_path_truncate(struct path *path)
{
if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
return 0;
return security_ops->path_truncate(path);
}
int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
mode_t mode)
{
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
return security_ops->path_chmod(dentry, mnt, mode);
}
int security_path_chown(struct path *path, uid_t uid, gid_t gid)
{
if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
return 0;
return security_ops->path_chown(path, uid, gid);
}
int security_path_chroot(struct path *path)
{
return security_ops->path_chroot(path);
}
#endif
int security_inode_create(struct inode *dir, struct dentry *dentry, int mode)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
return security_ops->inode_create(dir, dentry, mode);
}
EXPORT_SYMBOL_GPL(security_inode_create);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
if (unlikely(IS_PRIVATE(old_dentry->d_inode)))
return 0;
return security_ops->inode_link(old_dentry, dir, new_dentry);
}
int security_inode_unlink(struct inode *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
return security_ops->inode_unlink(dir, dentry);
}
int security_inode_symlink(struct inode *dir, struct dentry *dentry,
const char *old_name)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
return security_ops->inode_symlink(dir, dentry, old_name);
}
int security_inode_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
return security_ops->inode_mkdir(dir, dentry, mode);
}
EXPORT_SYMBOL_GPL(security_inode_mkdir);
int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
return security_ops->inode_rmdir(dir, dentry);
}
int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
return security_ops->inode_mknod(dir, dentry, mode, dev);
}
int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
if (unlikely(IS_PRIVATE(old_dentry->d_inode) ||
(new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode))))
return 0;
return security_ops->inode_rename(old_dir, old_dentry,
new_dir, new_dentry);
}
int security_inode_readlink(struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
return security_ops->inode_readlink(dentry);
}
int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd)
{
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
return security_ops->inode_follow_link(dentry, nd);
}
int security_inode_permission(struct inode *inode, int mask)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
return security_ops->inode_permission(inode, mask, 0);
}
int security_inode_exec_permission(struct inode *inode, unsigned int flags)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
return security_ops->inode_permission(inode, MAY_EXEC, flags);
}
int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
return security_ops->inode_setattr(dentry, attr);
}
EXPORT_SYMBOL_GPL(security_inode_setattr);
int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
return security_ops->inode_getattr(mnt, dentry);
}
int security_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
return security_ops->inode_setxattr(dentry, name, value, size, flags);
}
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return;
security_ops->inode_post_setxattr(dentry, name, value, size, flags);
}
int security_inode_getxattr(struct dentry *dentry, const char *name)
{
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
return security_ops->inode_getxattr(dentry, name);
}
int security_inode_listxattr(struct dentry *dentry)
{
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
return security_ops->inode_listxattr(dentry);
}
int security_inode_removexattr(struct dentry *dentry, const char *name)
{
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
return security_ops->inode_removexattr(dentry, name);
}
int security_inode_need_killpriv(struct dentry *dentry)
{
return security_ops->inode_need_killpriv(dentry);
}
int security_inode_killpriv(struct dentry *dentry)
{
return security_ops->inode_killpriv(dentry);
}
int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
{
if (unlikely(IS_PRIVATE(inode)))
return -EOPNOTSUPP;
return security_ops->inode_getsecurity(inode, name, buffer, alloc);
}
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
if (unlikely(IS_PRIVATE(inode)))
return -EOPNOTSUPP;
return security_ops->inode_setsecurity(inode, name, value, size, flags);
}
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
return security_ops->inode_listsecurity(inode, buffer, buffer_size);
}
void security_inode_getsecid(const struct inode *inode, u32 *secid)
{
security_ops->inode_getsecid(inode, secid);
}
int security_file_permission(struct file *file, int mask)
{
int ret;
ret = security_ops->file_permission(file, mask);
if (ret)
return ret;
return fsnotify_perm(file, mask);
}
int security_file_alloc(struct file *file)
{
return security_ops->file_alloc_security(file);
}
void security_file_free(struct file *file)
{
security_ops->file_free_security(file);
}
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
return security_ops->file_ioctl(file, cmd, arg);
}
int security_file_mmap(struct file *file, unsigned long reqprot,
unsigned long prot, unsigned long flags,
unsigned long addr, unsigned long addr_only)
{
int ret;
ret = security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only);
if (ret)
return ret;
return ima_file_mmap(file, prot);
}
int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
unsigned long prot)
{
return security_ops->file_mprotect(vma, reqprot, prot);
}
int security_file_lock(struct file *file, unsigned int cmd)
{
return security_ops->file_lock(file, cmd);
}
int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
return security_ops->file_fcntl(file, cmd, arg);
}
int security_file_set_fowner(struct file *file)
{
return security_ops->file_set_fowner(file);
}
int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig)
{
return security_ops->file_send_sigiotask(tsk, fown, sig);
}
int security_file_receive(struct file *file)
{
return security_ops->file_receive(file);
}
int security_dentry_open(struct file *file, const struct cred *cred)
{
int ret;
ret = security_ops->dentry_open(file, cred);
if (ret)
return ret;
return fsnotify_perm(file, MAY_OPEN);
}
int security_task_create(unsigned long clone_flags)
{
return security_ops->task_create(clone_flags);
}
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
return security_ops->cred_alloc_blank(cred, gfp);
}
void security_cred_free(struct cred *cred)
{
security_ops->cred_free(cred);
}
int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
{
return security_ops->cred_prepare(new, old, gfp);
}
void security_transfer_creds(struct cred *new, const struct cred *old)
{
security_ops->cred_transfer(new, old);
}
int security_kernel_act_as(struct cred *new, u32 secid)
{
return security_ops->kernel_act_as(new, secid);
}
int security_kernel_create_files_as(struct cred *new, struct inode *inode)
{
return security_ops->kernel_create_files_as(new, inode);
}
int security_kernel_module_request(char *kmod_name)
{
return security_ops->kernel_module_request(kmod_name);
}
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags)
{
return security_ops->task_fix_setuid(new, old, flags);
}
int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return security_ops->task_setpgid(p, pgid);
}
int security_task_getpgid(struct task_struct *p)
{
return security_ops->task_getpgid(p);
}
int security_task_getsid(struct task_struct *p)
{
return security_ops->task_getsid(p);
}
void security_task_getsecid(struct task_struct *p, u32 *secid)
{
security_ops->task_getsecid(p, secid);
}
EXPORT_SYMBOL(security_task_getsecid);
int security_task_setnice(struct task_struct *p, int nice)
{
return security_ops->task_setnice(p, nice);
}
int security_task_setioprio(struct task_struct *p, int ioprio)
{
return security_ops->task_setioprio(p, ioprio);
}
int security_task_getioprio(struct task_struct *p)
{
return security_ops->task_getioprio(p);
}
int security_task_setrlimit(struct task_struct *p, unsigned int resource,
struct rlimit *new_rlim)
{
return security_ops->task_setrlimit(p, resource, new_rlim);
}
int security_task_setscheduler(struct task_struct *p)
{
return security_ops->task_setscheduler(p);
}
int security_task_getscheduler(struct task_struct *p)
{
return security_ops->task_getscheduler(p);
}
int security_task_movememory(struct task_struct *p)
{
return security_ops->task_movememory(p);
}
int security_task_kill(struct task_struct *p, struct siginfo *info,
int sig, u32 secid)
{
return security_ops->task_kill(p, info, sig, secid);
}
int security_task_wait(struct task_struct *p)
{
return security_ops->task_wait(p);
}
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
return security_ops->task_prctl(option, arg2, arg3, arg4, arg5);
}
void security_task_to_inode(struct task_struct *p, struct inode *inode)
{
security_ops->task_to_inode(p, inode);
}
int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag)
{
return security_ops->ipc_permission(ipcp, flag);
}
void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
{
security_ops->ipc_getsecid(ipcp, secid);
}
int security_msg_msg_alloc(struct msg_msg *msg)
{
return security_ops->msg_msg_alloc_security(msg);
}
void security_msg_msg_free(struct msg_msg *msg)
{
security_ops->msg_msg_free_security(msg);
}
int security_msg_queue_alloc(struct msg_queue *msq)
{
return security_ops->msg_queue_alloc_security(msq);
}
void security_msg_queue_free(struct msg_queue *msq)
{
security_ops->msg_queue_free_security(msq);
}
int security_msg_queue_associate(struct msg_queue *msq, int msqflg)
{
return security_ops->msg_queue_associate(msq, msqflg);
}
int security_msg_queue_msgctl(struct msg_queue *msq, int cmd)
{
return security_ops->msg_queue_msgctl(msq, cmd);
}
int security_msg_queue_msgsnd(struct msg_queue *msq,
struct msg_msg *msg, int msqflg)
{
return security_ops->msg_queue_msgsnd(msq, msg, msqflg);
}
int security_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
struct task_struct *target, long type, int mode)
{
return security_ops->msg_queue_msgrcv(msq, msg, target, type, mode);
}
int security_shm_alloc(struct shmid_kernel *shp)
{
return security_ops->shm_alloc_security(shp);
}
void security_shm_free(struct shmid_kernel *shp)
{
security_ops->shm_free_security(shp);
}
int security_shm_associate(struct shmid_kernel *shp, int shmflg)
{
return security_ops->shm_associate(shp, shmflg);
}
int security_shm_shmctl(struct shmid_kernel *shp, int cmd)
{
return security_ops->shm_shmctl(shp, cmd);
}
int security_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr, int shmflg)
{
return security_ops->shm_shmat(shp, shmaddr, shmflg);
}
int security_sem_alloc(struct sem_array *sma)
{
return security_ops->sem_alloc_security(sma);
}
void security_sem_free(struct sem_array *sma)
{
security_ops->sem_free_security(sma);
}
int security_sem_associate(struct sem_array *sma, int semflg)
{
return security_ops->sem_associate(sma, semflg);
}
int security_sem_semctl(struct sem_array *sma, int cmd)
{
return security_ops->sem_semctl(sma, cmd);
}
int security_sem_semop(struct sem_array *sma, struct sembuf *sops,
unsigned nsops, int alter)
{
return security_ops->sem_semop(sma, sops, nsops, alter);
}
void security_d_instantiate(struct dentry *dentry, struct inode *inode)
{
if (unlikely(inode && IS_PRIVATE(inode)))
return;
security_ops->d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(security_d_instantiate);
int security_getprocattr(struct task_struct *p, char *name, char **value)
{
return security_ops->getprocattr(p, name, value);
}
int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size)
{
return security_ops->setprocattr(p, name, value, size);
}
int security_netlink_send(struct sock *sk, struct sk_buff *skb)
{
return security_ops->netlink_send(sk, skb);
}
int security_netlink_recv(struct sk_buff *skb, int cap)
{
return security_ops->netlink_recv(skb, cap);
}
EXPORT_SYMBOL(security_netlink_recv);
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
return security_ops->secid_to_secctx(secid, secdata, seclen);
}
EXPORT_SYMBOL(security_secid_to_secctx);
int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
return security_ops->secctx_to_secid(secdata, seclen, secid);
}
EXPORT_SYMBOL(security_secctx_to_secid);
void security_release_secctx(char *secdata, u32 seclen)
{
security_ops->release_secctx(secdata, seclen);
}
EXPORT_SYMBOL(security_release_secctx);
int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
{
return security_ops->inode_notifysecctx(inode, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_notifysecctx);
int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
{
return security_ops->inode_setsecctx(dentry, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_setsecctx);
int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
{
return security_ops->inode_getsecctx(inode, ctx, ctxlen);
}
EXPORT_SYMBOL(security_inode_getsecctx);
#ifdef CONFIG_SECURITY_NETWORK
int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk)
{
return security_ops->unix_stream_connect(sock, other, newsk);
}
EXPORT_SYMBOL(security_unix_stream_connect);
int security_unix_may_send(struct socket *sock, struct socket *other)
{
return security_ops->unix_may_send(sock, other);
}
EXPORT_SYMBOL(security_unix_may_send);
int security_socket_create(int family, int type, int protocol, int kern)
{
return security_ops->socket_create(family, type, protocol, kern);
}
int security_socket_post_create(struct socket *sock, int family,
int type, int protocol, int kern)
{
return security_ops->socket_post_create(sock, family, type,
protocol, kern);
}
int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
{
return security_ops->socket_bind(sock, address, addrlen);
}
int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen)
{
return security_ops->socket_connect(sock, address, addrlen);
}
int security_socket_listen(struct socket *sock, int backlog)
{
return security_ops->socket_listen(sock, backlog);
}
int security_socket_accept(struct socket *sock, struct socket *newsock)
{
return security_ops->socket_accept(sock, newsock);
}
int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size)
{
return security_ops->socket_sendmsg(sock, msg, size);
}
int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
int size, int flags)
{
return security_ops->socket_recvmsg(sock, msg, size, flags);
}
int security_socket_getsockname(struct socket *sock)
{
return security_ops->socket_getsockname(sock);
}
int security_socket_getpeername(struct socket *sock)
{
return security_ops->socket_getpeername(sock);
}
int security_socket_getsockopt(struct socket *sock, int level, int optname)
{
return security_ops->socket_getsockopt(sock, level, optname);
}
int security_socket_setsockopt(struct socket *sock, int level, int optname)
{
return security_ops->socket_setsockopt(sock, level, optname);
}
int security_socket_shutdown(struct socket *sock, int how)
{
return security_ops->socket_shutdown(sock, how);
}
int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
return security_ops->socket_sock_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(security_sock_rcv_skb);
int security_socket_getpeersec_stream(struct socket *sock, char __user *optval,
int __user *optlen, unsigned len)
{
return security_ops->socket_getpeersec_stream(sock, optval, optlen, len);
}
int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid)
{
return security_ops->socket_getpeersec_dgram(sock, skb, secid);
}
EXPORT_SYMBOL(security_socket_getpeersec_dgram);
int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
{
return security_ops->sk_alloc_security(sk, family, priority);
}
void security_sk_free(struct sock *sk)
{
security_ops->sk_free_security(sk);
}
void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
security_ops->sk_clone_security(sk, newsk);
}
void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
{
security_ops->sk_getsecid(sk, &fl->flowi_secid);
}
EXPORT_SYMBOL(security_sk_classify_flow);
void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
{
security_ops->req_classify_flow(req, fl);
}
EXPORT_SYMBOL(security_req_classify_flow);
void security_sock_graft(struct sock *sk, struct socket *parent)
{
security_ops->sock_graft(sk, parent);
}
EXPORT_SYMBOL(security_sock_graft);
int security_inet_conn_request(struct sock *sk,
struct sk_buff *skb, struct request_sock *req)
{
return security_ops->inet_conn_request(sk, skb, req);
}
EXPORT_SYMBOL(security_inet_conn_request);
void security_inet_csk_clone(struct sock *newsk,
const struct request_sock *req)
{
security_ops->inet_csk_clone(newsk, req);
}
void security_inet_conn_established(struct sock *sk,
struct sk_buff *skb)
{
security_ops->inet_conn_established(sk, skb);
}
int security_secmark_relabel_packet(u32 secid)
{
return security_ops->secmark_relabel_packet(secid);
}
EXPORT_SYMBOL(security_secmark_relabel_packet);
void security_secmark_refcount_inc(void)
{
security_ops->secmark_refcount_inc();
}
EXPORT_SYMBOL(security_secmark_refcount_inc);
void security_secmark_refcount_dec(void)
{
security_ops->secmark_refcount_dec();
}
EXPORT_SYMBOL(security_secmark_refcount_dec);
int security_tun_dev_create(void)
{
return security_ops->tun_dev_create();
}
EXPORT_SYMBOL(security_tun_dev_create);
void security_tun_dev_post_create(struct sock *sk)
{
return security_ops->tun_dev_post_create(sk);
}
EXPORT_SYMBOL(security_tun_dev_post_create);
int security_tun_dev_attach(struct sock *sk)
{
return security_ops->tun_dev_attach(sk);
}
EXPORT_SYMBOL(security_tun_dev_attach);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx)
{
return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx);
}
EXPORT_SYMBOL(security_xfrm_policy_alloc);
int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp)
{
return security_ops->xfrm_policy_clone_security(old_ctx, new_ctxp);
}
void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
security_ops->xfrm_policy_free_security(ctx);
}
EXPORT_SYMBOL(security_xfrm_policy_free);
int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
return security_ops->xfrm_policy_delete_security(ctx);
}
int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx)
{
return security_ops->xfrm_state_alloc_security(x, sec_ctx, 0);
}
EXPORT_SYMBOL(security_xfrm_state_alloc);
int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid)
{
if (!polsec)
return 0;
/*
* We want the context to be taken from secid which is usually
* from the sock.
*/
return security_ops->xfrm_state_alloc_security(x, NULL, secid);
}
int security_xfrm_state_delete(struct xfrm_state *x)
{
return security_ops->xfrm_state_delete_security(x);
}
EXPORT_SYMBOL(security_xfrm_state_delete);
void security_xfrm_state_free(struct xfrm_state *x)
{
security_ops->xfrm_state_free_security(x);
}
int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
{
return security_ops->xfrm_policy_lookup(ctx, fl_secid, dir);
}
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
const struct flowi *fl)
{
return security_ops->xfrm_state_pol_flow_match(x, xp, fl);
}
int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
{
return security_ops->xfrm_decode_session(skb, secid, 1);
}
void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
{
int rc = security_ops->xfrm_decode_session(skb, &fl->flowi_secid, 0);
BUG_ON(rc);
}
EXPORT_SYMBOL(security_skb_classify_flow);
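/*
* A note on the pair of xfrm_decode_session() calls above: the third
* argument is a strictness flag (in the SELinux backend it is "ckall").
* Treat the exact semantics as an assumption about that common backend;
* the two call patterns used in this file are, in sketch form:
*
*	rc = ops->xfrm_decode_session(skb, &secid, 1);	// strict; rc returned
*	rc = ops->xfrm_decode_session(skb, &secid, 0);	// relaxed; BUG_ON(rc)
*/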
#endif /* CONFIG_SECURITY_NETWORK_XFRM */
#ifdef CONFIG_KEYS
int security_key_alloc(struct key *key, const struct cred *cred,
unsigned long flags)
{
return security_ops->key_alloc(key, cred, flags);
}
void security_key_free(struct key *key)
{
security_ops->key_free(key);
}
int security_key_permission(key_ref_t key_ref,
const struct cred *cred, key_perm_t perm)
{
return security_ops->key_permission(key_ref, cred, perm);
}
int security_key_getsecurity(struct key *key, char **_buffer)
{
return security_ops->key_getsecurity(key, _buffer);
}
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
{
return security_ops->audit_rule_init(field, op, rulestr, lsmrule);
}
int security_audit_rule_known(struct audit_krule *krule)
{
return security_ops->audit_rule_known(krule);
}
void security_audit_rule_free(void *lsmrule)
{
security_ops->audit_rule_free(lsmrule);
}
int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule,
struct audit_context *actx)
{
return security_ops->audit_rule_match(secid, field, op, lsmrule, actx);
}
#endif /* CONFIG_AUDIT */
| gpl-2.0 |
Bauuuuu/android_kernel_zte_nx512j | drivers/pnp/pnpacpi/rsparser.c | 1614 | 26350 | /*
* pnpacpi -- PnP ACPI driver
*
* Copyright (c) 2004 Matthieu Castet <castet.matthieu@free.fr>
* Copyright (c) 2004 Li Shaohua <shaohua.li@intel.com>
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include "../base.h"
#include "pnpacpi.h"
static void decode_irq_flags(struct pnp_dev *dev, int flags, int *triggering,
int *polarity, int *shareable)
{
switch (flags & (IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_HIGHLEVEL |
IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE)) {
case IORESOURCE_IRQ_LOWLEVEL:
*triggering = ACPI_LEVEL_SENSITIVE;
*polarity = ACPI_ACTIVE_LOW;
break;
case IORESOURCE_IRQ_HIGHLEVEL:
*triggering = ACPI_LEVEL_SENSITIVE;
*polarity = ACPI_ACTIVE_HIGH;
break;
case IORESOURCE_IRQ_LOWEDGE:
*triggering = ACPI_EDGE_SENSITIVE;
*polarity = ACPI_ACTIVE_LOW;
break;
case IORESOURCE_IRQ_HIGHEDGE:
*triggering = ACPI_EDGE_SENSITIVE;
*polarity = ACPI_ACTIVE_HIGH;
break;
default:
dev_err(&dev->dev, "can't encode invalid IRQ mode %#x\n",
flags);
*triggering = ACPI_EDGE_SENSITIVE;
*polarity = ACPI_ACTIVE_HIGH;
break;
}
if (flags & IORESOURCE_IRQ_SHAREABLE)
*shareable = ACPI_SHARED;
else
*shareable = ACPI_EXCLUSIVE;
}
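/*
* Minimal usage sketch for decode_irq_flags() (hypothetical caller; the
* flag combination is chosen purely for illustration): a shareable,
* active-low, level-triggered IRQ decodes as
*
*	int trig, pol, share;
*
*	decode_irq_flags(dev, IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_SHAREABLE,
*			 &trig, &pol, &share);
*	// trig == ACPI_LEVEL_SENSITIVE, pol == ACPI_ACTIVE_LOW,
*	// share == ACPI_SHARED
*/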
static int dma_flags(struct pnp_dev *dev, int type, int bus_master,
int transfer)
{
int flags = 0;
if (bus_master)
flags |= IORESOURCE_DMA_MASTER;
switch (type) {
case ACPI_COMPATIBILITY:
flags |= IORESOURCE_DMA_COMPATIBLE;
break;
case ACPI_TYPE_A:
flags |= IORESOURCE_DMA_TYPEA;
break;
case ACPI_TYPE_B:
flags |= IORESOURCE_DMA_TYPEB;
break;
case ACPI_TYPE_F:
flags |= IORESOURCE_DMA_TYPEF;
break;
default:
/* Set a default value? */
flags |= IORESOURCE_DMA_COMPATIBLE;
dev_err(&dev->dev, "invalid DMA type %d\n", type);
}
switch (transfer) {
case ACPI_TRANSFER_8:
flags |= IORESOURCE_DMA_8BIT;
break;
case ACPI_TRANSFER_8_16:
flags |= IORESOURCE_DMA_8AND16BIT;
break;
case ACPI_TRANSFER_16:
flags |= IORESOURCE_DMA_16BIT;
break;
default:
/* Set a default value? */
flags |= IORESOURCE_DMA_8AND16BIT;
dev_err(&dev->dev, "invalid DMA transfer type %d\n", transfer);
}
return flags;
}
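/*
* Illustrative sketch for dma_flags() (inputs assumed, not taken from a
* real device): a bus-mastering, type-A channel limited to 8-bit
* transfers encodes as
*
*	flags = dma_flags(dev, ACPI_TYPE_A, 1, ACPI_TRANSFER_8);
*	// flags == IORESOURCE_DMA_MASTER | IORESOURCE_DMA_TYPEA |
*	//	    IORESOURCE_DMA_8BIT
*/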
/*
* Allocated Resources
*/
static void pnpacpi_add_irqresource(struct pnp_dev *dev, struct resource *r)
{
if (!(r->flags & IORESOURCE_DISABLED))
pcibios_penalize_isa_irq(r->start, 1);
pnp_add_resource(dev, r);
}
/*
* Device CSRs that do not appear in PCI config space should be described
* via ACPI. This would normally be done with Address Space Descriptors
* marked as "consumer-only," but old versions of Windows and Linux ignore
* the producer/consumer flag, so HP invented a vendor-defined resource to
* describe the location and size of CSR space.
*/
static struct acpi_vendor_uuid hp_ccsr_uuid = {
.subtype = 2,
.data = { 0xf9, 0xad, 0xe9, 0x69, 0x4f, 0x92, 0x5f, 0xab, 0xf6, 0x4a,
0x24, 0xd2, 0x01, 0x37, 0x0e, 0xad },
};
static int vendor_resource_matches(struct pnp_dev *dev,
struct acpi_resource_vendor_typed *vendor,
struct acpi_vendor_uuid *match,
int expected_len)
{
int uuid_len = sizeof(vendor->uuid);
u8 uuid_subtype = vendor->uuid_subtype;
u8 *uuid = vendor->uuid;
int actual_len;
/* byte_length includes uuid_subtype and uuid */
actual_len = vendor->byte_length - uuid_len - 1;
if (uuid_subtype == match->subtype &&
uuid_len == sizeof(match->data) &&
memcmp(uuid, match->data, uuid_len) == 0) {
if (expected_len && expected_len != actual_len) {
dev_err(&dev->dev, "wrong vendor descriptor size; "
"expected %d, found %d bytes\n",
expected_len, actual_len);
return 0;
}
return 1;
}
return 0;
}
static void pnpacpi_parse_allocated_vendor(struct pnp_dev *dev,
struct acpi_resource_vendor_typed *vendor)
{
if (vendor_resource_matches(dev, vendor, &hp_ccsr_uuid, 16)) {
u64 start, length;
memcpy(&start, vendor->byte_data, sizeof(start));
memcpy(&length, vendor->byte_data + 8, sizeof(length));
pnp_add_mem_resource(dev, start, start + length - 1, 0);
}
}
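/*
* The 16 payload bytes decoded above can be pictured with this
* hypothetical helper struct (illustration only; the code deliberately
* uses memcpy() so it makes no alignment assumptions):
*
*	struct hp_ccsr_payload {
*		u64 start;	// CSR base address
*		u64 length;	// CSR size; the range ends at start + length - 1
*	};
*/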
static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
void *data)
{
struct pnp_dev *dev = data;
struct acpi_resource_dma *dma;
struct acpi_resource_vendor_typed *vendor_typed;
struct resource r;
int i, flags;
if (acpi_dev_resource_address_space(res, &r)
|| acpi_dev_resource_ext_address_space(res, &r)) {
pnp_add_resource(dev, &r);
return AE_OK;
}
r.flags = 0;
if (acpi_dev_resource_interrupt(res, 0, &r)) {
pnpacpi_add_irqresource(dev, &r);
for (i = 1; acpi_dev_resource_interrupt(res, i, &r); i++)
pnpacpi_add_irqresource(dev, &r);
if (i > 1) {
/*
* The IRQ encoder puts a single interrupt in each
* descriptor, so if a _CRS descriptor has more than
* one interrupt, we won't be able to re-encode it.
*/
if (pnp_can_write(dev)) {
dev_warn(&dev->dev, "multiple interrupts in "
"_CRS descriptor; configuration can't "
"be changed\n");
dev->capabilities &= ~PNP_WRITE;
}
}
return AE_OK;
} else if (r.flags & IORESOURCE_DISABLED) {
pnp_add_irq_resource(dev, 0, IORESOURCE_DISABLED);
return AE_OK;
}
switch (res->type) {
case ACPI_RESOURCE_TYPE_MEMORY24:
case ACPI_RESOURCE_TYPE_MEMORY32:
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
if (acpi_dev_resource_memory(res, &r))
pnp_add_resource(dev, &r);
break;
case ACPI_RESOURCE_TYPE_IO:
case ACPI_RESOURCE_TYPE_FIXED_IO:
if (acpi_dev_resource_io(res, &r))
pnp_add_resource(dev, &r);
break;
case ACPI_RESOURCE_TYPE_DMA:
dma = &res->data.dma;
if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
flags = dma_flags(dev, dma->type, dma->bus_master,
dma->transfer);
else
flags = IORESOURCE_DISABLED;
pnp_add_dma_resource(dev, dma->channels[0], flags);
break;
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
case ACPI_RESOURCE_TYPE_END_DEPENDENT:
break;
case ACPI_RESOURCE_TYPE_VENDOR:
vendor_typed = &res->data.vendor_typed;
pnpacpi_parse_allocated_vendor(dev, vendor_typed);
break;
case ACPI_RESOURCE_TYPE_END_TAG:
break;
case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
break;
default:
dev_warn(&dev->dev, "unknown resource type %d in _CRS\n",
res->type);
return AE_ERROR;
}
return AE_OK;
}
int pnpacpi_parse_allocated_resource(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev = dev->data;
acpi_handle handle = acpi_dev->handle;
acpi_status status;
pnp_dbg(&dev->dev, "parse allocated resources\n");
pnp_init_resources(dev);
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
pnpacpi_allocated_resource, dev);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND)
dev_err(&dev->dev, "can't evaluate _CRS: %d", status);
return -EPERM;
}
return 0;
}
static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_dma *p)
{
int i;
unsigned char map = 0, flags;
for (i = 0; i < p->channel_count; i++)
map |= 1 << p->channels[i];
flags = dma_flags(dev, p->type, p->bus_master, p->transfer);
pnp_register_dma_resource(dev, option_flags, map, flags);
}
static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_irq *p)
{
int i;
pnp_irq_mask_t map;
unsigned char flags;
bitmap_zero(map.bits, PNP_IRQ_NR);
for (i = 0; i < p->interrupt_count; i++)
if (p->interrupts[i])
__set_bit(p->interrupts[i], map.bits);
flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->sharable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_extended_irq *p)
{
int i;
pnp_irq_mask_t map;
unsigned char flags;
bitmap_zero(map.bits, PNP_IRQ_NR);
for (i = 0; i < p->interrupt_count; i++) {
if (p->interrupts[i]) {
if (p->interrupts[i] < PNP_IRQ_NR)
__set_bit(p->interrupts[i], map.bits);
else
dev_err(&dev->dev, "ignoring IRQ %d option "
"(too large for %d entry bitmap)\n",
p->interrupts[i], PNP_IRQ_NR);
}
}
flags = acpi_dev_irq_flags(p->triggering, p->polarity, p->sharable);
pnp_register_irq_resource(dev, option_flags, &map, flags);
}
static __init void pnpacpi_parse_port_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_io *io)
{
unsigned char flags = 0;
if (io->io_decode == ACPI_DECODE_16)
flags = IORESOURCE_IO_16BIT_ADDR;
pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum,
io->alignment, io->address_length, flags);
}
static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_fixed_io *io)
{
pnp_register_port_resource(dev, option_flags, io->address, io->address,
0, io->address_length, IORESOURCE_IO_FIXED);
}
static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_memory24 *p)
{
unsigned char flags = 0;
if (p->write_protect == ACPI_READ_WRITE_MEMORY)
flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
p->alignment, p->address_length, flags);
}
static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_memory32 *p)
{
unsigned char flags = 0;
if (p->write_protect == ACPI_READ_WRITE_MEMORY)
flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
p->alignment, p->address_length, flags);
}
static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource_fixed_memory32 *p)
{
unsigned char flags = 0;
if (p->write_protect == ACPI_READ_WRITE_MEMORY)
flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->address, p->address,
0, p->address_length, flags);
}
static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource *r)
{
struct acpi_resource_address64 addr, *p = &addr;
acpi_status status;
unsigned char flags = 0;
status = acpi_resource_to_address64(r, p);
if (ACPI_FAILURE(status)) {
dev_warn(&dev->dev, "can't convert resource type %d\n",
r->type);
return;
}
if (p->resource_type == ACPI_MEMORY_RANGE) {
if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum,
p->minimum, 0, p->address_length,
flags);
} else if (p->resource_type == ACPI_IO_RANGE)
pnp_register_port_resource(dev, option_flags, p->minimum,
p->minimum, 0, p->address_length,
IORESOURCE_IO_FIXED);
}
static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
unsigned int option_flags,
struct acpi_resource *r)
{
struct acpi_resource_extended_address64 *p = &r->data.ext_address64;
unsigned char flags = 0;
if (p->resource_type == ACPI_MEMORY_RANGE) {
if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
flags = IORESOURCE_MEM_WRITEABLE;
pnp_register_mem_resource(dev, option_flags, p->minimum,
p->minimum, 0, p->address_length,
flags);
} else if (p->resource_type == ACPI_IO_RANGE)
pnp_register_port_resource(dev, option_flags, p->minimum,
p->minimum, 0, p->address_length,
IORESOURCE_IO_FIXED);
}
struct acpipnp_parse_option_s {
struct pnp_dev *dev;
unsigned int option_flags;
};
static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
void *data)
{
int priority;
struct acpipnp_parse_option_s *parse_data = data;
struct pnp_dev *dev = parse_data->dev;
unsigned int option_flags = parse_data->option_flags;
switch (res->type) {
case ACPI_RESOURCE_TYPE_IRQ:
pnpacpi_parse_irq_option(dev, option_flags, &res->data.irq);
break;
case ACPI_RESOURCE_TYPE_DMA:
pnpacpi_parse_dma_option(dev, option_flags, &res->data.dma);
break;
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
switch (res->data.start_dpf.compatibility_priority) {
case ACPI_GOOD_CONFIGURATION:
priority = PNP_RES_PRIORITY_PREFERRED;
break;
case ACPI_ACCEPTABLE_CONFIGURATION:
priority = PNP_RES_PRIORITY_ACCEPTABLE;
break;
case ACPI_SUB_OPTIMAL_CONFIGURATION:
priority = PNP_RES_PRIORITY_FUNCTIONAL;
break;
default:
priority = PNP_RES_PRIORITY_INVALID;
break;
}
parse_data->option_flags = pnp_new_dependent_set(dev, priority);
break;
case ACPI_RESOURCE_TYPE_END_DEPENDENT:
parse_data->option_flags = 0;
break;
case ACPI_RESOURCE_TYPE_IO:
pnpacpi_parse_port_option(dev, option_flags, &res->data.io);
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
pnpacpi_parse_fixed_port_option(dev, option_flags,
&res->data.fixed_io);
break;
case ACPI_RESOURCE_TYPE_VENDOR:
case ACPI_RESOURCE_TYPE_END_TAG:
break;
case ACPI_RESOURCE_TYPE_MEMORY24:
pnpacpi_parse_mem24_option(dev, option_flags,
&res->data.memory24);
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
pnpacpi_parse_mem32_option(dev, option_flags,
&res->data.memory32);
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
pnpacpi_parse_fixed_mem32_option(dev, option_flags,
&res->data.fixed_memory32);
break;
case ACPI_RESOURCE_TYPE_ADDRESS16:
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
pnpacpi_parse_address_option(dev, option_flags, res);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
pnpacpi_parse_ext_address_option(dev, option_flags, res);
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
pnpacpi_parse_ext_irq_option(dev, option_flags,
&res->data.extended_irq);
break;
case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
break;
default:
dev_warn(&dev->dev, "unknown resource type %d in _PRS\n",
res->type);
return AE_ERROR;
}
return AE_OK;
}
int __init pnpacpi_parse_resource_option_data(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev = dev->data;
acpi_handle handle = acpi_dev->handle;
acpi_status status;
struct acpipnp_parse_option_s parse_data;
pnp_dbg(&dev->dev, "parse resource options\n");
parse_data.dev = dev;
parse_data.option_flags = 0;
status = acpi_walk_resources(handle, METHOD_NAME__PRS,
pnpacpi_option_resource, &parse_data);
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND)
dev_err(&dev->dev, "can't evaluate _PRS: %d", status);
return -EPERM;
}
return 0;
}
static int pnpacpi_supported_resource(struct acpi_resource *res)
{
switch (res->type) {
case ACPI_RESOURCE_TYPE_IRQ:
case ACPI_RESOURCE_TYPE_DMA:
case ACPI_RESOURCE_TYPE_IO:
case ACPI_RESOURCE_TYPE_FIXED_IO:
case ACPI_RESOURCE_TYPE_MEMORY24:
case ACPI_RESOURCE_TYPE_MEMORY32:
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
case ACPI_RESOURCE_TYPE_ADDRESS16:
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
return 1;
}
return 0;
}
/*
* Set resource
*/
static acpi_status pnpacpi_count_resources(struct acpi_resource *res,
void *data)
{
int *res_cnt = data;
if (pnpacpi_supported_resource(res))
(*res_cnt)++;
return AE_OK;
}
static acpi_status pnpacpi_type_resources(struct acpi_resource *res, void *data)
{
struct acpi_resource **resource = data;
if (pnpacpi_supported_resource(res)) {
(*resource)->type = res->type;
(*resource)->length = sizeof(struct acpi_resource);
if (res->type == ACPI_RESOURCE_TYPE_IRQ)
(*resource)->data.irq.descriptor_length =
res->data.irq.descriptor_length;
(*resource)++;
}
return AE_OK;
}
int pnpacpi_build_resource_template(struct pnp_dev *dev,
struct acpi_buffer *buffer)
{
struct acpi_device *acpi_dev = dev->data;
acpi_handle handle = acpi_dev->handle;
struct acpi_resource *resource;
int res_cnt = 0;
acpi_status status;
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
pnpacpi_count_resources, &res_cnt);
if (ACPI_FAILURE(status)) {
dev_err(&dev->dev, "can't evaluate _CRS: %d\n", status);
return -EINVAL;
}
if (!res_cnt)
return -EINVAL;
buffer->length = sizeof(struct acpi_resource) * (res_cnt + 1) + 1;
buffer->pointer = kzalloc(buffer->length - 1, GFP_KERNEL);
if (!buffer->pointer)
return -ENOMEM;
resource = (struct acpi_resource *)buffer->pointer;
status = acpi_walk_resources(handle, METHOD_NAME__CRS,
pnpacpi_type_resources, &resource);
if (ACPI_FAILURE(status)) {
kfree(buffer->pointer);
dev_err(&dev->dev, "can't evaluate _CRS: %d\n", status);
return -EINVAL;
}
/* resource now points at the slot reserved for the END_TAG */
resource->type = ACPI_RESOURCE_TYPE_END_TAG;
resource->length = sizeof(struct acpi_resource);
return 0;
}
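/*
* Worked example of the sizing above, assuming res_cnt == 3: space is
* reserved for res_cnt + 1 descriptors, the extra slot holding the
* END_TAG written just before returning, and the kzalloc() drops the
* final pad byte from buffer->length:
*
*	buffer->length = sizeof(struct acpi_resource) * 4 + 1;
*	kzalloc size   = buffer->length - 1;	// exactly 4 descriptors
*/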
static void pnpacpi_encode_irq(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_irq *irq = &resource->data.irq;
int triggering, polarity, shareable;
if (!pnp_resource_enabled(p)) {
irq->interrupt_count = 0;
pnp_dbg(&dev->dev, " encode irq (%s)\n",
p ? "disabled" : "missing");
return;
}
decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable);
irq->triggering = triggering;
irq->polarity = polarity;
irq->sharable = shareable;
irq->interrupt_count = 1;
irq->interrupts[0] = p->start;
pnp_dbg(&dev->dev, " encode irq %d %s %s %s (%d-byte descriptor)\n",
(int) p->start,
triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
polarity == ACPI_ACTIVE_LOW ? "low" : "high",
irq->sharable == ACPI_SHARED ? "shared" : "exclusive",
irq->descriptor_length);
}
static void pnpacpi_encode_ext_irq(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_extended_irq *extended_irq = &resource->data.extended_irq;
int triggering, polarity, shareable;
if (!pnp_resource_enabled(p)) {
extended_irq->interrupt_count = 0;
pnp_dbg(&dev->dev, " encode extended irq (%s)\n",
p ? "disabled" : "missing");
return;
}
decode_irq_flags(dev, p->flags, &triggering, &polarity, &shareable);
extended_irq->producer_consumer = ACPI_CONSUMER;
extended_irq->triggering = triggering;
extended_irq->polarity = polarity;
extended_irq->sharable = shareable;
extended_irq->interrupt_count = 1;
extended_irq->interrupts[0] = p->start;
pnp_dbg(&dev->dev, " encode irq %d %s %s %s\n", (int) p->start,
triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
polarity == ACPI_ACTIVE_LOW ? "low" : "high",
extended_irq->sharable == ACPI_SHARED ? "shared" : "exclusive");
}
static void pnpacpi_encode_dma(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_dma *dma = &resource->data.dma;
if (!pnp_resource_enabled(p)) {
dma->channel_count = 0;
pnp_dbg(&dev->dev, " encode dma (%s)\n",
p ? "disabled" : "missing");
return;
}
/* Note: pnp_assign_dma will copy pnp_dma->flags into p->flags */
switch (p->flags & IORESOURCE_DMA_SPEED_MASK) {
case IORESOURCE_DMA_TYPEA:
dma->type = ACPI_TYPE_A;
break;
case IORESOURCE_DMA_TYPEB:
dma->type = ACPI_TYPE_B;
break;
case IORESOURCE_DMA_TYPEF:
dma->type = ACPI_TYPE_F;
break;
default:
dma->type = ACPI_COMPATIBILITY;
}
switch (p->flags & IORESOURCE_DMA_TYPE_MASK) {
case IORESOURCE_DMA_8BIT:
dma->transfer = ACPI_TRANSFER_8;
break;
case IORESOURCE_DMA_8AND16BIT:
dma->transfer = ACPI_TRANSFER_8_16;
break;
default:
dma->transfer = ACPI_TRANSFER_16;
}
dma->bus_master = !!(p->flags & IORESOURCE_DMA_MASTER);
dma->channel_count = 1;
dma->channels[0] = p->start;
pnp_dbg(&dev->dev, " encode dma %d "
"type %#x transfer %#x master %d\n",
(int) p->start, dma->type, dma->transfer, dma->bus_master);
}
static void pnpacpi_encode_io(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_io *io = &resource->data.io;
if (pnp_resource_enabled(p)) {
/* Note: pnp_assign_port copies pnp_port->flags into p->flags */
io->io_decode = (p->flags & IORESOURCE_IO_16BIT_ADDR) ?
ACPI_DECODE_16 : ACPI_DECODE_10;
io->minimum = p->start;
io->maximum = p->end;
io->alignment = 0; /* Correct? */
io->address_length = resource_size(p);
} else {
io->minimum = 0;
io->address_length = 0;
}
pnp_dbg(&dev->dev, " encode io %#x-%#x decode %#x\n", io->minimum,
io->minimum + io->address_length - 1, io->io_decode);
}
static void pnpacpi_encode_fixed_io(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_fixed_io *fixed_io = &resource->data.fixed_io;
if (pnp_resource_enabled(p)) {
fixed_io->address = p->start;
fixed_io->address_length = resource_size(p);
} else {
fixed_io->address = 0;
fixed_io->address_length = 0;
}
pnp_dbg(&dev->dev, " encode fixed_io %#x-%#x\n", fixed_io->address,
fixed_io->address + fixed_io->address_length - 1);
}
static void pnpacpi_encode_mem24(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_memory24 *memory24 = &resource->data.memory24;
if (pnp_resource_enabled(p)) {
/* Note: pnp_assign_mem copies pnp_mem->flags into p->flags */
memory24->write_protect = p->flags & IORESOURCE_MEM_WRITEABLE ?
ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
memory24->minimum = p->start;
memory24->maximum = p->end;
memory24->alignment = 0;
memory24->address_length = resource_size(p);
} else {
memory24->minimum = 0;
memory24->address_length = 0;
}
pnp_dbg(&dev->dev, " encode mem24 %#x-%#x write_protect %#x\n",
memory24->minimum,
memory24->minimum + memory24->address_length - 1,
memory24->write_protect);
}
static void pnpacpi_encode_mem32(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_memory32 *memory32 = &resource->data.memory32;
if (pnp_resource_enabled(p)) {
memory32->write_protect = p->flags & IORESOURCE_MEM_WRITEABLE ?
ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
memory32->minimum = p->start;
memory32->maximum = p->end;
memory32->alignment = 0;
memory32->address_length = resource_size(p);
} else {
memory32->minimum = 0;
memory32->alignment = 0;
}
pnp_dbg(&dev->dev, " encode mem32 %#x-%#x write_protect %#x\n",
memory32->minimum,
memory32->minimum + memory32->address_length - 1,
memory32->write_protect);
}
static void pnpacpi_encode_fixed_mem32(struct pnp_dev *dev,
struct acpi_resource *resource,
struct resource *p)
{
struct acpi_resource_fixed_memory32 *fixed_memory32 = &resource->data.fixed_memory32;
if (pnp_resource_enabled(p)) {
fixed_memory32->write_protect =
p->flags & IORESOURCE_MEM_WRITEABLE ?
ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
fixed_memory32->address = p->start;
fixed_memory32->address_length = resource_size(p);
} else {
fixed_memory32->address = 0;
fixed_memory32->address_length = 0;
}
pnp_dbg(&dev->dev, " encode fixed_mem32 %#x-%#x write_protect %#x\n",
fixed_memory32->address,
fixed_memory32->address + fixed_memory32->address_length - 1,
fixed_memory32->write_protect);
}
int pnpacpi_encode_resources(struct pnp_dev *dev, struct acpi_buffer *buffer)
{
int i = 0;
/* pnpacpi_build_resource_template allocates extra mem */
int res_cnt = (buffer->length - 1) / sizeof(struct acpi_resource) - 1;
struct acpi_resource *resource = buffer->pointer;
int port = 0, irq = 0, dma = 0, mem = 0;
pnp_dbg(&dev->dev, "encode %d resources\n", res_cnt);
while (i < res_cnt) {
switch (resource->type) {
case ACPI_RESOURCE_TYPE_IRQ:
pnpacpi_encode_irq(dev, resource,
pnp_get_resource(dev, IORESOURCE_IRQ, irq));
irq++;
break;
case ACPI_RESOURCE_TYPE_DMA:
pnpacpi_encode_dma(dev, resource,
pnp_get_resource(dev, IORESOURCE_DMA, dma));
dma++;
break;
case ACPI_RESOURCE_TYPE_IO:
pnpacpi_encode_io(dev, resource,
pnp_get_resource(dev, IORESOURCE_IO, port));
port++;
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
pnpacpi_encode_fixed_io(dev, resource,
pnp_get_resource(dev, IORESOURCE_IO, port));
port++;
break;
case ACPI_RESOURCE_TYPE_MEMORY24:
pnpacpi_encode_mem24(dev, resource,
pnp_get_resource(dev, IORESOURCE_MEM, mem));
mem++;
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
pnpacpi_encode_mem32(dev, resource,
pnp_get_resource(dev, IORESOURCE_MEM, mem));
mem++;
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
pnpacpi_encode_fixed_mem32(dev, resource,
pnp_get_resource(dev, IORESOURCE_MEM, mem));
mem++;
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
pnpacpi_encode_ext_irq(dev, resource,
pnp_get_resource(dev, IORESOURCE_IRQ, irq));
irq++;
break;
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
case ACPI_RESOURCE_TYPE_END_DEPENDENT:
case ACPI_RESOURCE_TYPE_VENDOR:
case ACPI_RESOURCE_TYPE_END_TAG:
case ACPI_RESOURCE_TYPE_ADDRESS16:
case ACPI_RESOURCE_TYPE_ADDRESS32:
case ACPI_RESOURCE_TYPE_ADDRESS64:
case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
default: /* other type */
dev_warn(&dev->dev, "can't encode unknown resource "
"type %d\n", resource->type);
return -EINVAL;
}
resource++;
i++;
}
return 0;
}
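/*
* Sketch of the expected call sequence (hypothetical caller, e.g. an
* _SRS encode path; both functions return 0 on success):
*
*	struct acpi_buffer buffer;
*
*	if (!pnpacpi_build_resource_template(dev, &buffer)) {
*		if (!pnpacpi_encode_resources(dev, &buffer))
*			;	// buffer.pointer now holds the encoded descriptors
*		kfree(buffer.pointer);
*	}
*/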
| gpl-2.0 |
ea4862/boeffla43_e210k | fs/dlm/lockspace.c | 2382 | 19009 | /******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
#include "ast.h"
#include "dir.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
#include "lock.h"
#include "recover.h"
#include "requestqueue.h"
#include "user.h"
static int ls_count;
static struct mutex ls_lock;
static struct list_head lslist;
static spinlock_t lslist_lock;
static struct task_struct * scand_task;
static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
{
ssize_t ret = len;
int n = simple_strtol(buf, NULL, 0);
ls = dlm_find_lockspace_local(ls->ls_local_handle);
if (!ls)
return -EINVAL;
switch (n) {
case 0:
dlm_ls_stop(ls);
break;
case 1:
dlm_ls_start(ls);
break;
default:
ret = -EINVAL;
}
dlm_put_lockspace(ls);
return ret;
}
static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
{
ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
wake_up(&ls->ls_uevent_wait);
return len;
}
static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
}
static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
{
ls->ls_global_id = simple_strtoul(buf, NULL, 0);
return len;
}
static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
{
uint32_t status = dlm_recover_status(ls);
return snprintf(buf, PAGE_SIZE, "%x\n", status);
}
static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
}
struct dlm_attr {
struct attribute attr;
ssize_t (*show)(struct dlm_ls *, char *);
ssize_t (*store)(struct dlm_ls *, const char *, size_t);
};
static struct dlm_attr dlm_attr_control = {
.attr = {.name = "control", .mode = S_IWUSR},
.store = dlm_control_store
};
static struct dlm_attr dlm_attr_event = {
.attr = {.name = "event_done", .mode = S_IWUSR},
.store = dlm_event_store
};
static struct dlm_attr dlm_attr_id = {
.attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
.show = dlm_id_show,
.store = dlm_id_store
};
static struct dlm_attr dlm_attr_recover_status = {
.attr = {.name = "recover_status", .mode = S_IRUGO},
.show = dlm_recover_status_show
};
static struct dlm_attr dlm_attr_recover_nodeid = {
.attr = {.name = "recover_nodeid", .mode = S_IRUGO},
.show = dlm_recover_nodeid_show
};
static struct attribute *dlm_attrs[] = {
&dlm_attr_control.attr,
&dlm_attr_event.attr,
&dlm_attr_id.attr,
&dlm_attr_recover_status.attr,
&dlm_attr_recover_nodeid.attr,
NULL,
};
static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
return a->show ? a->show(ls, buf) : 0;
}
static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t len)
{
struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
return a->store ? a->store(ls, buf, len) : len;
}
static void lockspace_kobj_release(struct kobject *k)
{
struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
kfree(ls);
}
static const struct sysfs_ops dlm_attr_ops = {
.show = dlm_attr_show,
.store = dlm_attr_store,
};
static struct kobj_type dlm_ktype = {
.default_attrs = dlm_attrs,
.sysfs_ops = &dlm_attr_ops,
.release = lockspace_kobj_release,
};
static struct kset *dlm_kset;
static int do_uevent(struct dlm_ls *ls, int in)
{
int error;
if (in)
kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
else
kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
log_debug(ls, "%s the lockspace group...", in ? "joining" : "leaving");
/* dlm_controld will see the uevent, do the necessary group management
and then write to sysfs to wake us */
error = wait_event_interruptible(ls->ls_uevent_wait,
test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
log_debug(ls, "group event done %d %d", error, ls->ls_uevent_result);
if (error)
goto out;
error = ls->ls_uevent_result;
out:
if (error)
log_error(ls, "group %s failed %d %d", in ? "join" : "leave",
error, ls->ls_uevent_result);
return error;
}
static int dlm_uevent(struct kset *kset, struct kobject *kobj,
struct kobj_uevent_env *env)
{
struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name);
return 0;
}
static struct kset_uevent_ops dlm_uevent_ops = {
.uevent = dlm_uevent,
};
int __init dlm_lockspace_init(void)
{
ls_count = 0;
mutex_init(&ls_lock);
INIT_LIST_HEAD(&lslist);
spin_lock_init(&lslist_lock);
dlm_kset = kset_create_and_add("dlm", &dlm_uevent_ops, kernel_kobj);
if (!dlm_kset) {
printk(KERN_WARNING "%s: cannot create kset\n", __func__);
return -ENOMEM;
}
return 0;
}
void dlm_lockspace_exit(void)
{
kset_unregister(dlm_kset);
}
static struct dlm_ls *find_ls_to_scan(void)
{
struct dlm_ls *ls;
spin_lock(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
if (time_after_eq(jiffies, ls->ls_scan_time +
dlm_config.ci_scan_secs * HZ)) {
spin_unlock(&lslist_lock);
return ls;
}
}
spin_unlock(&lslist_lock);
return NULL;
}
static int dlm_scand(void *data)
{
struct dlm_ls *ls;
while (!kthread_should_stop()) {
ls = find_ls_to_scan();
if (ls) {
if (dlm_lock_recovery_try(ls)) {
ls->ls_scan_time = jiffies;
dlm_scan_rsbs(ls);
dlm_scan_timeout(ls);
dlm_scan_waiters(ls);
dlm_unlock_recovery(ls);
} else {
ls->ls_scan_time += HZ;
}
continue;
}
schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
}
return 0;
}
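/*
* Note on the backoff above, assuming ci_scan_secs == 5: a lockspace is
* due when jiffies >= ls_scan_time + 5 * HZ. In sketch form:
*
*	due, recovery lock taken:	ls->ls_scan_time = jiffies;	// full scan
*	due, recovery lock busy:	ls->ls_scan_time += HZ;		// retry ~1s later
*
* so find_ls_to_scan() stops returning the same busy lockspace in a
* tight loop.
*/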
static int dlm_scand_start(void)
{
struct task_struct *p;
int error = 0;
p = kthread_run(dlm_scand, NULL, "dlm_scand");
if (IS_ERR(p))
error = PTR_ERR(p);
else
scand_task = p;
return error;
}
static void dlm_scand_stop(void)
{
kthread_stop(scand_task);
}
struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
{
struct dlm_ls *ls;
spin_lock(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
if (ls->ls_global_id == id) {
ls->ls_count++;
goto out;
}
}
ls = NULL;
out:
spin_unlock(&lslist_lock);
return ls;
}
struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
{
struct dlm_ls *ls;
spin_lock(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
if (ls->ls_local_handle == lockspace) {
ls->ls_count++;
goto out;
}
}
ls = NULL;
out:
spin_unlock(&lslist_lock);
return ls;
}
struct dlm_ls *dlm_find_lockspace_device(int minor)
{
struct dlm_ls *ls;
spin_lock(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
if (ls->ls_device.minor == minor) {
ls->ls_count++;
goto out;
}
}
ls = NULL;
out:
spin_unlock(&lslist_lock);
return ls;
}
void dlm_put_lockspace(struct dlm_ls *ls)
{
spin_lock(&lslist_lock);
ls->ls_count--;
spin_unlock(&lslist_lock);
}
static void remove_lockspace(struct dlm_ls *ls)
{
for (;;) {
spin_lock(&lslist_lock);
if (ls->ls_count == 0) {
WARN_ON(ls->ls_create_count != 0);
list_del(&ls->ls_list);
spin_unlock(&lslist_lock);
return;
}
spin_unlock(&lslist_lock);
ssleep(1);
}
}
static int threads_start(void)
{
int error;
/* Thread which processes lock requests for all lockspaces */
error = dlm_astd_start();
if (error) {
log_print("cannot start dlm_astd thread %d", error);
goto fail;
}
error = dlm_scand_start();
if (error) {
log_print("cannot start dlm_scand thread %d", error);
goto astd_fail;
}
/* Thread for sending/receiving messages for all lockspaces */
error = dlm_lowcomms_start();
if (error) {
log_print("cannot start dlm lowcomms %d", error);
goto scand_fail;
}
return 0;
scand_fail:
dlm_scand_stop();
astd_fail:
dlm_astd_stop();
fail:
return error;
}
static void threads_stop(void)
{
dlm_scand_stop();
dlm_lowcomms_stop();
dlm_astd_stop();
}
static int new_lockspace(const char *name, int namelen, void **lockspace,
uint32_t flags, int lvblen)
{
struct dlm_ls *ls;
int i, size, error;
int do_unreg = 0;
if (namelen > DLM_LOCKSPACE_LEN)
return -EINVAL;
if (!lvblen || (lvblen % 8))
return -EINVAL;
if (!try_module_get(THIS_MODULE))
return -EINVAL;
if (!dlm_user_daemon_available()) {
module_put(THIS_MODULE);
return -EUNATCH;
}
error = 0;
spin_lock(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
WARN_ON(ls->ls_create_count <= 0);
if (ls->ls_namelen != namelen)
continue;
if (memcmp(ls->ls_name, name, namelen))
continue;
if (flags & DLM_LSFL_NEWEXCL) {
error = -EEXIST;
break;
}
ls->ls_create_count++;
*lockspace = ls;
error = 1;
break;
}
spin_unlock(&lslist_lock);
if (error)
goto out;
error = -ENOMEM;
ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_NOFS);
if (!ls)
goto out;
memcpy(ls->ls_name, name, namelen);
ls->ls_namelen = namelen;
ls->ls_lvblen = lvblen;
ls->ls_count = 0;
ls->ls_flags = 0;
ls->ls_scan_time = jiffies;
if (flags & DLM_LSFL_TIMEWARN)
set_bit(LSFL_TIMEWARN, &ls->ls_flags);
/* ls_exflags are forced to match among nodes, and we don't
need to require all nodes to have some flags set */
ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
DLM_LSFL_NEWEXCL));
size = dlm_config.ci_rsbtbl_size;
ls->ls_rsbtbl_size = size;
ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_NOFS);
if (!ls->ls_rsbtbl)
goto out_lsfree;
for (i = 0; i < size; i++) {
INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
spin_lock_init(&ls->ls_rsbtbl[i].lock);
}
size = dlm_config.ci_lkbtbl_size;
ls->ls_lkbtbl_size = size;
ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_NOFS);
if (!ls->ls_lkbtbl)
goto out_rsbfree;
for (i = 0; i < size; i++) {
INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
rwlock_init(&ls->ls_lkbtbl[i].lock);
ls->ls_lkbtbl[i].counter = 1;
}
size = dlm_config.ci_dirtbl_size;
ls->ls_dirtbl_size = size;
ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_NOFS);
if (!ls->ls_dirtbl)
goto out_lkbfree;
for (i = 0; i < size; i++) {
INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
spin_lock_init(&ls->ls_dirtbl[i].lock);
}
INIT_LIST_HEAD(&ls->ls_waiters);
mutex_init(&ls->ls_waiters_mutex);
INIT_LIST_HEAD(&ls->ls_orphans);
mutex_init(&ls->ls_orphans_mutex);
INIT_LIST_HEAD(&ls->ls_timeout);
mutex_init(&ls->ls_timeout_mutex);
INIT_LIST_HEAD(&ls->ls_nodes);
INIT_LIST_HEAD(&ls->ls_nodes_gone);
ls->ls_num_nodes = 0;
ls->ls_low_nodeid = 0;
ls->ls_total_weight = 0;
ls->ls_node_array = NULL;
memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
ls->ls_stub_rsb.res_ls = ls;
ls->ls_debug_rsb_dentry = NULL;
ls->ls_debug_waiters_dentry = NULL;
init_waitqueue_head(&ls->ls_uevent_wait);
ls->ls_uevent_result = 0;
init_completion(&ls->ls_members_done);
ls->ls_members_result = -1;
ls->ls_recoverd_task = NULL;
mutex_init(&ls->ls_recoverd_active);
spin_lock_init(&ls->ls_recover_lock);
spin_lock_init(&ls->ls_rcom_spin);
get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
ls->ls_recover_status = 0;
ls->ls_recover_seq = 0;
ls->ls_recover_args = NULL;
init_rwsem(&ls->ls_in_recovery);
init_rwsem(&ls->ls_recv_active);
INIT_LIST_HEAD(&ls->ls_requestqueue);
mutex_init(&ls->ls_requestqueue_mutex);
mutex_init(&ls->ls_clear_proc_locks);
ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
if (!ls->ls_recover_buf)
goto out_dirfree;
INIT_LIST_HEAD(&ls->ls_recover_list);
spin_lock_init(&ls->ls_recover_list_lock);
ls->ls_recover_list_count = 0;
ls->ls_local_handle = ls;
init_waitqueue_head(&ls->ls_wait_general);
INIT_LIST_HEAD(&ls->ls_root_list);
init_rwsem(&ls->ls_root_sem);
down_write(&ls->ls_in_recovery);
spin_lock(&lslist_lock);
ls->ls_create_count = 1;
list_add(&ls->ls_list, &lslist);
spin_unlock(&lslist_lock);
/* needs to find ls in lslist */
error = dlm_recoverd_start(ls);
if (error) {
log_error(ls, "can't start dlm_recoverd %d", error);
goto out_delist;
}
ls->ls_kobj.kset = dlm_kset;
error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
"%s", ls->ls_name);
if (error)
goto out_stop;
kobject_uevent(&ls->ls_kobj, KOBJ_ADD);
/* let kobject handle freeing of ls if there's an error */
do_unreg = 1;
/* This uevent triggers dlm_controld in userspace to add us to the
group of nodes that are members of this lockspace (managed by the
cluster infrastructure.) Once it's done that, it tells us who the
current lockspace members are (via configfs) and then tells the
lockspace to start running (via sysfs) in dlm_ls_start(). */
error = do_uevent(ls, 1);
if (error)
goto out_stop;
wait_for_completion(&ls->ls_members_done);
error = ls->ls_members_result;
if (error)
goto out_members;
dlm_create_debug_file(ls);
log_debug(ls, "join complete");
*lockspace = ls;
return 0;
out_members:
do_uevent(ls, 0);
dlm_clear_members(ls);
kfree(ls->ls_node_array);
out_stop:
dlm_recoverd_stop(ls);
out_delist:
spin_lock(&lslist_lock);
list_del(&ls->ls_list);
spin_unlock(&lslist_lock);
kfree(ls->ls_recover_buf);
out_dirfree:
kfree(ls->ls_dirtbl);
out_lkbfree:
kfree(ls->ls_lkbtbl);
out_rsbfree:
kfree(ls->ls_rsbtbl);
out_lsfree:
if (do_unreg)
kobject_put(&ls->ls_kobj);
else
kfree(ls);
out:
module_put(THIS_MODULE);
return error;
}
int dlm_new_lockspace(const char *name, int namelen, void **lockspace,
uint32_t flags, int lvblen)
{
int error = 0;
mutex_lock(&ls_lock);
if (!ls_count)
error = threads_start();
if (error)
goto out;
error = new_lockspace(name, namelen, lockspace, flags, lvblen);
if (!error)
ls_count++;
if (error > 0)
error = 0;
if (!ls_count)
threads_stop();
out:
mutex_unlock(&ls_lock);
return error;
}
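/*
* Minimal usage sketch (hypothetical caller; the name and lvblen are
* illustrative -- lvblen must be a non-zero multiple of 8, as checked
* in new_lockspace() above):
*
*	dlm_lockspace_t *ls;
*	int error;
*
*	error = dlm_new_lockspace("example", strlen("example"),
*				  (void **)&ls, 0, 64);
*	if (!error)
*		dlm_release_lockspace(ls, 0);	// -EBUSY while LKBs remain
*/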
/* Return 1 if the lockspace still has active remote locks,
* 2 if the lockspace still has active local locks.
*/
static int lockspace_busy(struct dlm_ls *ls)
{
int i, lkb_found = 0;
struct dlm_lkb *lkb;
/* NOTE: We check the lockidtbl here rather than the resource table.
This is because there may be LKBs queued as ASTs that have been
unlinked from their RSBs and are pending deletion once the AST has
been delivered */
for (i = 0; i < ls->ls_lkbtbl_size; i++) {
read_lock(&ls->ls_lkbtbl[i].lock);
if (!list_empty(&ls->ls_lkbtbl[i].list)) {
lkb_found = 1;
list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
lkb_idtbl_list) {
if (!lkb->lkb_nodeid) {
read_unlock(&ls->ls_lkbtbl[i].lock);
return 2;
}
}
}
read_unlock(&ls->ls_lkbtbl[i].lock);
}
return lkb_found;
}
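/*
* How this return value meets "force" in release_lockspace() below,
* where busy > force refuses the release:
*
*	busy == 2 (local locks held),  force == 1  ->  -EBUSY
*	busy == 1 (remote locks only), force == 1  ->  release proceeds
*	busy == 2,                     force == 2  ->  release proceeds
*/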
static int release_lockspace(struct dlm_ls *ls, int force)
{
struct dlm_lkb *lkb;
struct dlm_rsb *rsb;
struct list_head *head;
int i, busy, rv;
busy = lockspace_busy(ls);
spin_lock(&lslist_lock);
if (ls->ls_create_count == 1) {
if (busy > force)
rv = -EBUSY;
else {
/* remove_lockspace takes ls off lslist */
ls->ls_create_count = 0;
rv = 0;
}
} else if (ls->ls_create_count > 1) {
rv = --ls->ls_create_count;
} else {
rv = -EINVAL;
}
spin_unlock(&lslist_lock);
if (rv) {
log_debug(ls, "release_lockspace no remove %d", rv);
return rv;
}
dlm_device_deregister(ls);
if (force < 3 && dlm_user_daemon_available())
do_uevent(ls, 0);
dlm_recoverd_stop(ls);
remove_lockspace(ls);
dlm_delete_debug_file(ls);
dlm_astd_suspend();
kfree(ls->ls_recover_buf);
/*
* Free direntry structs.
*/
dlm_dir_clear(ls);
kfree(ls->ls_dirtbl);
/*
* Free all lkb's on lkbtbl[] lists.
*/
for (i = 0; i < ls->ls_lkbtbl_size; i++) {
head = &ls->ls_lkbtbl[i].list;
while (!list_empty(head)) {
lkb = list_entry(head->next, struct dlm_lkb,
lkb_idtbl_list);
list_del(&lkb->lkb_idtbl_list);
dlm_del_ast(lkb);
if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
dlm_free_lvb(lkb->lkb_lvbptr);
dlm_free_lkb(lkb);
}
}
dlm_astd_resume();
kfree(ls->ls_lkbtbl);
/*
* Free all rsb's on rsbtbl[] lists
*/
for (i = 0; i < ls->ls_rsbtbl_size; i++) {
head = &ls->ls_rsbtbl[i].list;
while (!list_empty(head)) {
rsb = list_entry(head->next, struct dlm_rsb,
res_hashchain);
list_del(&rsb->res_hashchain);
dlm_free_rsb(rsb);
}
head = &ls->ls_rsbtbl[i].toss;
while (!list_empty(head)) {
rsb = list_entry(head->next, struct dlm_rsb,
res_hashchain);
list_del(&rsb->res_hashchain);
dlm_free_rsb(rsb);
}
}
kfree(ls->ls_rsbtbl);
/*
* Free structures on any other lists
*/
dlm_purge_requestqueue(ls);
kfree(ls->ls_recover_args);
dlm_clear_free_entries(ls);
dlm_clear_members(ls);
dlm_clear_members_gone(ls);
kfree(ls->ls_node_array);
log_debug(ls, "release_lockspace final free");
kobject_put(&ls->ls_kobj);
/* The ls structure will be freed when the kobject is done with it */
module_put(THIS_MODULE);
return 0;
}
/*
* Called when a system has released all its locks and is not going to use the
* lockspace any longer. We free everything we're managing for this lockspace.
* Remaining nodes will go through the recovery process as if we'd died. The
* lockspace must continue to function as usual, participating in recoveries,
* until this returns.
*
* Force has 4 possible values:
* 0 - don't destroy lockspace if it has any LKBs
* 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
* 2 - destroy lockspace regardless of LKBs
* 3 - destroy lockspace as part of a forced shutdown
*/
int dlm_release_lockspace(void *lockspace, int force)
{
struct dlm_ls *ls;
int error;
ls = dlm_find_lockspace_local(lockspace);
if (!ls)
return -EINVAL;
dlm_put_lockspace(ls);
mutex_lock(&ls_lock);
error = release_lockspace(ls, force);
if (!error)
ls_count--;
if (!ls_count)
threads_stop();
mutex_unlock(&ls_lock);
return error;
}
void dlm_stop_lockspaces(void)
{
struct dlm_ls *ls;
restart:
spin_lock(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
if (!test_bit(LSFL_RUNNING, &ls->ls_flags))
continue;
spin_unlock(&lslist_lock);
log_error(ls, "no userland control daemon, stopping lockspace");
dlm_ls_stop(ls);
goto restart;
}
spin_unlock(&lslist_lock);
}
| gpl-2.0 |
OneEducation/kernel-rk310-lollipop-cx929 | drivers/staging/wlags49_h2/wl_netdev.c | 2382 | 61141 | /*******************************************************************************
* Agere Systems Inc.
* Wireless device driver for Linux (wlags49).
*
* Copyright (c) 1998-2003 Agere Systems Inc.
* All rights reserved.
* http://www.agere.com
*
* Initially developed by TriplePoint, Inc.
* http://www.triplepoint.com
*
*------------------------------------------------------------------------------
*
* This file contains handler functions registered with the net_device
* structure.
*
*------------------------------------------------------------------------------
*
* SOFTWARE LICENSE
*
* This software is provided subject to the following terms and conditions,
* which you should read carefully before using the software. Using this
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
* Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
* modifications, are permitted provided that the following conditions are met:
*
* . Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following Disclaimer as comments in the code as
* well as in the documentation and/or other materials provided with the
* distribution.
*
* . Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following Disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* . Neither the name of Agere Systems Inc. nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Disclaimer
*
* THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
* RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
******************************************************************************/
/*******************************************************************************
* include files
******************************************************************************/
#include <wl_version.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
// #include <linux/sched.h>
// #include <linux/ptrace.h>
// #include <linux/slab.h>
// #include <linux/ctype.h>
// #include <linux/string.h>
//#include <linux/timer.h>
// #include <linux/interrupt.h>
// #include <linux/in.h>
// #include <linux/delay.h>
// #include <linux/skbuff.h>
// #include <asm/io.h>
// // #include <asm/bitops.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
// #include <linux/skbuff.h>
// #include <linux/if_arp.h>
// #include <linux/ioport.h>
#include <debug.h>
#include <hcf.h>
#include <dhf.h>
// #include <hcfdef.h>
#include <wl_if.h>
#include <wl_internal.h>
#include <wl_util.h>
#include <wl_priv.h>
#include <wl_main.h>
#include <wl_netdev.h>
#include <wl_wext.h>
#ifdef USE_PROFILE
#include <wl_profile.h>
#endif /* USE_PROFILE */
#ifdef BUS_PCMCIA
#include <wl_cs.h>
#endif /* BUS_PCMCIA */
#ifdef BUS_PCI
#include <wl_pci.h>
#endif /* BUS_PCI */
/*******************************************************************************
* global variables
******************************************************************************/
#if DBG
extern dbg_info_t *DbgInfo;
#endif /* DBG */
#if HCF_ENCAP
#define MTU_MAX (HCF_MAX_MSG - ETH_HLEN - 8)
#else
#define MTU_MAX (HCF_MAX_MSG - ETH_HLEN)
#endif
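/*
* Sketch of the arithmetic above with assumed values (the real numbers
* come from the HCF headers): taking HCF_MAX_MSG == 1514 and
* ETH_HLEN == 14,
*
*	HCF_ENCAP set:	 MTU_MAX = 1514 - 14 - 8 = 1492	// encapsulation room
*	HCF_ENCAP clear: MTU_MAX = 1514 - 14	 = 1500
*/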
//static int mtu = MTU_MAX;
//MODULE_PARM(mtu, "i");
//MODULE_PARM_DESC(mtu, "MTU");
/*******************************************************************************
* macros
******************************************************************************/
#define BLOCK_INPUT(buf, len) \
desc->buf_addr = buf; \
desc->BUF_SIZE = len; \
status = hcf_rcv_msg(&(lp->hcfCtx), desc, 0)
#define BLOCK_INPUT_DMA(buf, len) memcpy( buf, desc_next->buf_addr, pktlen )
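/*
* Usage sketch for the receive macros above (desc, lp and status are
* assumed to be in scope at the expansion site, as in this driver's rx
* paths; skb and pktlen are illustrative):
*
*	BLOCK_INPUT(skb->data, pktlen);		// expands to an hcf_rcv_msg() call
*	if (status == HCF_SUCCESS)
*		netif_rx(skb);
*/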
/*******************************************************************************
* function prototypes
******************************************************************************/
/*******************************************************************************
* wl_init()
*******************************************************************************
*
* DESCRIPTION:
*
* We never need to do anything when a "Wireless" device is "initialized"
* by the net software, because we only register already-found cards.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
*
* RETURNS:
*
* 0 on success
* errno value otherwise
*
******************************************************************************/
int wl_init( struct net_device *dev )
{
// unsigned long flags;
// struct wl_private *lp = wl_priv(dev);
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_init" );
DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
/* Nothing to do, but grab the spinlock anyway just in case we ever need
this routine */
// wl_lock( lp, &flags );
// wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return 0;
} // wl_init
/*============================================================================*/
/*******************************************************************************
* wl_config()
*******************************************************************************
*
* DESCRIPTION:
*
* Implement the SIOCSIFMAP interface.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
* map - a pointer to the device's ifmap structure
*
* RETURNS:
*
* 0 on success
* errno otherwise
*
******************************************************************************/
int wl_config( struct net_device *dev, struct ifmap *map )
{
DBG_FUNC( "wl_config" );
DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
DBG_PARAM( DbgInfo, "map", "0x%p", map );
/* The only thing we care about here is a port change. Since this is not
needed, ignore the request. */
DBG_TRACE(DbgInfo, "%s: %s called.\n", dev->name, __func__);
DBG_LEAVE( DbgInfo );
return 0;
} // wl_config
/*============================================================================*/
/*******************************************************************************
* wl_stats()
*******************************************************************************
*
* DESCRIPTION:
*
* Return the current device statistics.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
*
* RETURNS:
*
* a pointer to a net_device_stats structure containing the network
* statistics.
*
******************************************************************************/
struct net_device_stats *wl_stats( struct net_device *dev )
{
#ifdef USE_WDS
int count;
#endif /* USE_WDS */
unsigned long flags;
struct net_device_stats *pStats;
struct wl_private *lp = wl_priv(dev);
/*------------------------------------------------------------------------*/
//DBG_FUNC( "wl_stats" );
//DBG_ENTER( DbgInfo );
//DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
pStats = NULL;
wl_lock( lp, &flags );
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
wl_unlock( lp, &flags );
//DBG_LEAVE( DbgInfo );
return NULL;
}
#endif /* USE_RTS */
/* Return the statistics for the appropriate device */
#ifdef USE_WDS
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if( dev == lp->wds_port[count].dev ) {
pStats = &( lp->wds_port[count].stats );
}
}
#endif /* USE_WDS */
/* If pStats is still NULL, then the device is not a WDS port */
if( pStats == NULL ) {
pStats = &( lp->stats );
}
wl_unlock( lp, &flags );
//DBG_LEAVE( DbgInfo );
return pStats;
} // wl_stats
/*============================================================================*/
/*******************************************************************************
* wl_open()
*******************************************************************************
*
* DESCRIPTION:
*
* Open the device.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
*
* RETURNS:
*
* 0 on success
* errno otherwise
*
******************************************************************************/
int wl_open(struct net_device *dev)
{
int status = HCF_SUCCESS;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_open" );
DBG_ENTER( DbgInfo );
wl_lock( lp, &flags );
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping device open, in RTS mode\n" );
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return -EIO;
}
#endif /* USE_RTS */
#ifdef USE_PROFILE
parse_config( dev );
#endif
if( lp->portState == WVLAN_PORT_STATE_DISABLED ) {
DBG_TRACE( DbgInfo, "Enabling Port 0\n" );
status = wl_enable( lp );
if( status != HCF_SUCCESS ) {
DBG_TRACE( DbgInfo, "Enable port 0 failed: 0x%x\n", status );
}
}
// We've held the lock long enough; open a gap so other processes can run
wl_unlock(lp, &flags);
wl_lock( lp, &flags );
if ( strlen( lp->fw_image_filename ) ) {
DBG_TRACE( DbgInfo, ";???? Kludgy way to force a download\n" );
status = wl_go( lp );
} else {
status = wl_apply( lp );
}
// We've held the lock long enough; open a gap so other processes can run
wl_unlock(lp, &flags);
wl_lock( lp, &flags );
if( status != HCF_SUCCESS ) {
// Unsuccessful, try reset of the card to recover
status = wl_reset( dev );
}
// We've held the lock long enough; open a gap so other processes can run
wl_unlock(lp, &flags);
wl_lock( lp, &flags );
if( status == HCF_SUCCESS ) {
netif_carrier_on( dev );
WL_WDS_NETIF_CARRIER_ON( lp );
lp->is_handling_int = WL_HANDLING_INT; // Start handling interrupts
wl_act_int_on( lp );
netif_start_queue( dev );
WL_WDS_NETIF_START_QUEUE( lp );
} else {
wl_hcf_error( dev, status ); /* Report the error */
netif_device_detach( dev ); /* Stop the device and queue */
}
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return status;
} // wl_open
/*============================================================================*/
/*******************************************************************************
* wl_close()
*******************************************************************************
*
* DESCRIPTION:
*
* Close the device.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure
*
* RETURNS:
*
* 0 on success
* errno otherwise
*
******************************************************************************/
int wl_close( struct net_device *dev )
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
/*------------------------------------------------------------------------*/
DBG_FUNC("wl_close");
DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
/* Mark the adapter as busy */
netif_stop_queue( dev );
WL_WDS_NETIF_STOP_QUEUE( lp );
netif_carrier_off( dev );
WL_WDS_NETIF_CARRIER_OFF( lp );
/* Shutdown the adapter:
Disable adapter interrupts
Stop Tx/Rx
Update statistics
Set low power mode
*/
wl_lock( lp, &flags );
wl_act_int_off( lp );
lp->is_handling_int = WL_NOT_HANDLING_INT; // Stop handling interrupts
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping device close, in RTS mode\n" );
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return -EIO;
}
#endif /* USE_RTS */
/* Disable the ports */
wl_disable( lp );
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return 0;
} // wl_close
/*============================================================================*/
static void wl_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION_STR, sizeof(info->version));
// strlcpy(info.fw_version, priv->fw_name,
// sizeof(info.fw_version));
if (dev->dev.parent) {
strlcpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
} else {
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA FIXME");
// "PCMCIA 0x%lx", priv->hw.iobase);
}
} // wl_get_drvinfo
static struct ethtool_ops wl_ethtool_ops = {
.get_drvinfo = wl_get_drvinfo,
.get_link = ethtool_op_get_link,
};
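/*
 * Illustrative sketch, not part of the driver: wl_get_drvinfo() above is
 * what services `ethtool -i <dev>` from user space.  A minimal user-space
 * probe might look like the following; the interface name "wlan0" is an
 * assumption for the example.
 */
#if 0	/* user-space example, kept out of the kernel build */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ - 1);   /* assumed name */
        ifr.ifr_data = (void *)&info;   /* ETHTOOL_GDRVINFO lands in .get_drvinfo */
        if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("driver=%s version=%s bus=%s\n",
                       info.driver, info.version, info.bus_info);
        return 0;
}
#endif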
/*******************************************************************************
* wl_ioctl()
*******************************************************************************
*
* DESCRIPTION:
*
* The IOCTL handler for the device.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device struct.
* rq - a pointer to the IOCTL request buffer.
* cmd - the IOCTL command code.
*
* RETURNS:
*
* 0 on success
* errno value otherwise
*
******************************************************************************/
int wl_ioctl( struct net_device *dev, struct ifreq *rq, int cmd )
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
int ret = 0;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_ioctl" );
DBG_ENTER(DbgInfo);
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
DBG_PARAM(DbgInfo, "rq", "0x%p", rq);
DBG_PARAM(DbgInfo, "cmd", "0x%04x", cmd);
wl_lock( lp, &flags );
wl_act_int_off( lp );
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
/* Handle any RTS IOCTL here */
if( cmd == WL_IOCTL_RTS ) {
DBG_TRACE( DbgInfo, "IOCTL: WL_IOCTL_RTS\n" );
ret = wvlan_rts( (struct rtsreq *)rq, dev->base_addr );
} else {
DBG_TRACE( DbgInfo, "IOCTL not supported in RTS mode: 0x%X\n", cmd );
ret = -EOPNOTSUPP;
}
goto out_act_int_on_unlock;
}
#endif /* USE_RTS */
/* While the UIL has the system blocked, only the UIL IOCTL itself may be handled. */
if( !(( lp->flags & WVLAN2_UIL_BUSY ) && ( cmd != WVLAN2_IOCTL_UIL ))) {
#ifdef USE_UIL
struct uilreq *urq = (struct uilreq *)rq;
#endif /* USE_UIL */
switch( cmd ) {
// ================== Private IOCTLs (up to 16) ==================
#ifdef USE_UIL
case WVLAN2_IOCTL_UIL:
DBG_TRACE( DbgInfo, "IOCTL: WVLAN2_IOCTL_UIL\n" );
ret = wvlan_uil( urq, lp );
break;
#endif /* USE_UIL */
default:
DBG_TRACE(DbgInfo, "IOCTL CODE NOT SUPPORTED: 0x%X\n", cmd );
ret = -EOPNOTSUPP;
break;
}
} else {
DBG_WARNING( DbgInfo, "DEVICE IS BUSY, CANNOT PROCESS REQUEST\n" );
ret = -EBUSY;
}
#ifdef USE_RTS
out_act_int_on_unlock:
#endif /* USE_RTS */
wl_act_int_on( lp );
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return ret;
} // wl_ioctl
/*============================================================================*/
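/*
 * Illustrative sketch, not part of the driver: wl_open(), wl_close(),
 * wl_ioctl() and the Tx/Rx paths all bracket hardware access the same way,
 * taking the spinlock and masking card interrupts on entry and undoing both
 * on exit.  If that pattern were factored out, it could look like the
 * hypothetical helpers below (wl_hw_enter/wl_hw_exit are invented names).
 */
#if 0	/* illustrative only, kept out of the build */
static inline void wl_hw_enter( struct wl_private *lp, unsigned long *flags )
{
        wl_lock( lp, flags );           /* serialize against the ISR */
        wl_act_int_off( lp );           /* mask card interrupts while we touch it */
}

static inline void wl_hw_exit( struct wl_private *lp, unsigned long *flags )
{
        wl_act_int_on( lp );            /* unmask card interrupts */
        wl_unlock( lp, flags );
}
#endif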
#ifdef CONFIG_NET_POLL_CONTROLLER
void wl_poll(struct net_device *dev)
{
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
struct pt_regs regs;
wl_lock( lp, &flags );
wl_isr(dev->irq, dev, &regs);
wl_unlock( lp, &flags );
}
#endif
/*******************************************************************************
* wl_tx_timeout()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler called when, for some reason, a Tx request is not completed.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device struct.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_tx_timeout( struct net_device *dev )
{
#ifdef USE_WDS
int count;
#endif /* USE_WDS */
unsigned long flags;
struct wl_private *lp = wl_priv(dev);
struct net_device_stats *pStats = NULL;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_tx_timeout" );
DBG_ENTER( DbgInfo );
DBG_WARNING( DbgInfo, "%s: Transmit timeout.\n", dev->name );
wl_lock( lp, &flags );
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping tx_timeout handler, in RTS mode\n" );
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
return;
}
#endif /* USE_RTS */
/* Figure out which device (the "root" device or WDS port) this timeout
is for */
#ifdef USE_WDS
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if( dev == lp->wds_port[count].dev ) {
pStats = &( lp->wds_port[count].stats );
/* Break the loop so that we can use the counter to access WDS
information in the private structure */
break;
}
}
#endif /* USE_WDS */
/* If pStats is still NULL, then the device is not a WDS port */
if( pStats == NULL ) {
pStats = &( lp->stats );
}
/* Accumulate the timeout error */
pStats->tx_errors++;
wl_unlock( lp, &flags );
DBG_LEAVE( DbgInfo );
} // wl_tx_timeout
/*============================================================================*/
/*******************************************************************************
* wl_send()
*******************************************************************************
*
* DESCRIPTION:
*
* The routine which performs data transmits.
*
* PARAMETERS:
*
* lp - a pointer to the device's wl_private struct.
*
* RETURNS:
*
* TRUE (1) if a transmit was attempted
* FALSE (0) if there was nothing to send or no resources were available
*
******************************************************************************/
int wl_send( struct wl_private *lp )
{
int status;
DESC_STRCT *desc;
WVLAN_LFRAME *txF = NULL;
struct list_head *element;
int len;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_send" );
if( lp == NULL ) {
DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
return FALSE;
}
if( lp->dev == NULL ) {
DBG_ERROR( DbgInfo, "net_device struct in wl_private is NULL\n" );
return FALSE;
}
/* Check for the availability of FIDs; if none are available, don't take any
frames off the txQ */
if( lp->hcfCtx.IFB_RscInd == 0 ) {
return FALSE;
}
/* Reclaim the TxQ Elements and place them back on the free queue */
if( !list_empty( &( lp->txQ[0] ))) {
element = lp->txQ[0].next;
txF = (WVLAN_LFRAME * )list_entry( element, WVLAN_LFRAME, node );
if( txF != NULL ) {
lp->txF.skb = txF->frame.skb;
lp->txF.port = txF->frame.port;
txF->frame.skb = NULL;
txF->frame.port = 0;
list_del( &( txF->node ));
list_add( element, &( lp->txFree ));
lp->txQ_count--;
if( lp->txQ_count < TX_Q_LOW_WATER_MARK ) {
if( lp->netif_queue_on == FALSE ) {
DBG_TX( DbgInfo, "Kickstarting Q: %d\n", lp->txQ_count );
netif_wake_queue( lp->dev );
WL_WDS_NETIF_WAKE_QUEUE( lp );
lp->netif_queue_on = TRUE;
}
}
}
}
if( lp->txF.skb == NULL ) {
return FALSE;
}
/* If the device has resources (FIDs) available, then Tx the packet */
/* Format the TxRequest and send it to the adapter */
len = lp->txF.skb->len < ETH_ZLEN ? ETH_ZLEN : lp->txF.skb->len;
desc = &( lp->desc_tx );
desc->buf_addr = lp->txF.skb->data;
desc->BUF_CNT = len;
desc->next_desc_addr = NULL;
status = hcf_send_msg( &( lp->hcfCtx ), desc, lp->txF.port );
if( status == HCF_SUCCESS ) {
lp->dev->trans_start = jiffies;
DBG_TX( DbgInfo, "Transmit...\n" );
if( lp->txF.port == HCF_PORT_0 ) {
lp->stats.tx_packets++;
lp->stats.tx_bytes += lp->txF.skb->len;
}
#ifdef USE_WDS
else
{
lp->wds_port[(( lp->txF.port >> 8 ) - 1)].stats.tx_packets++;
lp->wds_port[(( lp->txF.port >> 8 ) - 1)].stats.tx_bytes += lp->txF.skb->len;
}
#endif /* USE_WDS */
/* Free the skb and perform queue cleanup, as the buffer was
transmitted successfully */
dev_kfree_skb( lp->txF.skb );
lp->txF.skb = NULL;
lp->txF.port = 0;
}
return TRUE;
} // wl_send
/*============================================================================*/
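/*
 * Illustrative sketch, not part of the driver: the queue recycling in
 * wl_send() above unlinks a frame from txQ[0] with list_del() and puts the
 * same node back on txFree with list_add().  The two calls are equivalent
 * to a single list_move(), as wl_tx() below already demonstrates; a
 * hypothetical helper capturing the idea:
 */
#if 0	/* illustrative only */
static void recycle_tx_frame( struct wl_private *lp, WVLAN_LFRAME *txF )
{
        list_move( &txF->node, &lp->txFree );   /* detach and return to the pool */
        lp->txQ_count--;
}
#endif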
/*******************************************************************************
* wl_tx()
*******************************************************************************
*
* DESCRIPTION:
*
* The Tx handler function for the network layer.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff structure containing the data to transfer.
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* 0 on success
* 1 on error
*
******************************************************************************/
int wl_tx( struct sk_buff *skb, struct net_device *dev, int port )
{
unsigned long flags;
struct wl_private *lp = wl_priv(dev);
WVLAN_LFRAME *txF = NULL;
struct list_head *element;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_tx" );
/* Grab the spinlock */
wl_lock( lp, &flags );
if( lp->flags & WVLAN2_UIL_BUSY ) {
DBG_WARNING( DbgInfo, "UIL has device blocked\n" );
/* Start dropping packets here??? */
wl_unlock( lp, &flags );
return 1;
}
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_PRINT( "RTS: we're getting a Tx...\n" );
wl_unlock( lp, &flags );
return 1;
}
#endif /* USE_RTS */
if( !lp->use_dma ) {
/* Get an element from the queue */
element = lp->txFree.next;
txF = (WVLAN_LFRAME *)list_entry( element, WVLAN_LFRAME, node );
if( txF == NULL ) {
DBG_ERROR( DbgInfo, "Problem with list_entry\n" );
wl_unlock( lp, &flags );
return 1;
}
/* Fill out the frame */
txF->frame.skb = skb;
txF->frame.port = port;
/* Move the frame to the txQ */
/* NOTE: Here's where we would do priority queueing */
list_move(&(txF->node), &(lp->txQ[0]));
lp->txQ_count++;
if( lp->txQ_count >= DEFAULT_NUM_TX_FRAMES ) {
DBG_TX( DbgInfo, "Q Full: %d\n", lp->txQ_count );
if( lp->netif_queue_on == TRUE ) {
netif_stop_queue( lp->dev );
WL_WDS_NETIF_STOP_QUEUE( lp );
lp->netif_queue_on = FALSE;
}
}
}
wl_act_int_off( lp ); /* Disable Interrupts */
/* Send the data to the hardware using the appropriate method */
#ifdef ENABLE_DMA
if( lp->use_dma ) {
wl_send_dma( lp, skb, port );
}
else
#endif
{
wl_send( lp );
}
/* Re-enable Interrupts, release the spinlock and return */
wl_act_int_on( lp );
wl_unlock( lp, &flags );
return 0;
} // wl_tx
/*============================================================================*/
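/*
 * Illustrative sketch, not part of the driver: wl_tx() and wl_send()
 * together implement two-watermark flow control, stopping the stack's
 * queue when txQ_count reaches DEFAULT_NUM_TX_FRAMES and waking it only
 * once the backlog drains below TX_Q_LOW_WATER_MARK, so the queue does not
 * thrash on and off at the boundary.  Condensed into one routine:
 */
#if 0	/* illustrative only */
static void example_tx_flow_control( struct wl_private *lp )
{
        if( lp->txQ_count >= DEFAULT_NUM_TX_FRAMES && lp->netif_queue_on ) {
                netif_stop_queue( lp->dev );
                lp->netif_queue_on = FALSE;
        } else if( lp->txQ_count < TX_Q_LOW_WATER_MARK && !lp->netif_queue_on ) {
                netif_wake_queue( lp->dev );
                lp->netif_queue_on = TRUE;
        }
}
#endif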
/*******************************************************************************
* wl_rx()
*******************************************************************************
*
* DESCRIPTION:
*
* The routine which performs data reception.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* 0 on success
* -EIO on error
*
******************************************************************************/
int wl_rx(struct net_device *dev)
{
int port;
struct sk_buff *skb;
struct wl_private *lp = wl_priv(dev);
int status;
hcf_16 pktlen;
hcf_16 hfs_stat;
DESC_STRCT *desc;
/*------------------------------------------------------------------------*/
DBG_FUNC("wl_rx")
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
if(!( lp->flags & WVLAN2_UIL_BUSY )) {
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_PRINT( "RTS: We're getting an Rx...\n" );
return -EIO;
}
#endif /* USE_RTS */
/* Read the HFS_STAT register from the lookahead buffer */
hfs_stat = (hcf_16)(( lp->lookAheadBuf[HFS_STAT] ) |
( lp->lookAheadBuf[HFS_STAT + 1] << 8 ));
/* Make sure the frame isn't bad */
if(( hfs_stat & HFS_STAT_ERR ) != HCF_SUCCESS ) {
DBG_WARNING( DbgInfo, "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
lp->lookAheadBuf[HFS_STAT] );
return -EIO;
}
/* Determine what port this packet is for */
port = ( hfs_stat >> 8 ) & 0x0007;
DBG_RX( DbgInfo, "Rx frame for port %d\n", port );
pktlen = lp->hcfCtx.IFB_RxLen;
if (pktlen != 0) {
skb = ALLOC_SKB(pktlen);
if (skb != NULL) {
/* Set the netdev based on the port */
switch( port ) {
#ifdef USE_WDS
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
skb->dev = lp->wds_port[port-1].dev;
break;
#endif /* USE_WDS */
case 0:
default:
skb->dev = dev;
break;
}
desc = &( lp->desc_rx );
desc->next_desc_addr = NULL;
/*
#define BLOCK_INPUT(buf, len) \
desc->buf_addr = buf; \
desc->BUF_SIZE = len; \
status = hcf_rcv_msg(&(lp->hcfCtx), desc, 0)
*/
GET_PACKET( skb->dev, skb, pktlen );
if( status == HCF_SUCCESS ) {
netif_rx( skb );
if( port == 0 ) {
lp->stats.rx_packets++;
lp->stats.rx_bytes += pktlen;
}
#ifdef USE_WDS
else
{
lp->wds_port[port-1].stats.rx_packets++;
lp->wds_port[port-1].stats.rx_bytes += pktlen;
}
#endif /* USE_WDS */
dev->last_rx = jiffies;
#ifdef WIRELESS_EXT
#ifdef WIRELESS_SPY
if( lp->spy_data.spy_number > 0 ) {
char *srcaddr = skb->mac.raw + MAC_ADDR_SIZE;
wl_spy_gather( dev, srcaddr );
}
#endif /* WIRELESS_SPY */
#endif /* WIRELESS_EXT */
} else {
DBG_ERROR( DbgInfo, "Rx request to card FAILED\n" );
if( port == 0 ) {
lp->stats.rx_dropped++;
}
#ifdef USE_WDS
else
{
lp->wds_port[port-1].stats.rx_dropped++;
}
#endif /* USE_WDS */
dev_kfree_skb( skb );
}
} else {
DBG_ERROR( DbgInfo, "Could not alloc skb\n" );
if( port == 0 ) {
lp->stats.rx_dropped++;
}
#ifdef USE_WDS
else
{
lp->wds_port[port-1].stats.rx_dropped++;
}
#endif /* USE_WDS */
}
}
}
return 0;
} // wl_rx
/*============================================================================*/
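/*
 * Illustrative sketch, not part of the driver: HFS_STAT is a little-endian
 * 16-bit status word read from the lookahead buffer.  Its low byte carries
 * the error bits tested via HFS_STAT_ERR, and bits 8-10 select the HCF port
 * (0 = root device, 1-6 = WDS ports), which is why wl_rx() masks with
 * 0x0007 after shifting:
 */
#if 0	/* illustrative only */
static int example_hfs_stat_to_port( hcf_16 hfs_stat )
{
        return ( hfs_stat >> 8 ) & 0x0007;      /* three port-select bits */
}
#endif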
/*******************************************************************************
* wl_multicast()
*******************************************************************************
*
* DESCRIPTION:
*
* Function to handle multicast packets
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
#ifdef NEW_MULTICAST
void wl_multicast( struct net_device *dev )
{
#if 1 //;? (HCF_TYPE) & HCF_TYPE_STA //;?should we return an error status in AP mode
//;?seems reasonable that even an AP-only driver could afford this small additional footprint
int x;
struct netdev_hw_addr *ha;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_multicast" );
DBG_ENTER( DbgInfo );
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
if( !wl_adapter_is_open( dev )) {
DBG_LEAVE( DbgInfo );
return;
}
#if DBG
if( DBG_FLAGS( DbgInfo ) & DBG_PARAM_ON ) {
DBG_PRINT(" flags: %s%s%s\n",
( dev->flags & IFF_PROMISC ) ? "Promiscuous " : "",
( dev->flags & IFF_MULTICAST ) ? "Multicast " : "",
( dev->flags & IFF_ALLMULTI ) ? "All-Multicast" : "" );
DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
netdev_for_each_mc_addr(ha, dev)
DBG_PRINT(" %pM (%d)\n", ha->addr, dev->addr_len);
}
#endif /* DBG */
if(!( lp->flags & WVLAN2_UIL_BUSY )) {
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_TRACE( DbgInfo, "Skipping multicast, in RTS mode\n" );
DBG_LEAVE( DbgInfo );
return;
}
#endif /* USE_RTS */
wl_lock( lp, &flags );
wl_act_int_off( lp );
if ( CNV_INT_TO_LITTLE( lp->hcfCtx.IFB_FWIdentity.comp_id ) == COMP_ID_FW_STA ) {
if( dev->flags & IFF_PROMISC ) {
/* Enable promiscuous mode */
lp->ltvRecord.len = 2;
lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 1 );
DBG_PRINT( "Enabling Promiscuous mode (IFF_PROMISC)\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
}
else if ((netdev_mc_count(dev) > HCF_MAX_MULTICAST) ||
( dev->flags & IFF_ALLMULTI )) {
/* Shutting off this filter will enable all multicast frames to
be sent up from the device; however, this is a static RID, so
a call to wl_apply() is needed */
lp->ltvRecord.len = 2;
lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 0 );
DBG_PRINT( "Enabling all multicast mode (IFF_ALLMULTI)\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
wl_apply( lp );
}
else if (!netdev_mc_empty(dev)) {
/* Set the multicast addresses */
lp->ltvRecord.len = ( netdev_mc_count(dev) * 3 ) + 1;
lp->ltvRecord.typ = CFG_GROUP_ADDR;
x = 0;
netdev_for_each_mc_addr(ha, dev)
memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]),
ha->addr, ETH_ALEN);
DBG_PRINT( "Setting multicast list\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
} else {
/* Disable promiscuous mode */
lp->ltvRecord.len = 2;
lp->ltvRecord.typ = CFG_PROMISCUOUS_MODE;
lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 0 );
DBG_PRINT( "Disabling Promiscuous mode\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
/* Disable multicast mode */
lp->ltvRecord.len = 2;
lp->ltvRecord.typ = CFG_GROUP_ADDR;
DBG_PRINT( "Disabling Multicast mode\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
/* Turning on this filter will prevent all multicast frames from
being sent up from the device; however, this is a static RID,
so a call to wl_apply() is needed */
lp->ltvRecord.len = 2;
lp->ltvRecord.typ = CFG_CNF_RX_ALL_GROUP_ADDR;
lp->ltvRecord.u.u16[0] = CNV_INT_TO_LITTLE( 1 );
DBG_PRINT( "Disabling all multicast mode (IFF_ALLMULTI)\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
wl_apply( lp );
}
}
wl_act_int_on( lp );
wl_unlock( lp, &flags );
}
DBG_LEAVE( DbgInfo );
#endif /* HCF_STA */
} // wl_multicast
/*============================================================================*/
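/*
 * Illustrative sketch, not part of the driver: the Hermes configuration
 * records written above are LTV (Length/Type/Value) triples, where len
 * counts the 16-bit words that follow the length field.  A list of N
 * six-byte MAC addresses therefore needs N * 3 words plus one for the
 * type, which is where the "* 3 + 1" in wl_multicast() comes from:
 */
#if 0	/* illustrative only */
static void example_set_mc_list( struct wl_private *lp, struct net_device *dev )
{
        int x = 0;
        struct netdev_hw_addr *ha;

        lp->ltvRecord.len = ( netdev_mc_count(dev) * 3 ) + 1;
        lp->ltvRecord.typ = CFG_GROUP_ADDR;
        netdev_for_each_mc_addr(ha, dev)
                memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]), ha->addr, ETH_ALEN);
        hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
}
#endif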
#else /* NEW_MULTICAST */
void wl_multicast( struct net_device *dev, int num_addrs, void *addrs )
{
DBG_FUNC( "wl_multicast");
DBG_ENTER(DbgInfo);
DBG_PARAM( DbgInfo, "dev", "%s (0x%p)", dev->name, dev );
DBG_PARAM( DbgInfo, "num_addrs", "%d", num_addrs );
DBG_PARAM( DbgInfo, "addrs", "0x%p", addrs );
#error Obsolete set multicast interface!
DBG_LEAVE( DbgInfo );
} // wl_multicast
/*============================================================================*/
#endif /* NEW_MULTICAST */
static const struct net_device_ops wl_netdev_ops =
{
.ndo_start_xmit = &wl_tx_port0,
.ndo_set_config = &wl_config,
.ndo_get_stats = &wl_stats,
.ndo_set_rx_mode = &wl_multicast,
.ndo_init = &wl_insert,
.ndo_open = &wl_adapter_open,
.ndo_stop = &wl_adapter_close,
.ndo_do_ioctl = &wl_ioctl,
.ndo_tx_timeout = &wl_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = wl_poll,
#endif
};
/*******************************************************************************
* wl_device_alloc()
*******************************************************************************
*
* DESCRIPTION:
*
* Create instances of net_device and wl_private for the new adapter
* and register the device's entry points in the net_device structure.
*
* PARAMETERS:
*
* N/A
*
* RETURNS:
*
* a pointer to an allocated and initialized net_device struct for this
* device.
*
******************************************************************************/
struct net_device * wl_device_alloc( void )
{
struct net_device *dev = NULL;
struct wl_private *lp = NULL;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_device_alloc" );
DBG_ENTER( DbgInfo );
/* Alloc a net_device struct */
dev = alloc_etherdev(sizeof(struct wl_private));
if (!dev)
return NULL;
/* Initialize the 'next' pointer in the struct. Currently only used for PCI,
but do it here just in case it's used for other buses in the future */
lp = wl_priv(dev);
/* Check MTU */
if( dev->mtu > MTU_MAX )
{
DBG_WARNING( DbgInfo, "%s: MTU set too high, limiting to %d.\n",
dev->name, MTU_MAX );
dev->mtu = MTU_MAX;
}
/* Setup the function table in the device structure. */
dev->wireless_handlers = (struct iw_handler_def *)&wl_iw_handler_def;
lp->wireless_data.spy_data = &lp->spy_data;
dev->wireless_data = &lp->wireless_data;
dev->netdev_ops = &wl_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->ethtool_ops = &wl_ethtool_ops;
netif_stop_queue( dev );
/* Allocate virtual devices for WDS support if needed */
WL_WDS_DEVICE_ALLOC( lp );
DBG_LEAVE( DbgInfo );
return dev;
} // wl_device_alloc
/*============================================================================*/
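/*
 * Illustrative sketch, not part of the driver: wl_device_alloc() only
 * builds the net_device; the bus attach code (PCMCIA/PCI, in other files)
 * is expected to pair it with register_netdev() roughly as below.
 * example_attach() is an invented name for illustration.
 */
#if 0	/* illustrative only */
static int example_attach( void )
{
        struct net_device *dev = wl_device_alloc();

        if( dev == NULL )
                return -ENOMEM;
        if( register_netdev( dev ) != 0 ) {     /* makes the interface visible */
                wl_device_dealloc( dev );
                return -ENODEV;
        }
        return 0;
}
#endif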
/*******************************************************************************
* wl_device_dealloc()
*******************************************************************************
*
* DESCRIPTION:
*
* Free instances of net_device and wl_private structures for an adapter
* and perform basic cleanup.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_device_dealloc( struct net_device *dev )
{
struct wl_private *lp = wl_priv(dev);
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_device_dealloc" );
DBG_ENTER( DbgInfo );
/* Dealloc the WDS ports */
WL_WDS_DEVICE_DEALLOC( lp );
free_netdev( dev );
DBG_LEAVE( DbgInfo );
} // wl_device_dealloc
/*============================================================================*/
/*******************************************************************************
* wl_tx_port0()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_0.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_0.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port0( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 0\n" );
/* wl_tx() dispatches to the DMA send path internally when lp->use_dma is
set, so no separate wl_tx_dma() call is needed here */
return wl_tx( skb, dev, HCF_PORT_0 );
} // wl_tx_port0
/*============================================================================*/
#ifdef USE_WDS
/*******************************************************************************
* wl_tx_port1()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_1.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_1.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port1( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 1\n" );
return wl_tx( skb, dev, HCF_PORT_1 );
} // wl_tx_port1
/*============================================================================*/
/*******************************************************************************
* wl_tx_port2()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_2.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_2.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port2( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 2\n" );
return wl_tx( skb, dev, HCF_PORT_2 );
} // wl_tx_port2
/*============================================================================*/
/*******************************************************************************
* wl_tx_port3()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_3.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_3.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port3( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 3\n" );
return wl_tx( skb, dev, HCF_PORT_3 );
} // wl_tx_port3
/*============================================================================*/
/*******************************************************************************
* wl_tx_port4()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_4.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_4.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port4( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 4\n" );
return wl_tx( skb, dev, HCF_PORT_4 );
} // wl_tx_port4
/*============================================================================*/
/*******************************************************************************
* wl_tx_port5()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_5.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_5.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port5( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 5\n" );
return wl_tx( skb, dev, HCF_PORT_5 );
} // wl_tx_port5
/*============================================================================*/
/*******************************************************************************
* wl_tx_port6()
*******************************************************************************
*
* DESCRIPTION:
*
* The handler routine for Tx over HCF_PORT_6.
*
* PARAMETERS:
*
* skb - a pointer to the sk_buff to transmit.
* dev - a pointer to a net_device structure representing HCF_PORT_6.
*
* RETURNS:
*
* N/A
*
******************************************************************************/
int wl_tx_port6( struct sk_buff *skb, struct net_device *dev )
{
DBG_TX( DbgInfo, "Tx on Port 6\n" );
return wl_tx( skb, dev, HCF_PORT_6 );
} // wl_tx_port6
/*============================================================================*/
/*******************************************************************************
* wl_wds_device_alloc()
*******************************************************************************
*
* DESCRIPTION:
*
* Create instances of net_device to represent the WDS ports, and register
* the device's entry points in the net_device structure.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A, but will place pointers to the allocated and initialized net_device
* structs in the private adapter structure.
*
******************************************************************************/
void wl_wds_device_alloc( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_wds_device_alloc" );
DBG_ENTER( DbgInfo );
/* WDS support requires additional net_device structs to be allocated,
so that user space apps can use these virtual devices to specify the
port on which to Tx/Rx */
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
struct net_device *dev_wds = NULL;
dev_wds = kzalloc(sizeof(struct net_device), GFP_KERNEL);
if (!dev_wds) {
DBG_LEAVE(DbgInfo);
return;
}
ether_setup( dev_wds );
lp->wds_port[count].dev = dev_wds;
/* Re-use wl_init for all the devices, as it currently does nothing, but
is required. Re-use the stats/tx_timeout handler for all as well; the
WDS port which is requesting these operations can be determined by
the net_device pointer. Set the private member of all devices to point
to the same net_device struct; that way, all information gets
funnelled through the one "real" net_device. Name the WDS ports
"wds<n>" */
lp->wds_port[count].dev->init = &wl_init;
lp->wds_port[count].dev->get_stats = &wl_stats;
lp->wds_port[count].dev->tx_timeout = &wl_tx_timeout;
lp->wds_port[count].dev->watchdog_timeo = TX_TIMEOUT;
lp->wds_port[count].dev->priv = lp;
sprintf( lp->wds_port[count].dev->name, "wds%d", count );
}
/* Register the Tx handlers */
lp->wds_port[0].dev->hard_start_xmit = &wl_tx_port1;
lp->wds_port[1].dev->hard_start_xmit = &wl_tx_port2;
lp->wds_port[2].dev->hard_start_xmit = &wl_tx_port3;
lp->wds_port[3].dev->hard_start_xmit = &wl_tx_port4;
lp->wds_port[4].dev->hard_start_xmit = &wl_tx_port5;
lp->wds_port[5].dev->hard_start_xmit = &wl_tx_port6;
WL_WDS_NETIF_STOP_QUEUE( lp );
DBG_LEAVE( DbgInfo );
} // wl_wds_device_alloc
/*============================================================================*/
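/*
 * Illustrative sketch, not part of the driver: a net_device must normally
 * come from alloc_netdev()/alloc_etherdev() rather than a bare kzalloc(),
 * so that its reference counting and free_netdev() behave correctly.  A
 * hedged outline of how one WDS port might be allocated that way
 * (example_wds_alloc is an invented name):
 */
#if 0	/* illustrative only */
static struct net_device *example_wds_alloc( int port )
{
        struct net_device *dev_wds = alloc_etherdev( 0 );   /* ether_setup() implied */

        if( dev_wds == NULL )
                return NULL;
        snprintf( dev_wds->name, IFNAMSIZ, "wds%d", port );
        dev_wds->watchdog_timeo = TX_TIMEOUT;
        return dev_wds;
}
#endif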
/*******************************************************************************
* wl_wds_device_dealloc()
*******************************************************************************
*
* DESCRIPTION:
*
* Free instances of net_device structures used to support WDS.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_wds_device_dealloc( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_wds_device_dealloc" );
DBG_ENTER( DbgInfo );
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
struct net_device *dev_wds = NULL;
dev_wds = lp->wds_port[count].dev;
if( dev_wds != NULL ) {
if( dev_wds->flags & IFF_UP ) {
dev_close( dev_wds );
dev_wds->flags &= ~( IFF_UP | IFF_RUNNING );
}
free_netdev(dev_wds);
lp->wds_port[count].dev = NULL;
}
}
DBG_LEAVE( DbgInfo );
} // wl_wds_device_dealloc
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_start_queue()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to start the netif queues of all the "virtual" network devices
* which represent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_wds_netif_start_queue( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
if( lp != NULL ) {
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if( lp->wds_port[count].is_registered &&
lp->wds_port[count].netif_queue_on == FALSE ) {
netif_start_queue( lp->wds_port[count].dev );
lp->wds_port[count].netif_queue_on = TRUE;
}
}
}
} // wl_wds_netif_start_queue
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_stop_queue()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to stop the netif queues of all the "virtual" network devices
* which represent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_wds_netif_stop_queue( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
if( lp != NULL ) {
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if( lp->wds_port[count].is_registered &&
lp->wds_port[count].netif_queue_on == TRUE ) {
netif_stop_queue( lp->wds_port[count].dev );
lp->wds_port[count].netif_queue_on = FALSE;
}
}
}
} // wl_wds_netif_stop_queue
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_wake_queue()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to wake the netif queues of all the "virtual" network devices
* which represent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_wds_netif_wake_queue( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
if( lp != NULL ) {
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if( lp->wds_port[count].is_registered &&
lp->wds_port[count].netif_queue_on == FALSE ) {
netif_wake_queue( lp->wds_port[count].dev );
lp->wds_port[count].netif_queue_on = TRUE;
}
}
}
} // wl_wds_netif_wake_queue
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_carrier_on()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to signal the network layer that carrier is present on all of the
* "virtual" network devices which represent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_wds_netif_carrier_on( struct wl_private *lp )
{
int count;
/*------------------------------------------------------------------------*/
if( lp != NULL ) {
for( count = 0; count < NUM_WDS_PORTS; count++ ) {
if( lp->wds_port[count].is_registered ) {
netif_carrier_on( lp->wds_port[count].dev );
}
}
}
} // wl_wds_netif_carrier_on
/*============================================================================*/
/*******************************************************************************
* wl_wds_netif_carrier_off()
*******************************************************************************
*
* DESCRIPTION:
*
* Used to signal the network layer that carrier is NOT present on all of
* the "virtual" network devices which represent the WDS ports.
*
* PARAMETERS:
*
* lp - a pointer to the device's private adapter structure
*
* RETURNS:
*
* N/A
*
******************************************************************************/
void wl_wds_netif_carrier_off( struct wl_private *lp )
{
int count;
if(lp != NULL) {
for(count = 0; count < NUM_WDS_PORTS; count++) {
if(lp->wds_port[count].is_registered)
netif_carrier_off(lp->wds_port[count].dev);
}
}
} // wl_wds_netif_carrier_off
/*============================================================================*/
#endif /* USE_WDS */
#ifdef ENABLE_DMA
/*******************************************************************************
* wl_send_dma()
*******************************************************************************
*
* DESCRIPTION:
*
* The routine which performs data transmits when using busmaster DMA.
*
* PARAMETERS:
*
* lp - a pointer to the device's wl_private struct.
* skb - a pointer to the network layer's data buffer.
* port - the Hermes port on which to transmit.
*
* RETURNS:
*
* TRUE (1) on success
* FALSE (0) on error
*
******************************************************************************/
int wl_send_dma( struct wl_private *lp, struct sk_buff *skb, int port )
{
int len;
DESC_STRCT *desc = NULL;
DESC_STRCT *desc_next = NULL;
/*------------------------------------------------------------------------*/
DBG_FUNC( "wl_send_dma" );
if( lp == NULL ) {
DBG_ERROR( DbgInfo, "Private adapter struct is NULL\n" );
return FALSE;
}
if( lp->dev == NULL ) {
DBG_ERROR( DbgInfo, "net_device struct in wl_private is NULL\n" );
return FALSE;
}
/* AGAIN, ALL THE QUEUEING DONE HERE IN I/O MODE IS NOT PERFORMED */
if( skb == NULL ) {
DBG_WARNING (DbgInfo, "Nothing to send.\n");
return FALSE;
}
len = skb->len;
/* Get a free descriptor; if none are available, stop the queue and drop
the frame rather than dereferencing a NULL descriptor below */
desc = wl_pci_dma_get_tx_packet( lp );
if( desc == NULL ) {
if( lp->netif_queue_on == TRUE ) {
netif_stop_queue( lp->dev );
WL_WDS_NETIF_STOP_QUEUE( lp );
lp->netif_queue_on = FALSE;
}
dev_kfree_skb( skb );
return 0;
}
SET_BUF_CNT( desc, /*HCF_DMA_FD_CNT*/HFS_ADDR_DEST );
SET_BUF_SIZE( desc, HCF_DMA_TX_BUF1_SIZE );
desc_next = desc->next_desc_addr;
if( desc_next->buf_addr == NULL ) {
DBG_ERROR( DbgInfo, "DMA descriptor buf_addr is NULL\n" );
return FALSE;
}
/* Copy the payload into the DMA packet */
memcpy( desc_next->buf_addr, skb->data, len );
SET_BUF_CNT( desc_next, len );
SET_BUF_SIZE( desc_next, HCF_MAX_PACKET_SIZE );
hcf_dma_tx_put( &( lp->hcfCtx ), desc, 0 );
/* Free the skb and perform queue cleanup, as the buffer was
transmitted successfully */
dev_kfree_skb( skb );
return TRUE;
} // wl_send_dma
/*============================================================================*/
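/*
 * Illustrative sketch, not part of the driver: wl_send_dma() above builds a
 * two-descriptor chain, the first covering the Hermes frame-header area
 * (HFS_ADDR_DEST bytes) and the second carrying the payload copied from the
 * skb.  Reduced to the two fragment fills (the size fields are omitted):
 */
#if 0	/* illustrative only */
static void example_fill_tx_chain( DESC_STRCT *desc, struct sk_buff *skb )
{
        DESC_STRCT *payload = desc->next_desc_addr;

        SET_BUF_CNT( desc, HFS_ADDR_DEST );             /* header fragment */
        memcpy( payload->buf_addr, skb->data, skb->len );
        SET_BUF_CNT( payload, skb->len );               /* payload fragment */
}
#endif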
/*******************************************************************************
* wl_rx_dma()
*******************************************************************************
*
* DESCRIPTION:
*
* The routine which performs data reception when using busmaster DMA.
*
* PARAMETERS:
*
* dev - a pointer to the device's net_device structure.
*
* RETURNS:
*
* 0 on success
* -EIO on error
*
******************************************************************************/
int wl_rx_dma( struct net_device *dev )
{
int port;
hcf_16 pktlen;
hcf_16 hfs_stat;
struct sk_buff *skb;
struct wl_private *lp = NULL;
DESC_STRCT *desc, *desc_next;
//CFG_MB_INFO_RANGE2_STRCT x;
/*------------------------------------------------------------------------*/
DBG_FUNC("wl_rx")
DBG_PARAM(DbgInfo, "dev", "%s (0x%p)", dev->name, dev);
if((( lp = wl_priv(dev) ) != NULL ) &&
!( lp->flags & WVLAN2_UIL_BUSY )) {
#ifdef USE_RTS
if( lp->useRTS == 1 ) {
DBG_PRINT( "RTS: We're getting an Rx...\n" );
return -EIO;
}
#endif /* USE_RTS */
//if( lp->dma.status == 0 )
//{
desc = hcf_dma_rx_get( &( lp->hcfCtx ));
if( desc != NULL )
{
/* Check and see if we rcvd. a WMP frame */
/*
if((( *(hcf_8 *)&desc->buf_addr[HFS_STAT] ) &
( HFS_STAT_MSG_TYPE | HFS_STAT_ERR )) == HFS_STAT_WMP_MSG )
{
DBG_TRACE( DbgInfo, "Got a WMP frame\n" );
x.len = sizeof( CFG_MB_INFO_RANGE2_STRCT ) / sizeof( hcf_16 );
x.typ = CFG_MB_INFO;
x.base_typ = CFG_WMP;
x.frag_cnt = 2;
x.frag_buf[0].frag_len = GET_BUF_CNT( descp ) / sizeof( hcf_16 );
x.frag_buf[0].frag_addr = (hcf_8 *) descp->buf_addr ;
x.frag_buf[1].frag_len = ( GET_BUF_CNT( descp->next_desc_addr ) + 1 ) / sizeof( hcf_16 );
x.frag_buf[1].frag_addr = (hcf_8 *) descp->next_desc_addr->buf_addr ;
hcf_put_info( &( lp->hcfCtx ), (LTVP)&x );
}
*/
desc_next = desc->next_desc_addr;
/* Make sure the buffer isn't empty */
if( GET_BUF_CNT( desc ) == 0 ) {
DBG_WARNING( DbgInfo, "Buffer is empty!\n" );
/* Give the descriptor back to the HCF */
hcf_dma_rx_put( &( lp->hcfCtx ), desc );
return -EIO;
}
/* Read the HFS_STAT register from the lookahead buffer */
hfs_stat = (hcf_16)( desc->buf_addr[HFS_STAT/2] );
/* Make sure the frame isn't bad */
if(( hfs_stat & HFS_STAT_ERR ) != HCF_SUCCESS )
{
DBG_WARNING( DbgInfo, "HFS_STAT_ERROR (0x%x) in Rx Packet\n",
desc->buf_addr[HFS_STAT/2] );
/* Give the descriptor back to the HCF */
hcf_dma_rx_put( &( lp->hcfCtx ), desc );
return -EIO;
}
/* Determine what port this packet is for */
port = ( hfs_stat >> 8 ) & 0x0007;
DBG_RX( DbgInfo, "Rx frame for port %d\n", port );
pktlen = GET_BUF_CNT(desc_next);
if (pktlen != 0) {
skb = ALLOC_SKB(pktlen);
if (skb != NULL) {
switch( port ) {
#ifdef USE_WDS
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
skb->dev = lp->wds_port[port-1].dev;
break;
#endif /* USE_WDS */
case 0:
default:
skb->dev = dev;
break;
}
GET_PACKET_DMA( skb->dev, skb, pktlen );
/* Give the descriptor back to the HCF */
hcf_dma_rx_put( &( lp->hcfCtx ), desc );
netif_rx( skb );
if( port == 0 ) {
lp->stats.rx_packets++;
lp->stats.rx_bytes += pktlen;
}
#ifdef USE_WDS
else
{
lp->wds_port[port-1].stats.rx_packets++;
lp->wds_port[port-1].stats.rx_bytes += pktlen;
}
#endif /* USE_WDS */
dev->last_rx = jiffies;
} else {
DBG_ERROR( DbgInfo, "Could not alloc skb\n" );
if( port == 0 )
{
lp->stats.rx_dropped++;
}
#ifdef USE_WDS
else
{
lp->wds_port[port-1].stats.rx_dropped++;
}
#endif /* USE_WDS */
}
}
}
//}
}
return 0;
} // wl_rx_dma
/*============================================================================*/
#endif // ENABLE_DMA
| gpl-2.0 |
sakuramilk/sc06d_kernel_ics | drivers/net/myri10ge/myri10ge.c | 2382 | 115986 | /*************************************************************************
* myri10ge.c: Myricom Myri-10G Ethernet driver.
*
* Copyright (C) 2005 - 2009 Myricom, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Myricom, Inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*
* If the eeprom on your board is not recent enough, you will need to get a
* newer firmware image at:
* http://www.myri.com/scs/download-Myri10GE.html
*
* Contact Information:
* <help@myri.com>
* Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
*************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>
#include <linux/dca.h>
#include <linux/ip.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/processor.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
#define MYRI10GE_VERSION_STR "1.5.2-1.459"
MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
MODULE_VERSION(MYRI10GE_VERSION_STR);
MODULE_LICENSE("Dual BSD/GPL");
#define MYRI10GE_MAX_ETHER_MTU 9014
#define MYRI10GE_ETH_STOPPED 0
#define MYRI10GE_ETH_STOPPING 1
#define MYRI10GE_ETH_STARTING 2
#define MYRI10GE_ETH_RUNNING 3
#define MYRI10GE_ETH_OPEN_FAILED 4
#define MYRI10GE_EEPROM_STRINGS_SIZE 256
#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
#define MYRI10GE_MAX_LRO_DESCRIPTORS 8
#define MYRI10GE_LRO_MAX_PKTS 64
#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
#define MYRI10GE_ALLOC_ORDER 0
#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)
#define MYRI10GE_MAX_SLICES 32
struct myri10ge_rx_buffer_state {
struct page *page;
int page_offset;
DEFINE_DMA_UNMAP_ADDR(bus);
DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_tx_buffer_state {
struct sk_buff *skb;
int last;
DEFINE_DMA_UNMAP_ADDR(bus);
DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_cmd {
u32 data0;
u32 data1;
u32 data2;
};
struct myri10ge_rx_buf {
struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */
struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */
struct myri10ge_rx_buffer_state *info;
struct page *page;
dma_addr_t bus;
int page_offset;
int cnt;
int fill_cnt;
int alloc_fail;
int mask; /* number of rx slots -1 */
int watchdog_needed;
};
struct myri10ge_tx_buf {
struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */
__be32 __iomem *send_go; /* "go" doorbell ptr */
__be32 __iomem *send_stop; /* "stop" doorbell ptr */
struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */
char *req_bytes;
struct myri10ge_tx_buffer_state *info;
int mask; /* number of transmit slots -1 */
int req ____cacheline_aligned; /* transmit slots submitted */
int pkt_start; /* packets started */
int stop_queue;
int linearized;
int done ____cacheline_aligned; /* transmit slots completed */
int pkt_done; /* packets completed */
int wake_queue;
int queue_active;
};
struct myri10ge_rx_done {
struct mcp_slot *entry;
dma_addr_t bus;
int cnt;
int idx;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS];
};
struct myri10ge_slice_netstats {
unsigned long rx_packets;
unsigned long tx_packets;
unsigned long rx_bytes;
unsigned long tx_bytes;
unsigned long rx_dropped;
unsigned long tx_dropped;
};
struct myri10ge_slice_state {
struct myri10ge_tx_buf tx; /* transmit ring */
struct myri10ge_rx_buf rx_small;
struct myri10ge_rx_buf rx_big;
struct myri10ge_rx_done rx_done;
struct net_device *dev;
struct napi_struct napi;
struct myri10ge_priv *mgp;
struct myri10ge_slice_netstats stats;
__be32 __iomem *irq_claim;
struct mcp_irq_data *fw_stats;
dma_addr_t fw_stats_bus;
int watchdog_tx_done;
int watchdog_tx_req;
int watchdog_rx_done;
#ifdef CONFIG_MYRI10GE_DCA
int cached_dca_tag;
int cpu;
__be32 __iomem *dca_tag;
#endif
char irq_desc[32];
};
struct myri10ge_priv {
struct myri10ge_slice_state *ss;
int tx_boundary; /* boundary transmits cannot cross */
int num_slices;
int running; /* running? */
int small_bytes;
int big_bytes;
int max_intr_slots;
struct net_device *dev;
spinlock_t stats_lock;
u8 __iomem *sram;
int sram_size;
unsigned long board_span;
unsigned long iomem_base;
__be32 __iomem *irq_deassert;
char *mac_addr_string;
struct mcp_cmd_response *cmd;
dma_addr_t cmd_bus;
struct pci_dev *pdev;
int msi_enabled;
int msix_enabled;
struct msix_entry *msix_vectors;
#ifdef CONFIG_MYRI10GE_DCA
int dca_enabled;
int relaxed_order;
#endif
u32 link_state;
unsigned int rdma_tags_available;
int intr_coal_delay;
__be32 __iomem *intr_coal_delay_ptr;
int mtrr;
int wc_enabled;
int down_cnt;
wait_queue_head_t down_wq;
struct work_struct watchdog_work;
struct timer_list watchdog_timer;
int watchdog_resets;
int watchdog_pause;
int pause;
bool fw_name_allocated;
char *fw_name;
char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
char *product_code_string;
char fw_version[128];
int fw_ver_major;
int fw_ver_minor;
int fw_ver_tiny;
int adopted_rx_filter_bug;
u8 mac_addr[6]; /* eeprom mac address */
unsigned long serial_number;
int vendor_specific_offset;
int fw_multicast_support;
u32 features;
u32 max_tso6;
u32 read_dma;
u32 write_dma;
u32 read_write_dma;
u32 link_changes;
u32 msg_enable;
unsigned int board_number;
int rebooted;
};
static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
/* Careful: must be accessed under kparam_block_sysfs_write */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
#define MYRI10GE_MAX_BOARDS 8
static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
{[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
0444);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image names per board");
static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");
static int myri10ge_small_bytes = -1; /* -1 == auto */
module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");
static int myri10ge_msi = 1; /* enable msi by default */
module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");
static int myri10ge_intr_coal_delay = 75;
module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");
static int myri10ge_flow_control = 1;
module_param(myri10ge_flow_control, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");
static int myri10ge_deassert_wait = 1;
module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_deassert_wait,
"Wait when deasserting legacy interrupts");
static int myri10ge_force_firmware = 0;
module_param(myri10ge_force_firmware, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_force_firmware,
"Force firmware to assume aligned completions");
static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
module_param(myri10ge_initial_mtu, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");
static int myri10ge_napi_weight = 64;
module_param(myri10ge_napi_weight, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");
static int myri10ge_watchdog_timeout = 1;
module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");
static int myri10ge_max_irq_loops = 1048576;
module_param(myri10ge_max_irq_loops, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
"Set stuck legacy IRQ detection threshold");
#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK
static int myri10ge_debug = -1; /* defaults above */
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS;
module_param(myri10ge_lro_max_pkts, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_lro_max_pkts,
"Number of LRO packets to be aggregated");
static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");
static int myri10ge_reset_recover = 1;
static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");
static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
module_param(myri10ge_rss_hash, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");
static int myri10ge_dca = 1;
module_param(myri10ge_dca, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");
#define MYRI10GE_FW_OFFSET 1024*1024
#define MYRI10GE_HIGHPART_TO_U32(X) \
(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))
#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
static void myri10ge_set_multicast_list(struct net_device *dev);
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
struct net_device *dev);
static inline void put_be32(__be32 val, __be32 __iomem * p)
{
__raw_writel((__force __u32) val, (__force void __iomem *)p);
}
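/*
 * Illustrative sketch, not part of the driver: put_be32() uses
 * __raw_writel() precisely because the value is already big-endian, so the
 * byte-swapping a plain writel() performs on little-endian hosts must be
 * avoided.  Typical call-site shape (example_write_dma_low is an invented
 * name):
 */
#if 0	/* illustrative only */
static void example_write_dma_low(__be32 __iomem *p, dma_addr_t bus)
{
	/* convert the low 32 bits to wire (big-endian) order, then write raw */
	put_be32(htonl(MYRI10GE_LOWPART_TO_U32(bus)), p);
}
#endif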
static struct net_device_stats *myri10ge_get_stats(struct net_device *dev);
static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
{
if (mgp->fw_name_allocated)
kfree(mgp->fw_name);
mgp->fw_name = name;
mgp->fw_name_allocated = allocated;
}
static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
struct myri10ge_cmd *data, int atomic)
{
struct mcp_cmd *buf;
char buf_bytes[sizeof(*buf) + 8];
struct mcp_cmd_response *response = mgp->cmd;
char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
u32 dma_low, dma_high, result, value;
int sleep_total = 0;
/* ensure buf is aligned to 8 bytes */
buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);
buf->data0 = htonl(data->data0);
buf->data1 = htonl(data->data1);
buf->data2 = htonl(data->data2);
buf->cmd = htonl(cmd);
dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
buf->response_addr.low = htonl(dma_low);
buf->response_addr.high = htonl(dma_high);
response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
mb();
myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));
/* wait up to 15ms. Longest command is the DMA benchmark,
* which is capped at 5ms, but runs from a timeout handler
* that runs every 7.8ms. So a 15ms timeout leaves us with
* a 2.2ms margin
*/
if (atomic) {
/* if atomic is set, do not sleep,
* and try to get the completion quickly
* (1ms will be enough for those commands) */
for (sleep_total = 0;
sleep_total < 1000 &&
response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
sleep_total += 10) {
udelay(10);
mb();
}
} else {
/* use msleep for most commands */
for (sleep_total = 0;
sleep_total < 15 &&
response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
sleep_total++)
msleep(1);
}
result = ntohl(response->result);
value = ntohl(response->data);
if (result != MYRI10GE_NO_RESPONSE_RESULT) {
if (result == 0) {
data->data0 = value;
return 0;
} else if (result == MXGEFW_CMD_UNKNOWN) {
return -ENOSYS;
} else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
return -E2BIG;
} else if (result == MXGEFW_CMD_ERROR_RANGE &&
cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
(data->
data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) !=
0) {
return -ERANGE;
} else {
dev_err(&mgp->pdev->dev,
"command %d failed, result = %d\n",
cmd, result);
return -ENXIO;
}
}
dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
cmd, result);
return -EAGAIN;
}
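/*
 * Illustrative sketch, not part of the driver: a typical
 * myri10ge_send_cmd() call fills the data words, chooses atomic vs.
 * sleeping context with the last argument, and reads any result back from
 * data0, as the MXGEFW_CMD_GET_RX_RING_SIZE query later in this file does.
 */
#if 0	/* illustrative only */
static int example_get_rx_ring_size(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd = { 0 };
	int status;

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	return (status != 0) ? status : (int)cmd.data0;	/* ring size in bytes */
}
#endif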
/*
* The eeprom strings on the lanaiX have the format
* SN=x\0
* MAC=x:x:x:x:x:x\0
* PT:ddd mmm xx xx:xx:xx xx\0
* PV:ddd mmm xx xx:xx:xx xx\0
*/
static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
{
char *ptr, *limit;
int i;
ptr = mgp->eeprom_strings;
limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;
while (*ptr != '\0' && ptr < limit) {
if (memcmp(ptr, "MAC=", 4) == 0) {
ptr += 4;
mgp->mac_addr_string = ptr;
for (i = 0; i < 6; i++) {
if ((ptr + 2) > limit)
goto abort;
mgp->mac_addr[i] =
simple_strtoul(ptr, &ptr, 16);
ptr += 1;
}
}
if (memcmp(ptr, "PC=", 3) == 0) {
ptr += 3;
mgp->product_code_string = ptr;
}
if (memcmp((const void *)ptr, "SN=", 3) == 0) {
ptr += 3;
mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
}
while (ptr < limit && *ptr++) ;
}
return 0;
abort:
dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
return -ENXIO;
}
/*
* Enable or disable periodic RDMAs from the host to make certain
* chipsets resend dropped PCIe messages
*/
static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
{
char __iomem *submit;
__be32 buf[16] __attribute__ ((__aligned__(8)));
u32 dma_low, dma_high;
int i;
/* clear confirmation addr */
mgp->cmd->data = 0;
mb();
/* send a rdma command to the PCIe engine, and wait for the
* response in the confirmation address. The firmware should
* write a -1 there to indicate it is alive and well
*/
dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
buf[0] = htonl(dma_high); /* confirm addr MSW */
buf[1] = htonl(dma_low); /* confirm addr LSW */
buf[2] = MYRI10GE_NO_CONFIRM_DATA; /* confirm data */
buf[3] = htonl(dma_high); /* dummy addr MSW */
buf[4] = htonl(dma_low); /* dummy addr LSW */
buf[5] = htonl(enable); /* enable? */
submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;
myri10ge_pio_copy(submit, &buf, sizeof(buf));
for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
msleep(1);
if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
(enable ? "enable" : "disable"));
}
static int
myri10ge_validate_firmware(struct myri10ge_priv *mgp,
struct mcp_gen_header *hdr)
{
struct device *dev = &mgp->pdev->dev;
/* check firmware type */
if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
return -EINVAL;
}
/* save firmware version for ethtool */
strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
&mgp->fw_ver_minor, &mgp->fw_ver_tiny);
if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
MXGEFW_VERSION_MINOR);
return -EINVAL;
}
return 0;
}
static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
{
unsigned crc, reread_crc;
const struct firmware *fw;
struct device *dev = &mgp->pdev->dev;
unsigned char *fw_readback;
struct mcp_gen_header *hdr;
size_t hdr_offset;
int status;
unsigned i;
if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
dev_err(dev, "Unable to load %s firmware image via hotplug\n",
mgp->fw_name);
status = -EINVAL;
goto abort_with_nothing;
}
/* check size */
if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
fw->size < MCP_HEADER_PTR_OFFSET + 4) {
dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
status = -EINVAL;
goto abort_with_fw;
}
/* check id */
hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
dev_err(dev, "Bad firmware file\n");
status = -EINVAL;
goto abort_with_fw;
}
hdr = (void *)(fw->data + hdr_offset);
status = myri10ge_validate_firmware(mgp, hdr);
if (status != 0)
goto abort_with_fw;
crc = crc32(~0, fw->data, fw->size);
for (i = 0; i < fw->size; i += 256) {
myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
fw->data + i,
min(256U, (unsigned)(fw->size - i)));
mb();
readb(mgp->sram);
}
fw_readback = vmalloc(fw->size);
if (!fw_readback) {
status = -ENOMEM;
goto abort_with_fw;
}
/* corruption checking is good for parity recovery and buggy chipset */
memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
reread_crc = crc32(~0, fw_readback, fw->size);
vfree(fw_readback);
if (crc != reread_crc) {
dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
(unsigned)fw->size, reread_crc, crc);
status = -EIO;
goto abort_with_fw;
}
*size = (u32) fw->size;
abort_with_fw:
release_firmware(fw);
abort_with_nothing:
return status;
}
static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
{
struct mcp_gen_header *hdr;
struct device *dev = &mgp->pdev->dev;
const size_t bytes = sizeof(struct mcp_gen_header);
size_t hdr_offset;
int status;
/* find running firmware header */
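/* the offset is stored big-endian in SRAM; readl() yields the
* little-endian interpretation, so swab32() recovers the value */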
hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
dev_err(dev, "Running firmware has bad header offset (%d)\n",
(int)hdr_offset);
return -EIO;
}
/* copy header of running firmware from SRAM to host memory to
* validate firmware */
hdr = kmalloc(bytes, GFP_KERNEL);
if (hdr == NULL) {
dev_err(dev, "could not malloc firmware hdr\n");
return -ENOMEM;
}
memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
status = myri10ge_validate_firmware(mgp, hdr);
kfree(hdr);
/* check to see if adopted firmware has bug where adopting
* it will cause broadcasts to be filtered unless the NIC
* is kept in ALLMULTI mode */
if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
mgp->adopted_rx_filter_bug = 1;
dev_warn(dev, "Adopting fw %d.%d.%d: "
"working around rx filter bug\n",
mgp->fw_ver_major, mgp->fw_ver_minor,
mgp->fw_ver_tiny);
}
return status;
}
static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
{
struct myri10ge_cmd cmd;
int status;
/* probe for IPv6 TSO support */
mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
&cmd, 0);
if (status == 0) {
mgp->max_tso6 = cmd.data0;
mgp->features |= NETIF_F_TSO6;
}
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
if (status != 0) {
dev_err(&mgp->pdev->dev,
"failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
return -ENXIO;
}
mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));
return 0;
}
static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
{
char __iomem *submit;
__be32 buf[16] __attribute__ ((__aligned__(8)));
u32 dma_low, dma_high, size;
int status, i;
size = 0;
status = myri10ge_load_hotplug_firmware(mgp, &size);
if (status) {
if (!adopt)
return status;
dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");
/* Do not attempt to adopt firmware if there
* was a bad crc */
if (status == -EIO)
return status;
status = myri10ge_adopt_running_firmware(mgp);
if (status != 0) {
dev_err(&mgp->pdev->dev,
"failed to adopt running firmware\n");
return status;
}
dev_info(&mgp->pdev->dev,
"Successfully adopted running firmware\n");
if (mgp->tx_boundary == 4096) {
dev_warn(&mgp->pdev->dev,
"Using firmware currently running on NIC"
". For optimal\n");
dev_warn(&mgp->pdev->dev,
"performance consider loading optimized "
"firmware\n");
dev_warn(&mgp->pdev->dev, "via hotplug\n");
}
set_fw_name(mgp, "adopted", false);
mgp->tx_boundary = 2048;
myri10ge_dummy_rdma(mgp, 1);
status = myri10ge_get_firmware_capabilities(mgp);
return status;
}
/* clear confirmation addr */
mgp->cmd->data = 0;
mb();
/* send a reload command to the bootstrap MCP, and wait for the
* response in the confirmation address. The firmware should
* write a -1 there to indicate it is alive and well
*/
dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
buf[0] = htonl(dma_high); /* confirm addr MSW */
buf[1] = htonl(dma_low); /* confirm addr LSW */
buf[2] = MYRI10GE_NO_CONFIRM_DATA; /* confirm data */
/* FIX: All newest firmware should un-protect the bottom of
* the sram before handoff. However, the very first interfaces
* do not. Therefore the handoff copy must skip the first 8 bytes
*/
buf[3] = htonl(MYRI10GE_FW_OFFSET + 8); /* where the code starts */
buf[4] = htonl(size - 8); /* length of code */
buf[5] = htonl(8); /* where to copy to */
buf[6] = htonl(0); /* where to jump to */
submit = mgp->sram + MXGEFW_BOOT_HANDOFF;
myri10ge_pio_copy(submit, &buf, sizeof(buf));
mb();
msleep(1);
mb();
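/* poll for the handoff confirmation with exponential backoff;
* the sleeps total 1+2+...+256 ms, roughly half a second */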
i = 0;
while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
msleep(1 << i);
i++;
}
if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
dev_err(&mgp->pdev->dev, "handoff failed\n");
return -ENXIO;
}
myri10ge_dummy_rdma(mgp, 1);
status = myri10ge_get_firmware_capabilities(mgp);
return status;
}
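/* The 6-byte station address is packed big-endian into two command
* words: e.g. 00:60:dd:12:34:56 becomes data0 = 0x0060dd12 and
* data1 = 0x00003456. */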
static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
{
struct myri10ge_cmd cmd;
int status;
cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
| (addr[2] << 8) | addr[3]);
cmd.data1 = ((addr[4] << 8) | (addr[5]));
status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
return status;
}
static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
{
struct myri10ge_cmd cmd;
int status, ctl;
ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
if (status) {
netdev_err(mgp->dev, "Failed to set flow control mode\n");
return status;
}
mgp->pause = pause;
return 0;
}
static void
myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
{
struct myri10ge_cmd cmd;
int status, ctl;
ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
if (status)
netdev_err(mgp->dev, "Failed to set promisc mode\n");
}
static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
{
struct myri10ge_cmd cmd;
int status;
u32 len;
struct page *dmatest_page;
dma_addr_t dmatest_bus;
char *test = " ";
dmatest_page = alloc_page(GFP_KERNEL);
if (!dmatest_page)
return -ENOMEM;
dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
DMA_BIDIRECTIONAL);
/* Run a small DMA test.
* The magic multipliers to the length tell the firmware
* to do DMA read, write, or read+write tests. The
* results are returned in cmd.data0. The upper 16
* bits of the return are the number of transfers completed.
* The lower 16 bits are the time in 0.5us ticks that the
* transfers took to complete.
*/
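/* For example, with len = 2048: data2 = 0x08000000 requests a
* read test, 0x00000800 a write test, and 0x08000800 a combined
* read/write test. Since each tick is 0.5us, the factor of 2 in
* the bandwidth computations below converts bytes-per-tick into
* MB/s. */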
len = mgp->tx_boundary;
cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
cmd.data2 = len * 0x10000;
status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
if (status != 0) {
test = "read";
goto abort;
}
mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
cmd.data2 = len * 0x1;
status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
if (status != 0) {
test = "write";
goto abort;
}
mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
cmd.data2 = len * 0x10001;
status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
if (status != 0) {
test = "read/write";
goto abort;
}
mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
(cmd.data0 & 0xffff);
abort:
pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
put_page(dmatest_page);
if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
test, status);
return status;
}
static int myri10ge_reset(struct myri10ge_priv *mgp)
{
struct myri10ge_cmd cmd;
struct myri10ge_slice_state *ss;
int i, status;
size_t bytes;
#ifdef CONFIG_MYRI10GE_DCA
unsigned long dca_tag_off;
#endif
/* try to send a reset command to the card to see if it
* is alive */
memset(&cmd, 0, sizeof(cmd));
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
if (status != 0) {
dev_err(&mgp->pdev->dev, "failed reset\n");
return -ENXIO;
}
(void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
/*
* Use a non-ndis mcp_slot (e.g., 4 bytes total,
* no Toeplitz hash value returned). Older firmware will
* not understand this command, but will use the correct
* sized mcp_slot, so we ignore error returns
*/
cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
(void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);
/* Now exchange information about interrupts */
bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
cmd.data0 = (u32) bytes;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
/*
* Even though we already know how many slices are supported
* via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
* has magic side effects, and must be called after a reset.
* It must be called prior to calling any RSS related cmds,
* including assigning an interrupt queue for anything but
* slice 0. It must also be called *after*
* MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
* the firmware to compute offsets.
*/
if (mgp->num_slices > 1) {
/* ask the firmware for the maximum number of slices it supports */
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
&cmd, 0);
if (status != 0) {
dev_err(&mgp->pdev->dev,
"failed to get number of slices\n");
}
/*
* MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
* to setting up the interrupt queue DMA
*/
cmd.data0 = mgp->num_slices;
cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
if (mgp->dev->real_num_tx_queues > 1)
cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
&cmd, 0);
/* Firmware older than 1.4.32 only supports multiple
* RX queues, so if we get an error, first retry using a
* single TX queue before giving up */
if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
netif_set_real_num_tx_queues(mgp->dev, 1);
cmd.data0 = mgp->num_slices;
cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
status = myri10ge_send_cmd(mgp,
MXGEFW_CMD_ENABLE_RSS_QUEUES,
&cmd, 0);
}
if (status != 0) {
dev_err(&mgp->pdev->dev,
"failed to set number of slices\n");
return status;
}
}
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
cmd.data2 = i;
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
&cmd, 0);
}
status |=
myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
ss->irq_claim =
(__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
}
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
&cmd, 0);
mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
status |= myri10ge_send_cmd
(mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
if (status != 0) {
dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
return status;
}
put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
#ifdef CONFIG_MYRI10GE_DCA
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
dca_tag_off = cmd.data0;
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
if (status == 0) {
ss->dca_tag = (__iomem __be32 *)
(mgp->sram + dca_tag_off + 4 * i);
} else {
ss->dca_tag = NULL;
}
}
#endif /* CONFIG_MYRI10GE_DCA */
/* reset mcp/driver shared state back to 0 */
mgp->link_changes = 0;
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
memset(ss->rx_done.entry, 0, bytes);
ss->tx.req = 0;
ss->tx.done = 0;
ss->tx.pkt_start = 0;
ss->tx.pkt_done = 0;
ss->rx_big.cnt = 0;
ss->rx_small.cnt = 0;
ss->rx_done.idx = 0;
ss->rx_done.cnt = 0;
ss->tx.wake_queue = 0;
ss->tx.stop_queue = 0;
}
status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
myri10ge_change_pause(mgp, mgp->pause);
myri10ge_set_multicast_list(mgp->dev);
return status;
}
#ifdef CONFIG_MYRI10GE_DCA
static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
{
int ret, cap, err;
u16 ctl;
cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!cap)
return 0;
err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
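/* PCI_EXP_DEVCTL_RELAX_EN is bit 4 (0x0010), so the shift below
* reduces it to the current 0-or-1 enable state */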
ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
if (ret != on) {
ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
ctl |= (on << 4);
pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
}
return ret;
}
static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
ss->cached_dca_tag = tag;
put_be32(htonl(tag), ss->dca_tag);
}
static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
{
int cpu = get_cpu();
int tag;
if (cpu != ss->cpu) {
tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
if (ss->cached_dca_tag != tag)
myri10ge_write_dca(ss, cpu, tag);
ss->cpu = cpu;
}
put_cpu();
}
static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
{
int err, i;
struct pci_dev *pdev = mgp->pdev;
if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
return;
if (!myri10ge_dca) {
dev_err(&pdev->dev, "dca disabled by administrator\n");
return;
}
err = dca_add_requester(&pdev->dev);
if (err) {
if (err != -ENODEV)
dev_err(&pdev->dev,
"dca_add_requester() failed, err=%d\n", err);
return;
}
mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
mgp->dca_enabled = 1;
for (i = 0; i < mgp->num_slices; i++) {
mgp->ss[i].cpu = -1;
mgp->ss[i].cached_dca_tag = -1;
myri10ge_update_dca(&mgp->ss[i]);
}
}
static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
{
struct pci_dev *pdev = mgp->pdev;
int err;
if (!mgp->dca_enabled)
return;
mgp->dca_enabled = 0;
if (mgp->relaxed_order)
myri10ge_toggle_relaxed(pdev, 1);
err = dca_remove_requester(&pdev->dev);
}
static int myri10ge_notify_dca_device(struct device *dev, void *data)
{
struct myri10ge_priv *mgp;
unsigned long event;
mgp = dev_get_drvdata(dev);
event = *(unsigned long *)data;
if (event == DCA_PROVIDER_ADD)
myri10ge_setup_dca(mgp);
else if (event == DCA_PROVIDER_REMOVE)
myri10ge_teardown_dca(mgp);
return 0;
}
#endif /* CONFIG_MYRI10GE_DCA */
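/* Copy a block of 8 receive descriptors to the NIC. The first
* descriptor's addr_low is temporarily set to all-ones and only
* written with its real value after both 32-byte copies complete,
* so the NIC never sees a partially written block. */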
static inline void
myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
struct mcp_kreq_ether_recv *src)
{
__be32 low;
low = src->addr_low;
src->addr_low = htonl(DMA_BIT_MASK(32));
myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
mb();
myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
mb();
src->addr_low = low;
put_be32(low, &dst->addr_low);
mb();
}
static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
{
struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
if ((skb->protocol == htons(ETH_P_8021Q)) &&
(vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
skb->csum = hw_csum;
skb->ip_summed = CHECKSUM_COMPLETE;
}
}
static inline void
myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
struct skb_frag_struct *rx_frags, int len, int hlen)
{
struct skb_frag_struct *skb_frags;
skb->len = skb->data_len = len;
skb->truesize = len + sizeof(struct sk_buff);
/* attach the page(s) */
skb_frags = skb_shinfo(skb)->frags;
while (len > 0) {
memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
len -= rx_frags->size;
skb_frags++;
rx_frags++;
skb_shinfo(skb)->nr_frags++;
}
/* pskb_may_pull is not available in irq context, but
* skb_pull() (for ether_pad and eth_type_trans()) requires
* the beginning of the packet to be in skb_headlen(); move it
* manually */
skb_copy_to_linear_data(skb, va, hlen);
skb_shinfo(skb)->frags[0].page_offset += hlen;
skb_shinfo(skb)->frags[0].size -= hlen;
skb->data_len -= hlen;
skb->tail += hlen;
skb_pull(skb, MXGEFW_PAD);
}
static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
int bytes, int watchdog)
{
struct page *page;
int idx;
#if MYRI10GE_ALLOC_SIZE > 4096
int end_offset;
#endif
if (unlikely(rx->watchdog_needed && !watchdog))
return;
/* try to refill entire ring */
while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
idx = rx->fill_cnt & rx->mask;
if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
/* we can use part of previous page */
get_page(rx->page);
} else {
/* we need a new page */
page =
alloc_pages(GFP_ATOMIC | __GFP_COMP,
MYRI10GE_ALLOC_ORDER);
if (unlikely(page == NULL)) {
if (rx->fill_cnt - rx->cnt < 16)
rx->watchdog_needed = 1;
return;
}
rx->page = page;
rx->page_offset = 0;
rx->bus = pci_map_page(mgp->pdev, page, 0,
MYRI10GE_ALLOC_SIZE,
PCI_DMA_FROMDEVICE);
}
rx->info[idx].page = rx->page;
rx->info[idx].page_offset = rx->page_offset;
/* note that this is the address of the start of the
* page */
dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
rx->shadow[idx].addr_low =
htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
rx->shadow[idx].addr_high =
htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));
/* start next packet on a cacheline boundary */
rx->page_offset += SKB_DATA_ALIGN(bytes);
#if MYRI10GE_ALLOC_SIZE > 4096
/* don't cross a 4KB boundary */
end_offset = rx->page_offset + bytes - 1;
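/* start and end offsets differ above bit 11 exactly when they
* fall in different 4KB pages, hence the XOR test below */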
if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
rx->page_offset = end_offset & ~4095;
#endif
rx->fill_cnt++;
/* copy 8 descriptors to the firmware at a time */
if ((idx & 7) == 7) {
myri10ge_submit_8rx(&rx->lanai[idx - 7],
&rx->shadow[idx - 7]);
}
}
}
static inline void
myri10ge_unmap_rx_page(struct pci_dev *pdev,
struct myri10ge_rx_buffer_state *info, int bytes)
{
/* unmap the received page if we're the only or last user of it */
if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
(info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
& ~(MYRI10GE_ALLOC_SIZE - 1)),
MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
}
}
#define MYRI10GE_HLEN 64 /* The number of bytes to copy from a
* page into an skb */
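/* 64 bytes is comfortably enough for typical Ethernet + IP + TCP
* headers, so protocol processing can stay in the linear area */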
static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
int lro_enabled)
{
struct myri10ge_priv *mgp = ss->mgp;
struct sk_buff *skb;
struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
struct myri10ge_rx_buf *rx;
int i, idx, hlen, remainder, bytes;
struct pci_dev *pdev = mgp->pdev;
struct net_device *dev = mgp->dev;
u8 *va;
if (len <= mgp->small_bytes) {
rx = &ss->rx_small;
bytes = mgp->small_bytes;
} else {
rx = &ss->rx_big;
bytes = mgp->big_bytes;
}
len += MXGEFW_PAD;
idx = rx->cnt & rx->mask;
va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
prefetch(va);
/* Fill skb_frag_struct(s) with data from our receive */
for (i = 0, remainder = len; remainder > 0; i++) {
myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
rx_frags[i].page = rx->info[idx].page;
rx_frags[i].page_offset = rx->info[idx].page_offset;
if (remainder < MYRI10GE_ALLOC_SIZE)
rx_frags[i].size = remainder;
else
rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
rx->cnt++;
idx = rx->cnt & rx->mask;
remainder -= MYRI10GE_ALLOC_SIZE;
}
if (lro_enabled) {
rx_frags[0].page_offset += MXGEFW_PAD;
rx_frags[0].size -= MXGEFW_PAD;
len -= MXGEFW_PAD;
lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
/* opaque, will come back in get_frag_header */
len, len,
(void *)(__force unsigned long)csum, csum);
return 1;
}
hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;
/* allocate an skb to attach the page(s) to. This is done
* after trying LRO, so as to avoid skb allocation overheads */
skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
if (unlikely(skb == NULL)) {
ss->stats.rx_dropped++;
do {
i--;
put_page(rx_frags[i].page);
} while (i != 0);
return 0;
}
/* Attach the pages to the skb, and trim off any padding */
myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
if (skb_shinfo(skb)->frags[0].size <= 0) {
put_page(skb_shinfo(skb)->frags[0].page);
skb_shinfo(skb)->nr_frags = 0;
}
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, ss - &mgp->ss[0]);
if (dev->features & NETIF_F_RXCSUM) {
if ((skb->protocol == htons(ETH_P_IP)) ||
(skb->protocol == htons(ETH_P_IPV6))) {
skb->csum = csum;
skb->ip_summed = CHECKSUM_COMPLETE;
} else
myri10ge_vlan_ip_csum(skb, csum);
}
netif_receive_skb(skb);
return 1;
}
static inline void
myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
{
struct pci_dev *pdev = ss->mgp->pdev;
struct myri10ge_tx_buf *tx = &ss->tx;
struct netdev_queue *dev_queue;
struct sk_buff *skb;
int idx, len;
while (tx->pkt_done != mcp_index) {
idx = tx->done & tx->mask;
skb = tx->info[idx].skb;
/* Mark as free */
tx->info[idx].skb = NULL;
if (tx->info[idx].last) {
tx->pkt_done++;
tx->info[idx].last = 0;
}
tx->done++;
len = dma_unmap_len(&tx->info[idx], len);
dma_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
ss->stats.tx_bytes += skb->len;
ss->stats.tx_packets++;
dev_kfree_skb_irq(skb);
if (len)
pci_unmap_single(pdev,
dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
} else {
if (len)
pci_unmap_page(pdev,
dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
}
}
dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
/*
* Make a minimal effort to prevent the NIC from polling an
* idle tx queue. If we can't get the lock we leave the queue
* active. In this case, either a thread was about to start
* using the queue anyway, or we lost a race and the NIC will
* waste some of its resources polling an inactive queue for a
* while.
*/
if ((ss->mgp->dev->real_num_tx_queues > 1) &&
__netif_tx_trylock(dev_queue)) {
if (tx->req == tx->done) {
tx->queue_active = 0;
put_be32(htonl(1), tx->send_stop);
mb();
mmiowb();
}
__netif_tx_unlock(dev_queue);
}
/* start the queue if we've stopped it */
if (netif_tx_queue_stopped(dev_queue) &&
tx->req - tx->done < (tx->mask >> 1)) {
tx->wake_queue++;
netif_tx_wake_queue(dev_queue);
}
}
static inline int
myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
{
struct myri10ge_rx_done *rx_done = &ss->rx_done;
struct myri10ge_priv *mgp = ss->mgp;
unsigned long rx_bytes = 0;
unsigned long rx_packets = 0;
unsigned long rx_ok;
int idx = rx_done->idx;
int cnt = rx_done->cnt;
int work_done = 0;
u16 length;
__wsum checksum;
/*
* Prevent compiler from generating more than one ->features memory
* access to avoid theoretical race condition with functions that
* change NETIF_F_LRO flag at runtime.
*/
bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
while (rx_done->entry[idx].length != 0 && work_done < budget) {
length = ntohs(rx_done->entry[idx].length);
rx_done->entry[idx].length = 0;
checksum = csum_unfold(rx_done->entry[idx].checksum);
rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled);
rx_packets += rx_ok;
rx_bytes += rx_ok * (unsigned long)length;
cnt++;
idx = cnt & (mgp->max_intr_slots - 1);
work_done++;
}
rx_done->idx = idx;
rx_done->cnt = cnt;
ss->stats.rx_packets += rx_packets;
ss->stats.rx_bytes += rx_bytes;
if (lro_enabled)
lro_flush_all(&rx_done->lro_mgr);
/* restock receive rings if needed */
if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
mgp->small_bytes + MXGEFW_PAD, 0);
if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
return work_done;
}
static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
{
struct mcp_irq_data *stats = mgp->ss[0].fw_stats;
if (unlikely(stats->stats_updated)) {
unsigned link_up = ntohl(stats->link_up);
if (mgp->link_state != link_up) {
mgp->link_state = link_up;
if (mgp->link_state == MXGEFW_LINK_UP) {
if (netif_msg_link(mgp))
netdev_info(mgp->dev, "link up\n");
netif_carrier_on(mgp->dev);
mgp->link_changes++;
} else {
if (netif_msg_link(mgp))
netdev_info(mgp->dev, "link %s\n",
link_up == MXGEFW_LINK_MYRINET ?
"mismatch (Myrinet detected)" :
"down");
netif_carrier_off(mgp->dev);
mgp->link_changes++;
}
}
if (mgp->rdma_tags_available !=
ntohl(stats->rdma_tags_available)) {
mgp->rdma_tags_available =
ntohl(stats->rdma_tags_available);
netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
mgp->rdma_tags_available);
}
mgp->down_cnt += stats->link_down;
if (stats->link_down)
wake_up(&mgp->down_wq);
}
}
static int myri10ge_poll(struct napi_struct *napi, int budget)
{
struct myri10ge_slice_state *ss =
container_of(napi, struct myri10ge_slice_state, napi);
int work_done;
#ifdef CONFIG_MYRI10GE_DCA
if (ss->mgp->dca_enabled)
myri10ge_update_dca(ss);
#endif
/* process as many rx events as NAPI will allow */
work_done = myri10ge_clean_rx_done(ss, budget);
if (work_done < budget) {
napi_complete(napi);
put_be32(htonl(3), ss->irq_claim);
}
return work_done;
}
static irqreturn_t myri10ge_intr(int irq, void *arg)
{
struct myri10ge_slice_state *ss = arg;
struct myri10ge_priv *mgp = ss->mgp;
struct mcp_irq_data *stats = ss->fw_stats;
struct myri10ge_tx_buf *tx = &ss->tx;
u32 send_done_count;
int i;
/* an interrupt on a non-zero receive-only slice is implicitly
* valid since MSI-X irqs are not shared */
if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
napi_schedule(&ss->napi);
return IRQ_HANDLED;
}
/* make sure it is our IRQ, and that the DMA has finished */
if (unlikely(!stats->valid))
return IRQ_NONE;
/* low bit indicates receives are present, so schedule
* napi poll handler */
if (stats->valid & 1)
napi_schedule(&ss->napi);
if (!mgp->msi_enabled && !mgp->msix_enabled) {
put_be32(0, mgp->irq_deassert);
if (!myri10ge_deassert_wait)
stats->valid = 0;
mb();
} else
stats->valid = 0;
/* Wait for IRQ line to go low, if using INTx */
i = 0;
while (1) {
i++;
/* check for transmit completes and receives */
send_done_count = ntohl(stats->send_done_count);
if (send_done_count != tx->pkt_done)
myri10ge_tx_done(ss, (int)send_done_count);
if (unlikely(i > myri10ge_max_irq_loops)) {
netdev_err(mgp->dev, "irq stuck?\n");
stats->valid = 0;
schedule_work(&mgp->watchdog_work);
}
if (likely(stats->valid == 0))
break;
cpu_relax();
barrier();
}
/* Only slice 0 updates stats */
if (ss == mgp->ss)
myri10ge_check_statblock(mgp);
put_be32(htonl(3), ss->irq_claim + 1);
return IRQ_HANDLED;
}
static int
myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
char *ptr;
int i;
cmd->autoneg = AUTONEG_DISABLE;
ethtool_cmd_speed_set(cmd, SPEED_10000);
cmd->duplex = DUPLEX_FULL;
/*
* parse the product code to determine the interface type
* (CX4, XFP, Quad Ribbon Fiber) by looking at the character
* after the 3rd dash in the driver's cached copy of the
* EEPROM's product code string.
*/
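/* e.g. for a hypothetical code "10G-PCIE-8A-R", the character
* after the third dash is 'R', selecting PORT_FIBRE and an
* external transceiver below */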
ptr = mgp->product_code_string;
if (ptr == NULL) {
netdev_err(netdev, "Missing product code\n");
return 0;
}
for (i = 0; i < 3; i++, ptr++) {
ptr = strchr(ptr, '-');
if (ptr == NULL) {
netdev_err(netdev, "Invalid product code %s\n",
mgp->product_code_string);
return 0;
}
}
if (*ptr == '2')
ptr++;
if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
/* We've found either an XFP, quad ribbon fiber, or SFP+ */
cmd->port = PORT_FIBRE;
cmd->supported |= SUPPORTED_FIBRE;
cmd->advertising |= ADVERTISED_FIBRE;
} else {
cmd->port = PORT_OTHER;
}
if (*ptr == 'R' || *ptr == 'S')
cmd->transceiver = XCVR_EXTERNAL;
else
cmd->transceiver = XCVR_INTERNAL;
return 0;
}
static void
myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
strlcpy(info->driver, "myri10ge", sizeof(info->driver));
strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
}
static int
myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
coal->rx_coalesce_usecs = mgp->intr_coal_delay;
return 0;
}
static int
myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
mgp->intr_coal_delay = coal->rx_coalesce_usecs;
put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
return 0;
}
static void
myri10ge_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
pause->autoneg = 0;
pause->rx_pause = mgp->pause;
pause->tx_pause = mgp->pause;
}
static int
myri10ge_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
if (pause->tx_pause != mgp->pause)
return myri10ge_change_pause(mgp, pause->tx_pause);
if (pause->rx_pause != mgp->pause)
return myri10ge_change_pause(mgp, pause->rx_pause);
if (pause->autoneg != 0)
return -EINVAL;
return 0;
}
static void
myri10ge_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
ring->rx_jumbo_max_pending = 0;
ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
ring->rx_mini_pending = ring->rx_mini_max_pending;
ring->rx_pending = ring->rx_max_pending;
ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
ring->tx_pending = ring->tx_max_pending;
}
static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
"rx_length_errors", "rx_over_errors", "rx_crc_errors",
"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
"tx_heartbeat_errors", "tx_window_errors",
/* device-specific stats */
"tx_boundary", "WC", "irq", "MSI", "MSIX",
"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
"serial_number", "watchdog_resets",
#ifdef CONFIG_MYRI10GE_DCA
"dca_capable_firmware", "dca_device_present",
#endif
"link_changes", "link_up", "dropped_link_overflow",
"dropped_link_error_or_filtered",
"dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
"dropped_unicast_filtered", "dropped_multicast_filtered",
"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
"dropped_no_big_buffer"
};
static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
"----------- slice ---------",
"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
"rx_small_cnt", "rx_big_cnt",
"wake_queue", "stop_queue", "tx_linearized", "LRO aggregated",
"LRO flushed",
"LRO avg aggr", "LRO no_desc"
};
#define MYRI10GE_NET_STATS_LEN 21
#define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats)
#define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats)
static void
myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
int i;
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, *myri10ge_gstrings_main_stats,
sizeof(myri10ge_gstrings_main_stats));
data += sizeof(myri10ge_gstrings_main_stats);
for (i = 0; i < mgp->num_slices; i++) {
memcpy(data, *myri10ge_gstrings_slice_stats,
sizeof(myri10ge_gstrings_slice_stats));
data += sizeof(myri10ge_gstrings_slice_stats);
}
break;
}
}
static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
switch (sset) {
case ETH_SS_STATS:
return MYRI10GE_MAIN_STATS_LEN +
mgp->num_slices * MYRI10GE_SLICE_STATS_LEN;
default:
return -EOPNOTSUPP;
}
}
static void
myri10ge_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 * data)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
struct myri10ge_slice_state *ss;
int slice;
int i;
/* force stats update */
(void)myri10ge_get_stats(netdev);
for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
data[i] = ((unsigned long *)&netdev->stats)[i];
data[i++] = (unsigned int)mgp->tx_boundary;
data[i++] = (unsigned int)mgp->wc_enabled;
data[i++] = (unsigned int)mgp->pdev->irq;
data[i++] = (unsigned int)mgp->msi_enabled;
data[i++] = (unsigned int)mgp->msix_enabled;
data[i++] = (unsigned int)mgp->read_dma;
data[i++] = (unsigned int)mgp->write_dma;
data[i++] = (unsigned int)mgp->read_write_dma;
data[i++] = (unsigned int)mgp->serial_number;
data[i++] = (unsigned int)mgp->watchdog_resets;
#ifdef CONFIG_MYRI10GE_DCA
data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
data[i++] = (unsigned int)(mgp->dca_enabled);
#endif
data[i++] = (unsigned int)mgp->link_changes;
/* firmware stats are useful only in the first slice */
ss = &mgp->ss[0];
data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
data[i++] =
(unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
data[i++] =
(unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
for (slice = 0; slice < mgp->num_slices; slice++) {
ss = &mgp->ss[slice];
data[i++] = slice;
data[i++] = (unsigned int)ss->tx.pkt_start;
data[i++] = (unsigned int)ss->tx.pkt_done;
data[i++] = (unsigned int)ss->tx.req;
data[i++] = (unsigned int)ss->tx.done;
data[i++] = (unsigned int)ss->rx_small.cnt;
data[i++] = (unsigned int)ss->rx_big.cnt;
data[i++] = (unsigned int)ss->tx.wake_queue;
data[i++] = (unsigned int)ss->tx.stop_queue;
data[i++] = (unsigned int)ss->tx.linearized;
data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
data[i++] = ss->rx_done.lro_mgr.stats.flushed;
if (ss->rx_done.lro_mgr.stats.flushed)
data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
ss->rx_done.lro_mgr.stats.flushed;
else
data[i++] = 0;
data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
}
}
static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
mgp->msg_enable = value;
}
static u32 myri10ge_get_msglevel(struct net_device *netdev)
{
struct myri10ge_priv *mgp = netdev_priv(netdev);
return mgp->msg_enable;
}
static const struct ethtool_ops myri10ge_ethtool_ops = {
.get_settings = myri10ge_get_settings,
.get_drvinfo = myri10ge_get_drvinfo,
.get_coalesce = myri10ge_get_coalesce,
.set_coalesce = myri10ge_set_coalesce,
.get_pauseparam = myri10ge_get_pauseparam,
.set_pauseparam = myri10ge_set_pauseparam,
.get_ringparam = myri10ge_get_ringparam,
.get_link = ethtool_op_get_link,
.get_strings = myri10ge_get_strings,
.get_sset_count = myri10ge_get_sset_count,
.get_ethtool_stats = myri10ge_get_ethtool_stats,
.set_msglevel = myri10ge_set_msglevel,
.get_msglevel = myri10ge_get_msglevel,
};
static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
{
struct myri10ge_priv *mgp = ss->mgp;
struct myri10ge_cmd cmd;
struct net_device *dev = mgp->dev;
int tx_ring_size, rx_ring_size;
int tx_ring_entries, rx_ring_entries;
int i, slice, status;
size_t bytes;
/* get ring sizes */
slice = ss - mgp->ss;
cmd.data0 = slice;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
tx_ring_size = cmd.data0;
cmd.data0 = slice;
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
if (status != 0)
return status;
rx_ring_size = cmd.data0;
tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
ss->tx.mask = tx_ring_entries - 1;
ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
status = -ENOMEM;
/* allocate the host shadow rings */
bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
* sizeof(*ss->tx.req_list);
ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
if (ss->tx.req_bytes == NULL)
goto abort_with_nothing;
/* ensure req_list entries are aligned to 8 bytes */
ss->tx.req_list = (struct mcp_kreq_ether_send *)
ALIGN((unsigned long)ss->tx.req_bytes, 8);
ss->tx.queue_active = 0;
bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
if (ss->rx_small.shadow == NULL)
goto abort_with_tx_req_bytes;
bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
if (ss->rx_big.shadow == NULL)
goto abort_with_rx_small_shadow;
/* allocate the host info rings */
bytes = tx_ring_entries * sizeof(*ss->tx.info);
ss->tx.info = kzalloc(bytes, GFP_KERNEL);
if (ss->tx.info == NULL)
goto abort_with_rx_big_shadow;
bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
if (ss->rx_small.info == NULL)
goto abort_with_tx_info;
bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
if (ss->rx_big.info == NULL)
goto abort_with_rx_small_info;
/* Fill the receive rings */
ss->rx_big.cnt = 0;
ss->rx_small.cnt = 0;
ss->rx_big.fill_cnt = 0;
ss->rx_small.fill_cnt = 0;
ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
ss->rx_small.watchdog_needed = 0;
ss->rx_big.watchdog_needed = 0;
myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
mgp->small_bytes + MXGEFW_PAD, 0);
if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
slice, ss->rx_small.fill_cnt);
goto abort_with_rx_small_ring;
}
myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
slice, ss->rx_big.fill_cnt);
goto abort_with_rx_big_ring;
}
return 0;
abort_with_rx_big_ring:
for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
int idx = i & ss->rx_big.mask;
myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
mgp->big_bytes);
put_page(ss->rx_big.info[idx].page);
}
abort_with_rx_small_ring:
for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
int idx = i & ss->rx_small.mask;
myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
mgp->small_bytes + MXGEFW_PAD);
put_page(ss->rx_small.info[idx].page);
}
kfree(ss->rx_big.info);
abort_with_rx_small_info:
kfree(ss->rx_small.info);
abort_with_tx_info:
kfree(ss->tx.info);
abort_with_rx_big_shadow:
kfree(ss->rx_big.shadow);
abort_with_rx_small_shadow:
kfree(ss->rx_small.shadow);
abort_with_tx_req_bytes:
kfree(ss->tx.req_bytes);
ss->tx.req_bytes = NULL;
ss->tx.req_list = NULL;
abort_with_nothing:
return status;
}
static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
{
struct myri10ge_priv *mgp = ss->mgp;
struct sk_buff *skb;
struct myri10ge_tx_buf *tx;
int i, len, idx;
/* If not allocated, skip it */
if (ss->tx.req_list == NULL)
return;
for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
idx = i & ss->rx_big.mask;
if (i == ss->rx_big.fill_cnt - 1)
ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
mgp->big_bytes);
put_page(ss->rx_big.info[idx].page);
}
for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
idx = i & ss->rx_small.mask;
if (i == ss->rx_small.fill_cnt - 1)
ss->rx_small.info[idx].page_offset =
MYRI10GE_ALLOC_SIZE;
myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
mgp->small_bytes + MXGEFW_PAD);
put_page(ss->rx_small.info[idx].page);
}
tx = &ss->tx;
while (tx->done != tx->req) {
idx = tx->done & tx->mask;
skb = tx->info[idx].skb;
/* Mark as free */
tx->info[idx].skb = NULL;
tx->done++;
len = dma_unmap_len(&tx->info[idx], len);
dma_unmap_len_set(&tx->info[idx], len, 0);
if (skb) {
ss->stats.tx_dropped++;
dev_kfree_skb_any(skb);
if (len)
pci_unmap_single(mgp->pdev,
dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
} else {
if (len)
pci_unmap_page(mgp->pdev,
dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
}
}
kfree(ss->rx_big.info);
kfree(ss->rx_small.info);
kfree(ss->tx.info);
kfree(ss->rx_big.shadow);
kfree(ss->rx_small.shadow);
kfree(ss->tx.req_bytes);
ss->tx.req_bytes = NULL;
ss->tx.req_list = NULL;
}
static int myri10ge_request_irq(struct myri10ge_priv *mgp)
{
struct pci_dev *pdev = mgp->pdev;
struct myri10ge_slice_state *ss;
struct net_device *netdev = mgp->dev;
int i;
int status;
mgp->msi_enabled = 0;
mgp->msix_enabled = 0;
status = 0;
if (myri10ge_msi) {
if (mgp->num_slices > 1) {
status =
pci_enable_msix(pdev, mgp->msix_vectors,
mgp->num_slices);
if (status == 0) {
mgp->msix_enabled = 1;
} else {
dev_err(&pdev->dev,
"Error %d setting up MSI-X\n", status);
return status;
}
}
if (mgp->msix_enabled == 0) {
status = pci_enable_msi(pdev);
if (status != 0) {
dev_err(&pdev->dev,
"Error %d setting up MSI; falling back to xPIC\n",
status);
} else {
mgp->msi_enabled = 1;
}
}
}
if (mgp->msix_enabled) {
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
snprintf(ss->irq_desc, sizeof(ss->irq_desc),
"%s:slice-%d", netdev->name, i);
status = request_irq(mgp->msix_vectors[i].vector,
myri10ge_intr, 0, ss->irq_desc,
ss);
if (status != 0) {
dev_err(&pdev->dev,
"slice %d failed to allocate IRQ\n", i);
i--;
while (i >= 0) {
free_irq(mgp->msix_vectors[i].vector,
&mgp->ss[i]);
i--;
}
pci_disable_msix(pdev);
return status;
}
}
} else {
status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
mgp->dev->name, &mgp->ss[0]);
if (status != 0) {
dev_err(&pdev->dev, "failed to allocate IRQ\n");
if (mgp->msi_enabled)
pci_disable_msi(pdev);
}
}
return status;
}
static void myri10ge_free_irq(struct myri10ge_priv *mgp)
{
struct pci_dev *pdev = mgp->pdev;
int i;
if (mgp->msix_enabled) {
for (i = 0; i < mgp->num_slices; i++)
free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
} else {
free_irq(pdev->irq, &mgp->ss[0]);
}
if (mgp->msi_enabled)
pci_disable_msi(pdev);
if (mgp->msix_enabled)
pci_disable_msix(pdev);
}
static int
myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
void **ip_hdr, void **tcpudp_hdr,
u64 * hdr_flags, void *priv)
{
struct ethhdr *eh;
struct vlan_ethhdr *veh;
struct iphdr *iph;
u8 *va = page_address(frag->page) + frag->page_offset;
unsigned long ll_hlen;
/* passed opaque through lro_receive_frags() */
__wsum csum = (__force __wsum) (unsigned long)priv;
/* find the mac header, aborting if not IPv4 */
eh = (struct ethhdr *)va;
*mac_hdr = eh;
ll_hlen = ETH_HLEN;
if (eh->h_proto != htons(ETH_P_IP)) {
if (eh->h_proto == htons(ETH_P_8021Q)) {
veh = (struct vlan_ethhdr *)va;
if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
return -1;
ll_hlen += VLAN_HLEN;
/*
* HW checksum starts ETH_HLEN bytes into
* frame, so we must subtract off the VLAN
* header's checksum before csum can be used
*/
csum = csum_sub(csum, csum_partial(va + ETH_HLEN,
VLAN_HLEN, 0));
} else {
return -1;
}
}
*hdr_flags = LRO_IPV4;
iph = (struct iphdr *)(va + ll_hlen);
*ip_hdr = iph;
if (iph->protocol != IPPROTO_TCP)
return -1;
if (iph->frag_off & htons(IP_MF | IP_OFFSET))
return -1;
*hdr_flags |= LRO_TCP;
*tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
/* verify the IP checksum */
if (unlikely(ip_fast_csum((u8 *) iph, iph->ihl)))
return -1;
/* verify the checksum */
if (unlikely(csum_tcpudp_magic(iph->saddr, iph->daddr,
ntohs(iph->tot_len) - (iph->ihl << 2),
IPPROTO_TCP, csum)))
return -1;
return 0;
}
static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
{
struct myri10ge_cmd cmd;
struct myri10ge_slice_state *ss;
int status;
ss = &mgp->ss[slice];
status = 0;
if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
cmd.data0 = slice;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
&cmd, 0);
ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
(mgp->sram + cmd.data0);
}
cmd.data0 = slice;
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
&cmd, 0);
ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
(mgp->sram + cmd.data0);
cmd.data0 = slice;
status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
(mgp->sram + cmd.data0);
ss->tx.send_go = (__iomem __be32 *)
(mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
ss->tx.send_stop = (__iomem __be32 *)
(mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
return status;
}
static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
{
struct myri10ge_cmd cmd;
struct myri10ge_slice_state *ss;
int status;
ss = &mgp->ss[slice];
cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16);
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
if (status == -ENOSYS) {
dma_addr_t bus = ss->fw_stats_bus;
if (slice != 0)
return -EINVAL;
bus += offsetof(struct mcp_irq_data, send_done_count);
cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
status = myri10ge_send_cmd(mgp,
MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
&cmd, 0);
/* Firmware cannot support multicast without STATS_DMA_V2 */
mgp->fw_multicast_support = 0;
} else {
mgp->fw_multicast_support = 1;
}
return 0;
}
static int myri10ge_open(struct net_device *dev)
{
struct myri10ge_slice_state *ss;
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
int i, status, big_pow2, slice;
u8 *itable;
struct net_lro_mgr *lro_mgr;
if (mgp->running != MYRI10GE_ETH_STOPPED)
return -EBUSY;
mgp->running = MYRI10GE_ETH_STARTING;
status = myri10ge_reset(mgp);
if (status != 0) {
netdev_err(dev, "failed reset\n");
goto abort_with_nothing;
}
if (mgp->num_slices > 1) {
cmd.data0 = mgp->num_slices;
cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
if (mgp->dev->real_num_tx_queues > 1)
cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
&cmd, 0);
if (status != 0) {
netdev_err(dev, "failed to set number of slices\n");
goto abort_with_nothing;
}
/* setup the indirection table */
cmd.data0 = mgp->num_slices;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
&cmd, 0);
status |= myri10ge_send_cmd(mgp,
MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
&cmd, 0);
if (status != 0) {
netdev_err(dev, "failed to setup rss tables\n");
goto abort_with_nothing;
}
/* just enable an identity mapping */
itable = mgp->sram + cmd.data0;
for (i = 0; i < mgp->num_slices; i++)
__raw_writeb(i, &itable[i]);
cmd.data0 = 1;
cmd.data1 = myri10ge_rss_hash;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
&cmd, 0);
if (status != 0) {
netdev_err(dev, "failed to enable slices\n");
goto abort_with_nothing;
}
}
status = myri10ge_request_irq(mgp);
if (status != 0)
goto abort_with_nothing;
/* decide what small buffer size to use. For good TCP rx
* performance, it is important to not receive 1514 byte
* frames into jumbo buffers, as it confuses the socket buffer
* accounting code, leading to drops and erratic performance.
*/
if (dev->mtu <= ETH_DATA_LEN)
/* enough for a TCP header */
mgp->small_bytes = (128 > SMP_CACHE_BYTES)
? (128 - MXGEFW_PAD)
: (SMP_CACHE_BYTES - MXGEFW_PAD);
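/* e.g. 126 bytes on a machine with 64-byte cache lines */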
else
/* enough for a vlan encapsulated ETH_DATA_LEN frame */
mgp->small_bytes = VLAN_ETH_FRAME_LEN;
/* Override the small buffer size? */
if (myri10ge_small_bytes > 0)
mgp->small_bytes = myri10ge_small_bytes;
/* Firmware needs the big buff size as a power of 2. Lie and
* tell him the buffer is larger, because we only use 1
* buffer/pkt, and the mtu will prevent overruns.
*/
big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
while (!is_power_of_2(big_pow2))
big_pow2++;
mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
} else {
big_pow2 = MYRI10GE_ALLOC_SIZE;
mgp->big_bytes = big_pow2;
}
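/* e.g. with a 1500-byte MTU: 1500 + 14 + 4 + 2 = 1520 bytes,
* so big_pow2 is rounded up to 2048 for the firmware while
* big_bytes stays at 1520 */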
/* setup the per-slice data structures */
for (slice = 0; slice < mgp->num_slices; slice++) {
ss = &mgp->ss[slice];
status = myri10ge_get_txrx(mgp, slice);
if (status != 0) {
netdev_err(dev, "failed to get ring sizes or locations\n");
goto abort_with_rings;
}
status = myri10ge_allocate_rings(ss);
if (status != 0)
goto abort_with_rings;
/* only firmware which supports multiple TX queues
* supports setting up the tx stats on non-zero
* slices */
if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
status = myri10ge_set_stats(mgp, slice);
if (status) {
netdev_err(dev, "Couldn't set stats DMA\n");
goto abort_with_rings;
}
lro_mgr = &ss->rx_done.lro_mgr;
lro_mgr->dev = dev;
lro_mgr->features = LRO_F_NAPI;
lro_mgr->ip_summed = CHECKSUM_COMPLETE;
lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
lro_mgr->lro_arr = ss->rx_done.lro_desc;
lro_mgr->get_frag_header = myri10ge_get_frag_header;
lro_mgr->max_aggr = myri10ge_lro_max_pkts;
lro_mgr->frag_align_pad = 2;
if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
lro_mgr->max_aggr = MAX_SKB_FRAGS;
/* must happen prior to any irq */
napi_enable(&(ss)->napi);
}
/* now give firmware buffers sizes, and MTU */
cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
cmd.data0 = mgp->small_bytes;
status |=
myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
cmd.data0 = big_pow2;
status |=
myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
if (status) {
netdev_err(dev, "Couldn't set buffer sizes\n");
goto abort_with_rings;
}
/*
* Set Linux style TSO mode; this is needed only on newer
* firmware versions. Older versions default to Linux
* style TSO
*/
cmd.data0 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
if (status && status != -ENOSYS) {
netdev_err(dev, "Couldn't set TSO mode\n");
goto abort_with_rings;
}
mgp->link_state = ~0U;
mgp->rdma_tags_available = 15;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
if (status) {
netdev_err(dev, "Couldn't bring up link\n");
goto abort_with_rings;
}
mgp->running = MYRI10GE_ETH_RUNNING;
mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
add_timer(&mgp->watchdog_timer);
netif_tx_wake_all_queues(dev);
return 0;
abort_with_rings:
while (slice) {
slice--;
napi_disable(&mgp->ss[slice].napi);
}
for (i = 0; i < mgp->num_slices; i++)
myri10ge_free_rings(&mgp->ss[i]);
myri10ge_free_irq(mgp);
abort_with_nothing:
mgp->running = MYRI10GE_ETH_STOPPED;
return -ENOMEM;
}
static int myri10ge_close(struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
int status, old_down_cnt;
int i;
if (mgp->running != MYRI10GE_ETH_RUNNING)
return 0;
if (mgp->ss[0].tx.req_bytes == NULL)
return 0;
del_timer_sync(&mgp->watchdog_timer);
mgp->running = MYRI10GE_ETH_STOPPING;
for (i = 0; i < mgp->num_slices; i++) {
napi_disable(&mgp->ss[i].napi);
}
netif_carrier_off(dev);
netif_tx_stop_all_queues(dev);
if (mgp->rebooted == 0) {
old_down_cnt = mgp->down_cnt;
mb();
status =
myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
if (status)
netdev_err(dev, "Couldn't bring down link\n");
wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
HZ);
if (old_down_cnt == mgp->down_cnt)
netdev_err(dev, "never got down irq\n");
}
netif_tx_disable(dev);
myri10ge_free_irq(mgp);
for (i = 0; i < mgp->num_slices; i++)
myri10ge_free_rings(&mgp->ss[i]);
mgp->running = MYRI10GE_ETH_STOPPED;
return 0;
}
/* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
* backwards one at a time and handle ring wraps */
static inline void
myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
struct mcp_kreq_ether_send *src, int cnt)
{
int idx, starting_slot;
starting_slot = tx->req;
while (cnt > 1) {
cnt--;
idx = (starting_slot + cnt) & tx->mask;
myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
mb();
}
}
/*
* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
* at most 32 bytes at a time, so as to avoid involving the software
* pio handler in the nic. We re-write the first segment's flags
* to mark them valid only after writing the entire chain.
*/
static inline void
myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
int cnt)
{
int idx, i;
struct mcp_kreq_ether_send __iomem *dstp, *dst;
struct mcp_kreq_ether_send *srcp;
u8 last_flags;
idx = tx->req & tx->mask;
last_flags = src->flags;
src->flags = 0;
mb();
dst = dstp = &tx->lanai[idx];
srcp = src;
if ((idx + cnt) < tx->mask) {
for (i = 0; i < (cnt - 1); i += 2) {
myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
mb(); /* force write every 32 bytes */
srcp += 2;
dstp += 2;
}
} else {
/* submit all but the first request, and ensure
* that it is submitted below */
myri10ge_submit_req_backwards(tx, src, cnt);
i = 0;
}
if (i < cnt) {
/* submit the first request */
myri10ge_pio_copy(dstp, srcp, sizeof(*src));
mb(); /* barrier before setting valid flag */
}
/* re-write the last 32-bits with the valid flags */
src->flags = last_flags;
put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
tx->req += cnt;
mb();
}
/*
* Transmit a packet. We need to split the packet so that a single
* segment does not cross mgp->tx_boundary, which makes segment
* counting tricky. Rather than trying to count segments up front, we
* just give up if there are too few descriptors currently available
* to hold a reasonably fragmented packet. If we run
* out of segments while preparing a packet for DMA, we just linearize
* it and try again.
*/
static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_slice_state *ss;
struct mcp_kreq_ether_send *req;
struct myri10ge_tx_buf *tx;
struct skb_frag_struct *frag;
struct netdev_queue *netdev_queue;
dma_addr_t bus;
u32 low;
__be32 high_swapped;
unsigned int len;
int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
u16 pseudo_hdr_offset, cksum_offset, queue;
int cum_len, seglen, boundary, rdma_count;
u8 flags, odd_flag;
queue = skb_get_queue_mapping(skb);
ss = &mgp->ss[queue];
netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
tx = &ss->tx;
again:
req = tx->req_list;
avail = tx->mask - 1 - (tx->req - tx->done);
mss = 0;
max_segments = MXGEFW_MAX_SEND_DESC;
if (skb_is_gso(skb)) {
mss = skb_shinfo(skb)->gso_size;
max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
}
if (unlikely(avail < max_segments)) {
/* we are out of transmit resources */
tx->stop_queue++;
netif_tx_stop_queue(netdev_queue);
return NETDEV_TX_BUSY;
}
/* Setup checksum offloading, if needed */
cksum_offset = 0;
pseudo_hdr_offset = 0;
odd_flag = 0;
flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
cksum_offset = skb_checksum_start_offset(skb);
pseudo_hdr_offset = cksum_offset + skb->csum_offset;
/* If the headers are excessively large, then we must
* fall back to a software checksum */
if (unlikely(!mss && (cksum_offset > 255 ||
pseudo_hdr_offset > 127))) {
if (skb_checksum_help(skb))
goto drop;
cksum_offset = 0;
pseudo_hdr_offset = 0;
} else {
odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
flags |= MXGEFW_FLAGS_CKSUM;
}
}
cum_len = 0;
if (mss) { /* TSO */
/* this removes any CKSUM flag from before */
flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
/* negative cum_len signifies to the
* send loop that we are still in the
* header portion of the TSO packet.
* TSO header can be at most 1KB long */
cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
/* for IPv6 TSO, the checksum offset stores the
* TCP header length, to save the firmware from
* the need to parse the headers */
if (skb_is_gso_v6(skb)) {
cksum_offset = tcp_hdrlen(skb);
/* Can only handle headers <= max_tso6 long */
if (unlikely(-cum_len > mgp->max_tso6))
return myri10ge_sw_tso(skb, dev);
}
/* for TSO, pseudo_hdr_offset holds mss.
* The firmware figures out where to put
* the checksum by parsing the header. */
pseudo_hdr_offset = mss;
} else
/* Mark small packets, and pad out tiny packets */
if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
flags |= MXGEFW_FLAGS_SMALL;
/* pad frames to at least ETH_ZLEN bytes */
if (unlikely(skb->len < ETH_ZLEN)) {
if (skb_padto(skb, ETH_ZLEN)) {
/* The packet is gone, so we must
* return 0 */
ss->stats.tx_dropped += 1;
return NETDEV_TX_OK;
}
/* adjust the len to account for the zero pad
* so that the nic can know how long it is */
skb->len = ETH_ZLEN;
}
}
/* map the skb for DMA */
len = skb_headlen(skb);
idx = tx->req & tx->mask;
tx->info[idx].skb = skb;
bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
dma_unmap_addr_set(&tx->info[idx], bus, bus);
dma_unmap_len_set(&tx->info[idx], len, len);
frag_cnt = skb_shinfo(skb)->nr_frags;
frag_idx = 0;
count = 0;
rdma_count = 0;
/* "rdma_count" is the number of RDMAs belonging to the
* current packet BEFORE the current send request. For
* non-TSO packets, this is equal to "count".
* For TSO packets, rdma_count needs to be reset
* to 0 after a segment cut.
*
* The rdma_count field of the send request is
* the number of RDMAs of the packet starting at
* that request. For TSO send requests with one or more cuts
* in the middle, this is the number of RDMAs starting
* after the last cut in the request. All previous
* segments before the last cut implicitly have 1 RDMA.
*
* Since the number of RDMAs is not known beforehand,
* it must be filled in retroactively - after each
* segmentation cut or at the end of the entire packet.
*/
while (1) {
/* Break the SKB or Fragment up into pieces which
* do not cross mgp->tx_boundary */
low = MYRI10GE_LOWPART_TO_U32(bus);
high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
while (len) {
u8 flags_next;
int cum_len_next;
if (unlikely(count == max_segments))
goto abort_linearize;
boundary =
(low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
seglen = boundary - low;
if (seglen > len)
seglen = len;
flags_next = flags & ~MXGEFW_FLAGS_FIRST;
cum_len_next = cum_len + seglen;
if (mss) { /* TSO */
(req - rdma_count)->rdma_count = rdma_count + 1;
if (likely(cum_len >= 0)) { /* payload */
int next_is_first, chop;
chop = (cum_len_next > mss);
cum_len_next = cum_len_next % mss;
next_is_first = (cum_len_next == 0);
flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
flags_next |= next_is_first *
MXGEFW_FLAGS_FIRST;
rdma_count |= -(chop | next_is_first);
rdma_count += chop & !next_is_first;
} else if (likely(cum_len_next >= 0)) { /* header ends */
int small;
rdma_count = -1;
cum_len_next = 0;
seglen = -cum_len;
small = (mss <= MXGEFW_SEND_SMALL_SIZE);
flags_next = MXGEFW_FLAGS_TSO_PLD |
MXGEFW_FLAGS_FIRST |
(small * MXGEFW_FLAGS_SMALL);
}
}
req->addr_high = high_swapped;
req->addr_low = htonl(low);
req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
req->pad = 0; /* complete solid 16-byte block; does this matter? */
req->rdma_count = 1;
req->length = htons(seglen);
req->cksum_offset = cksum_offset;
req->flags = flags | ((cum_len & 1) * odd_flag);
low += seglen;
len -= seglen;
cum_len = cum_len_next;
flags = flags_next;
req++;
count++;
rdma_count++;
if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
if (unlikely(cksum_offset > seglen))
cksum_offset -= seglen;
else
cksum_offset = 0;
}
}
if (frag_idx == frag_cnt)
break;
/* map next fragment for DMA */
idx = (count + tx->req) & tx->mask;
frag = &skb_shinfo(skb)->frags[frag_idx];
frag_idx++;
len = frag->size;
bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
len, PCI_DMA_TODEVICE);
dma_unmap_addr_set(&tx->info[idx], bus, bus);
dma_unmap_len_set(&tx->info[idx], len, len);
}
(req - rdma_count)->rdma_count = rdma_count;
if (mss)
do {
req--;
req->flags |= MXGEFW_FLAGS_TSO_LAST;
} while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
MXGEFW_FLAGS_FIRST)));
idx = ((count - 1) + tx->req) & tx->mask;
tx->info[idx].last = 1;
myri10ge_submit_req(tx, tx->req_list, count);
/* if using multiple tx queues, make sure NIC polls the
* current slice */
if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
tx->queue_active = 1;
put_be32(htonl(1), tx->send_go);
mb();
mmiowb();
}
tx->pkt_start++;
if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
tx->stop_queue++;
netif_tx_stop_queue(netdev_queue);
}
return NETDEV_TX_OK;
abort_linearize:
/* Free any DMA resources we've alloced and clear out the skb
* slot so as to not trip up assertions, and to avoid a
* double-free if linearizing fails */
last_idx = (idx + 1) & tx->mask;
idx = tx->req & tx->mask;
tx->info[idx].skb = NULL;
do {
len = dma_unmap_len(&tx->info[idx], len);
if (len) {
if (tx->info[idx].skb != NULL)
pci_unmap_single(mgp->pdev,
dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
else
pci_unmap_page(mgp->pdev,
dma_unmap_addr(&tx->info[idx],
bus), len,
PCI_DMA_TODEVICE);
dma_unmap_len_set(&tx->info[idx], len, 0);
tx->info[idx].skb = NULL;
}
idx = (idx + 1) & tx->mask;
} while (idx != last_idx);
if (skb_is_gso(skb)) {
netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
goto drop;
}
if (skb_linearize(skb))
goto drop;
tx->linearized++;
goto again;
drop:
dev_kfree_skb_any(skb);
ss->stats.tx_dropped += 1;
return NETDEV_TX_OK;
}
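/*
 * Illustrative sketch (not part of the driver): the send loop above splits
 * each DMA-mapped buffer so that no segment crosses an mgp->tx_boundary
 * (a power of two, 2KB or 4KB). The hypothetical helper below shows the
 * same round-up-to-next-boundary arithmetic in isolation.
 */
static inline int example_split_len(u32 low, u32 boundary_size, int len)
{
	/* first address past the current boundary window */
	u32 boundary = (low + boundary_size) & ~(boundary_size - 1);
	int seglen = boundary - low;

	/* never emit more than the caller has left */
	return seglen > len ? len : seglen;
}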
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
struct net_device *dev)
{
struct sk_buff *segs, *curr;
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_slice_state *ss;
netdev_tx_t status;
segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
if (IS_ERR(segs))
goto drop;
while (segs) {
curr = segs;
segs = segs->next;
curr->next = NULL;
status = myri10ge_xmit(curr, dev);
if (status != 0) {
dev_kfree_skb_any(curr);
/* free all remaining segments so none of them leak */
while (segs != NULL) {
curr = segs;
segs = segs->next;
curr->next = NULL;
dev_kfree_skb_any(curr);
}
goto drop;
}
}
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
drop:
ss = &mgp->ss[skb_get_queue_mapping(skb)];
dev_kfree_skb_any(skb);
ss->stats.tx_dropped += 1;
return NETDEV_TX_OK;
}
static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_slice_netstats *slice_stats;
struct net_device_stats *stats = &dev->stats;
int i;
spin_lock(&mgp->stats_lock);
memset(stats, 0, sizeof(*stats));
for (i = 0; i < mgp->num_slices; i++) {
slice_stats = &mgp->ss[i].stats;
stats->rx_packets += slice_stats->rx_packets;
stats->tx_packets += slice_stats->tx_packets;
stats->rx_bytes += slice_stats->rx_bytes;
stats->tx_bytes += slice_stats->tx_bytes;
stats->rx_dropped += slice_stats->rx_dropped;
stats->tx_dropped += slice_stats->tx_dropped;
}
spin_unlock(&mgp->stats_lock);
return stats;
}
static void myri10ge_set_multicast_list(struct net_device *dev)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
struct myri10ge_cmd cmd;
struct netdev_hw_addr *ha;
__be32 data[2] = { 0, 0 };
int err;
/* can be called from atomic contexts,
* pass 1 to force atomicity in myri10ge_send_cmd() */
myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
/* This firmware is known to not support multicast */
if (!mgp->fw_multicast_support)
return;
/* Disable multicast filtering */
err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
err);
goto abort;
}
if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) {
/* request to disable multicast filtering, so quit here */
return;
}
/* Flush the filters */
err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
&cmd, 1);
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
err);
goto abort;
}
/* Walk the multicast list, and add each address */
netdev_for_each_mc_addr(ha, dev) {
memcpy(data, &ha->addr, 6);
cmd.data0 = ntohl(data[0]);
cmd.data1 = ntohl(data[1]);
err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
&cmd, 1);
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
err, ha->addr);
goto abort;
}
}
/* Enable multicast filtering */
err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
if (err != 0) {
netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
err);
goto abort;
}
return;
abort:
return;
}
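/*
 * Illustrative sketch (not part of the driver): the multicast walk above
 * copies the 6-byte MAC address into two 32-bit big-endian words and then
 * byte-swaps them for the firmware command. For address aa:bb:cc:dd:ee:ff,
 * data0 becomes 0xaabbccdd and data1 becomes 0xeeff0000 in host order.
 * The helper below is a hypothetical stand-alone version.
 */
static void example_pack_mcast_addr(const u8 *mac, u32 *data0, u32 *data1)
{
	__be32 data[2] = { 0, 0 };

	memcpy(data, mac, 6);	/* bytes 0-3 -> data[0], bytes 4-5 -> data[1] */
	*data0 = ntohl(data[0]);
	*data1 = ntohl(data[1]);
}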
static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
struct myri10ge_priv *mgp = netdev_priv(dev);
int status;
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
status = myri10ge_update_mac_address(mgp, sa->sa_data);
if (status != 0) {
netdev_err(dev, "changing mac address failed with %d\n",
status);
return status;
}
/* change the dev structure */
memcpy(dev->dev_addr, sa->sa_data, 6);
return 0;
}
static u32 myri10ge_fix_features(struct net_device *dev, u32 features)
{
if (!(features & NETIF_F_RXCSUM))
features &= ~NETIF_F_LRO;
return features;
}
static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
int error = 0;
if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu);
return -EINVAL;
}
netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
if (mgp->running) {
/* if we change the mtu on an active device, we must
* reset the device so the firmware sees the change */
myri10ge_close(dev);
dev->mtu = new_mtu;
myri10ge_open(dev);
} else
dev->mtu = new_mtu;
return error;
}
/*
* Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
* Only do it if the bridge is a root port since we don't want to disturb
* any other device, except if forced with myri10ge_ecrc_enable > 1.
*/
static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
{
struct pci_dev *bridge = mgp->pdev->bus->self;
struct device *dev = &mgp->pdev->dev;
unsigned cap;
unsigned err_cap;
u16 val;
u8 ext_type;
int ret;
if (!myri10ge_ecrc_enable || !bridge)
return;
/* check that the bridge is a root port */
cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val);
ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
if (myri10ge_ecrc_enable > 1) {
struct pci_dev *prev_bridge, *old_bridge = bridge;
/* Walk the hierarchy up to the root port
* where ECRC has to be enabled */
do {
prev_bridge = bridge;
bridge = bridge->bus->self;
if (!bridge || prev_bridge == bridge) {
dev_err(dev,
"Failed to find root port"
" to force ECRC\n");
return;
}
cap =
pci_find_capability(bridge, PCI_CAP_ID_EXP);
pci_read_config_word(bridge,
cap + PCI_CAP_FLAGS, &val);
ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
} while (ext_type != PCI_EXP_TYPE_ROOT_PORT);
dev_info(dev,
"Forcing ECRC on non-root port %s"
" (enabling on root port %s)\n",
pci_name(old_bridge), pci_name(bridge));
} else {
dev_err(dev,
"Not enabling ECRC on non-root port %s\n",
pci_name(bridge));
return;
}
}
cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
if (!cap)
return;
ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
if (ret) {
dev_err(dev, "failed reading ext-conf-space of %s\n",
pci_name(bridge));
dev_err(dev, "\t pci=nommconf in use? "
"or buggy/incomplete/absent ACPI MCFG attr?\n");
return;
}
if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
return;
err_cap |= PCI_ERR_CAP_ECRC_GENE;
pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
}
/*
* The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
* when the PCI-E Completion packets are aligned on an 8-byte
* boundary. Some PCI-E chip sets always align Completion packets; on
* the ones that do not, the alignment can be enforced by enabling
* ECRC generation (if supported).
*
* When PCI-E Completion packets are not aligned, it is actually more
* efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
*
* If the driver can neither enable ECRC nor verify that it has
* already been enabled, then it must use a firmware image which works
* around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it
* should also ensure that it never gives the device a Read-DMA which is
* larger than 2KB by setting the tx_boundary to 2KB. If ECRC is
* enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat)
* firmware image, and set tx_boundary to 4KB.
*/
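/*
 * Illustrative decision table (not part of the driver) summarizing the
 * rules in the comment above; the helper name is hypothetical and the
 * real logic lives in myri10ge_firmware_probe()/myri10ge_select_firmware()
 * below.
 */
static const char *example_pick_firmware(int aligned_completions_ok,
					 int *tx_boundary)
{
	if (aligned_completions_ok) {
		*tx_boundary = 4096;
		return "myri10ge_rss_eth_z8e.dat";	/* aligned image */
	}
	*tx_boundary = 2048;			/* limit Read-DMA to 2KB */
	return "myri10ge_rss_ethp_z8e.dat";	/* unaligned workaround image */
}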
static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
{
struct pci_dev *pdev = mgp->pdev;
struct device *dev = &pdev->dev;
int status;
mgp->tx_boundary = 4096;
/*
* Verify the max read request size was set to 4KB
* before trying the test with 4KB.
*/
status = pcie_get_readrq(pdev);
if (status < 0) {
dev_err(dev, "Couldn't read max read req size: %d\n", status);
goto abort;
}
if (status != 4096) {
dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
mgp->tx_boundary = 2048;
}
/*
* load the optimized firmware (which assumes aligned PCIe
* completions) in order to see if it works on this host.
*/
set_fw_name(mgp, myri10ge_fw_aligned, false);
status = myri10ge_load_firmware(mgp, 1);
if (status != 0) {
goto abort;
}
/*
* Enable ECRC if possible
*/
myri10ge_enable_ecrc(mgp);
/*
* Run a DMA test which watches for unaligned completions and
* aborts on the first one seen.
*/
status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST);
if (status == 0)
return; /* keep the aligned firmware */
if (status != -E2BIG)
dev_warn(dev, "DMA test failed: %d\n", status);
if (status == -ENOSYS)
dev_warn(dev, "Falling back to ethp! "
"Please install up to date fw\n");
abort:
/* fall back to using the unaligned firmware */
mgp->tx_boundary = 2048;
set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
{
int overridden = 0;
if (myri10ge_force_firmware == 0) {
int link_width, exp_cap;
u16 lnk;
exp_cap = pci_find_capability(mgp->pdev, PCI_CAP_ID_EXP);
pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
link_width = (lnk >> 4) & 0x3f;
/* Check to see if Link is less than 8 or if the
* upstream bridge is known to provide aligned
* completions */
if (link_width < 8) {
dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
link_width);
mgp->tx_boundary = 4096;
set_fw_name(mgp, myri10ge_fw_aligned, false);
} else {
myri10ge_firmware_probe(mgp);
}
} else {
if (myri10ge_force_firmware == 1) {
dev_info(&mgp->pdev->dev,
"Assuming aligned completions (forced)\n");
mgp->tx_boundary = 4096;
set_fw_name(mgp, myri10ge_fw_aligned, false);
} else {
dev_info(&mgp->pdev->dev,
"Assuming unaligned completions (forced)\n");
mgp->tx_boundary = 2048;
set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
}
kparam_block_sysfs_write(myri10ge_fw_name);
if (myri10ge_fw_name != NULL) {
char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
if (fw_name) {
overridden = 1;
set_fw_name(mgp, fw_name, true);
}
}
kparam_unblock_sysfs_write(myri10ge_fw_name);
if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
myri10ge_fw_names[mgp->board_number] != NULL &&
strlen(myri10ge_fw_names[mgp->board_number])) {
set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
overridden = 1;
}
if (overridden)
dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
mgp->fw_name);
}
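/*
 * Illustrative sketch (not part of the driver): the link-width test above
 * reads PCI_EXP_LNKSTA and extracts the negotiated width from bits 9:4.
 * Stand-alone version of the same bit manipulation; the name is
 * hypothetical.
 */
static inline int example_pcie_link_width(u16 lnksta)
{
	return (lnksta >> 4) & 0x3f;	/* 1, 2, 4, 8, ... lanes */
}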
#ifdef CONFIG_PM
static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct myri10ge_priv *mgp;
struct net_device *netdev;
mgp = pci_get_drvdata(pdev);
if (mgp == NULL)
return -EINVAL;
netdev = mgp->dev;
netif_device_detach(netdev);
if (netif_running(netdev)) {
netdev_info(netdev, "closing\n");
rtnl_lock();
myri10ge_close(netdev);
rtnl_unlock();
}
myri10ge_dummy_rdma(mgp, 0);
pci_save_state(pdev);
pci_disable_device(pdev);
return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
static int myri10ge_resume(struct pci_dev *pdev)
{
struct myri10ge_priv *mgp;
struct net_device *netdev;
int status;
u16 vendor;
mgp = pci_get_drvdata(pdev);
if (mgp == NULL)
return -EINVAL;
netdev = mgp->dev;
pci_set_power_state(pdev, 0); /* zeros conf space as a side effect */
msleep(5); /* give card time to respond */
pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
if (vendor == 0xffff) {
netdev_err(mgp->dev, "device disappeared!\n");
return -EIO;
}
pci_restore_state(pdev);
status = pci_enable_device(pdev);
if (status) {
dev_err(&pdev->dev, "failed to enable device\n");
return status;
}
pci_set_master(pdev);
myri10ge_reset(mgp);
myri10ge_dummy_rdma(mgp, 1);
/* Save configuration space to be restored if the
* nic resets due to a parity error */
pci_save_state(pdev);
if (netif_running(netdev)) {
rtnl_lock();
status = myri10ge_open(netdev);
rtnl_unlock();
if (status != 0)
goto abort_with_enabled;
}
netif_device_attach(netdev);
return 0;
abort_with_enabled:
pci_disable_device(pdev);
return -EIO;
}
#endif /* CONFIG_PM */
static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
{
struct pci_dev *pdev = mgp->pdev;
int vs = mgp->vendor_specific_offset;
u32 reboot;
/*enter read32 mode */
pci_write_config_byte(pdev, vs + 0x10, 0x3);
/*read REBOOT_STATUS (0xfffffff0) */
pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
pci_read_config_dword(pdev, vs + 0x14, &reboot);
return reboot;
}
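/*
 * Illustrative sketch (not part of the driver): myri10ge_read_reboot()
 * above uses the vendor-specific capability as an indirect register
 * window: write a mode byte at +0x10, write the target address at +0x18,
 * then read the data back at +0x14. A hypothetical generic form:
 */
static u32 example_vs_window_read(struct pci_dev *pdev, int vs, u32 addr)
{
	u32 val;

	pci_write_config_byte(pdev, vs + 0x10, 0x3);	/* enter read32 mode */
	pci_write_config_dword(pdev, vs + 0x18, addr);	/* target address */
	pci_read_config_dword(pdev, vs + 0x14, &val);	/* fetch the data */
	return val;
}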
/*
* This watchdog is used to check whether the board has suffered
* from a parity error and needs to be recovered.
*/
static void myri10ge_watchdog(struct work_struct *work)
{
struct myri10ge_priv *mgp =
container_of(work, struct myri10ge_priv, watchdog_work);
struct myri10ge_tx_buf *tx;
u32 reboot;
int status, rebooted;
int i;
u16 cmd, vendor;
mgp->watchdog_resets++;
pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
rebooted = 0;
if ((cmd & PCI_COMMAND_MASTER) == 0) {
/* Bus master DMA disabled? Check to see
* if the card rebooted due to a parity error
* For now, just report it */
reboot = myri10ge_read_reboot(mgp);
netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
reboot,
myri10ge_reset_recover ? "" : " not");
if (myri10ge_reset_recover == 0)
return;
rtnl_lock();
mgp->rebooted = 1;
rebooted = 1;
myri10ge_close(mgp->dev);
myri10ge_reset_recover--;
mgp->rebooted = 0;
/*
* A rebooted nic will come back with config space as
* it was after power was applied to PCIe bus.
* Attempt to restore config space which was saved
* when the driver was loaded, or the last time the
* nic was resumed from power saving mode.
*/
pci_restore_state(mgp->pdev);
/* save state again for accounting reasons */
pci_save_state(mgp->pdev);
} else {
/* if we get back -1's from our slot, perhaps somebody
* powered off our card. Don't try to reset it in
* this case */
if (cmd == 0xffff) {
pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
if (vendor == 0xffff) {
netdev_err(mgp->dev, "device disappeared!\n");
return;
}
}
/* Perhaps it is a software error. Try to reset */
netdev_err(mgp->dev, "device timeout, resetting\n");
for (i = 0; i < mgp->num_slices; i++) {
tx = &mgp->ss[i].tx;
netdev_err(mgp->dev, "(%d): %d %d %d %d %d %d\n",
i, tx->queue_active, tx->req,
tx->done, tx->pkt_start, tx->pkt_done,
(int)ntohl(mgp->ss[i].fw_stats->
send_done_count));
msleep(2000);
netdev_info(mgp->dev, "(%d): %d %d %d %d %d %d\n",
i, tx->queue_active, tx->req,
tx->done, tx->pkt_start, tx->pkt_done,
(int)ntohl(mgp->ss[i].fw_stats->
send_done_count));
}
}
if (!rebooted) {
rtnl_lock();
myri10ge_close(mgp->dev);
}
status = myri10ge_load_firmware(mgp, 1);
if (status != 0)
netdev_err(mgp->dev, "failed to load firmware\n");
else
myri10ge_open(mgp->dev);
rtnl_unlock();
}
/*
* We use our own timer routine rather than relying upon
* netdev->tx_timeout because we have a very large hardware transmit
* queue. Due to the large queue, the netdev->tx_timeout function
* cannot detect a NIC with a parity error in a timely fashion if the
* NIC is lightly loaded.
*/
static void myri10ge_watchdog_timer(unsigned long arg)
{
struct myri10ge_priv *mgp;
struct myri10ge_slice_state *ss;
int i, reset_needed, busy_slice_cnt;
u32 rx_pause_cnt;
u16 cmd;
mgp = (struct myri10ge_priv *)arg;
rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
busy_slice_cnt = 0;
for (i = 0, reset_needed = 0;
i < mgp->num_slices && reset_needed == 0; ++i) {
ss = &mgp->ss[i];
if (ss->rx_small.watchdog_needed) {
myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
mgp->small_bytes + MXGEFW_PAD,
1);
if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
myri10ge_fill_thresh)
ss->rx_small.watchdog_needed = 0;
}
if (ss->rx_big.watchdog_needed) {
myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
mgp->big_bytes, 1);
if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
myri10ge_fill_thresh)
ss->rx_big.watchdog_needed = 0;
}
if (ss->tx.req != ss->tx.done &&
ss->tx.done == ss->watchdog_tx_done &&
ss->watchdog_tx_req != ss->watchdog_tx_done) {
/* nic seems like it might be stuck.. */
if (rx_pause_cnt != mgp->watchdog_pause) {
if (net_ratelimit())
netdev_err(mgp->dev, "slice %d: TX paused, check link partner\n",
i);
} else {
netdev_warn(mgp->dev, "slice %d stuck:", i);
reset_needed = 1;
}
}
if (ss->watchdog_tx_done != ss->tx.done ||
ss->watchdog_rx_done != ss->rx_done.cnt) {
busy_slice_cnt++;
}
ss->watchdog_tx_done = ss->tx.done;
ss->watchdog_tx_req = ss->tx.req;
ss->watchdog_rx_done = ss->rx_done.cnt;
}
/* if we've sent or received no traffic, poll the NIC to
* ensure it is still there. Otherwise, we risk not noticing
* an error in a timely fashion */
if (busy_slice_cnt == 0) {
pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
if ((cmd & PCI_COMMAND_MASTER) == 0) {
reset_needed = 1;
}
}
mgp->watchdog_pause = rx_pause_cnt;
if (reset_needed) {
schedule_work(&mgp->watchdog_work);
} else {
/* rearm timer */
mod_timer(&mgp->watchdog_timer,
jiffies + myri10ge_watchdog_timeout * HZ);
}
}
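/*
 * Illustrative sketch (not part of the driver): the per-slice stall test
 * above fires when new requests were queued (req != done), the completion
 * index has not moved since the previous watchdog tick, and the previous
 * tick already saw outstanding work. Hypothetical stand-alone predicate:
 */
static inline int example_tx_stuck(int req, int done,
				   int last_req, int last_done)
{
	return req != done &&		/* work is outstanding now */
	       done == last_done &&	/* nothing completed since last tick */
	       last_req != last_done;	/* work was outstanding last tick too */
}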
static void myri10ge_free_slices(struct myri10ge_priv *mgp)
{
struct myri10ge_slice_state *ss;
struct pci_dev *pdev = mgp->pdev;
size_t bytes;
int i;
if (mgp->ss == NULL)
return;
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
if (ss->rx_done.entry != NULL) {
bytes = mgp->max_intr_slots *
sizeof(*ss->rx_done.entry);
dma_free_coherent(&pdev->dev, bytes,
ss->rx_done.entry, ss->rx_done.bus);
ss->rx_done.entry = NULL;
}
if (ss->fw_stats != NULL) {
bytes = sizeof(*ss->fw_stats);
dma_free_coherent(&pdev->dev, bytes,
ss->fw_stats, ss->fw_stats_bus);
ss->fw_stats = NULL;
netif_napi_del(&ss->napi);
}
}
kfree(mgp->ss);
mgp->ss = NULL;
}
static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
{
struct myri10ge_slice_state *ss;
struct pci_dev *pdev = mgp->pdev;
size_t bytes;
int i;
bytes = sizeof(*mgp->ss) * mgp->num_slices;
mgp->ss = kzalloc(bytes, GFP_KERNEL);
if (mgp->ss == NULL) {
return -ENOMEM;
}
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
&ss->rx_done.bus,
GFP_KERNEL);
if (ss->rx_done.entry == NULL)
goto abort;
memset(ss->rx_done.entry, 0, bytes);
bytes = sizeof(*ss->fw_stats);
ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
&ss->fw_stats_bus,
GFP_KERNEL);
if (ss->fw_stats == NULL)
goto abort;
ss->mgp = mgp;
ss->dev = mgp->dev;
netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
myri10ge_napi_weight);
}
return 0;
abort:
myri10ge_free_slices(mgp);
return -ENOMEM;
}
/*
* This function determines the number of slices supported.
* The number of slices is the minimum of the number of CPUs,
* the number of MSI-X IRQs supported, and the number of slices
* supported by the firmware.
*/
static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
{
struct myri10ge_cmd cmd;
struct pci_dev *pdev = mgp->pdev;
char *old_fw;
bool old_allocated;
int i, status, ncpus, msix_cap;
mgp->num_slices = 1;
msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
ncpus = num_online_cpus();
if (myri10ge_max_slices == 1 || msix_cap == 0 ||
(myri10ge_max_slices == -1 && ncpus < 2))
return;
/* try to load the slice aware rss firmware */
old_fw = mgp->fw_name;
old_allocated = mgp->fw_name_allocated;
/* don't free old_fw if we override it. */
mgp->fw_name_allocated = false;
if (myri10ge_fw_name != NULL) {
dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
myri10ge_fw_name);
set_fw_name(mgp, myri10ge_fw_name, false);
} else if (old_fw == myri10ge_fw_aligned)
set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
else
set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
status = myri10ge_load_firmware(mgp, 0);
if (status != 0) {
dev_info(&pdev->dev, "Rss firmware not found\n");
if (old_allocated)
kfree(old_fw);
return;
}
/* hit the board with a reset to ensure it is alive */
memset(&cmd, 0, sizeof(cmd));
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
if (status != 0) {
dev_err(&mgp->pdev->dev, "failed reset\n");
goto abort_with_fw;
}
mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);
/* tell it the size of the interrupt queues */
cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
if (status != 0) {
dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
goto abort_with_fw;
}
/* ask the maximum number of slices it supports */
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
if (status != 0)
goto abort_with_fw;
else
mgp->num_slices = cmd.data0;
/* Only allow multiple slices if MSI-X is usable */
if (!myri10ge_msi) {
goto abort_with_fw;
}
/* if the admin did not specify a limit to how many
* slices we should use, cap it automatically to the
* number of CPUs currently online */
if (myri10ge_max_slices == -1)
myri10ge_max_slices = ncpus;
if (mgp->num_slices > myri10ge_max_slices)
mgp->num_slices = myri10ge_max_slices;
/* Now try to allocate as many MSI-X vectors as we have
* slices. We give up on MSI-X if we can only get a single
* vector. */
mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
GFP_KERNEL);
if (mgp->msix_vectors == NULL)
goto disable_msix;
for (i = 0; i < mgp->num_slices; i++) {
mgp->msix_vectors[i].entry = i;
}
while (mgp->num_slices > 1) {
/* make sure it is a power of two */
while (!is_power_of_2(mgp->num_slices))
mgp->num_slices--;
if (mgp->num_slices == 1)
goto disable_msix;
status = pci_enable_msix(pdev, mgp->msix_vectors,
mgp->num_slices);
if (status == 0) {
pci_disable_msix(pdev);
if (old_allocated)
kfree(old_fw);
return;
}
if (status > 0)
mgp->num_slices = status;
else
goto disable_msix;
}
disable_msix:
if (mgp->msix_vectors != NULL) {
kfree(mgp->msix_vectors);
mgp->msix_vectors = NULL;
}
abort_with_fw:
mgp->num_slices = 1;
set_fw_name(mgp, old_fw, old_allocated);
myri10ge_load_firmware(mgp, 0);
}
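/*
 * Illustrative sketch (not part of the driver): the MSI-X loop above
 * rounds the slice count down to a power of two by decrementing until
 * is_power_of_2() holds. Hypothetical stand-alone form:
 */
static inline int example_round_slices(int n)
{
	while (!is_power_of_2(n))
		n--;
	return n;	/* e.g. 6 -> 4, 8 -> 8, 3 -> 2 */
}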
static const struct net_device_ops myri10ge_netdev_ops = {
.ndo_open = myri10ge_open,
.ndo_stop = myri10ge_close,
.ndo_start_xmit = myri10ge_xmit,
.ndo_get_stats = myri10ge_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = myri10ge_change_mtu,
.ndo_fix_features = myri10ge_fix_features,
.ndo_set_multicast_list = myri10ge_set_multicast_list,
.ndo_set_mac_address = myri10ge_set_mac_address,
};
static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct myri10ge_priv *mgp;
struct device *dev = &pdev->dev;
int i;
int status = -ENXIO;
int dac_enabled;
unsigned hdr_offset, ss_offset;
static int board_number;
netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
if (netdev == NULL) {
dev_err(dev, "Could not allocate ethernet device\n");
return -ENOMEM;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
mgp = netdev_priv(netdev);
mgp->dev = netdev;
mgp->pdev = pdev;
mgp->pause = myri10ge_flow_control;
mgp->intr_coal_delay = myri10ge_intr_coal_delay;
mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
mgp->board_number = board_number;
init_waitqueue_head(&mgp->down_wq);
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "pci_enable_device call failed\n");
status = -ENODEV;
goto abort_with_netdev;
}
/* Find the vendor-specific cap so we can check
* the reboot register later on */
mgp->vendor_specific_offset
= pci_find_capability(pdev, PCI_CAP_ID_VNDR);
/* Set our max read request to 4KB */
status = pcie_set_readrq(pdev, 4096);
if (status != 0) {
dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
status);
goto abort_with_enabled;
}
pci_set_master(pdev);
dac_enabled = 1;
status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (status != 0) {
dac_enabled = 0;
dev_err(&pdev->dev,
"64-bit pci address mask was refused, "
"trying 32-bit\n");
status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
}
if (status != 0) {
dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
goto abort_with_enabled;
}
(void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
&mgp->cmd_bus, GFP_KERNEL);
if (mgp->cmd == NULL)
goto abort_with_enabled;
mgp->board_span = pci_resource_len(pdev, 0);
mgp->iomem_base = pci_resource_start(pdev, 0);
mgp->mtrr = -1;
mgp->wc_enabled = 0;
#ifdef CONFIG_MTRR
mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
MTRR_TYPE_WRCOMB, 1);
if (mgp->mtrr >= 0)
mgp->wc_enabled = 1;
#endif
mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
if (mgp->sram == NULL) {
dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
mgp->board_span, mgp->iomem_base);
status = -ENXIO;
goto abort_with_mtrr;
}
hdr_offset =
ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
mgp->sram_size = ntohl(__raw_readl(mgp->sram + ss_offset));
if (mgp->sram_size > mgp->board_span ||
mgp->sram_size <= MYRI10GE_FW_OFFSET) {
dev_err(&pdev->dev,
"invalid sram_size %dB or board span %ldB\n",
mgp->sram_size, mgp->board_span);
goto abort_with_ioremap;
}
memcpy_fromio(mgp->eeprom_strings,
mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
status = myri10ge_read_mac_addr(mgp);
if (status)
goto abort_with_ioremap;
for (i = 0; i < ETH_ALEN; i++)
netdev->dev_addr[i] = mgp->mac_addr[i];
myri10ge_select_firmware(mgp);
status = myri10ge_load_firmware(mgp, 1);
if (status != 0) {
dev_err(&pdev->dev, "failed to load firmware\n");
goto abort_with_ioremap;
}
myri10ge_probe_slices(mgp);
status = myri10ge_alloc_slices(mgp);
if (status != 0) {
dev_err(&pdev->dev, "failed to alloc slice state\n");
goto abort_with_firmware;
}
netif_set_real_num_tx_queues(netdev, mgp->num_slices);
netif_set_real_num_rx_queues(netdev, mgp->num_slices);
status = myri10ge_reset(mgp);
if (status != 0) {
dev_err(&pdev->dev, "failed reset\n");
goto abort_with_slices;
}
#ifdef CONFIG_MYRI10GE_DCA
myri10ge_setup_dca(mgp);
#endif
pci_set_drvdata(pdev, mgp);
if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
myri10ge_initial_mtu = 68;
netdev->netdev_ops = &myri10ge_netdev_ops;
netdev->mtu = myri10ge_initial_mtu;
netdev->base_addr = mgp->iomem_base;
netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
netdev->features = netdev->hw_features;
if (dac_enabled)
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= mgp->features;
if (mgp->fw_ver_tiny < 37)
netdev->vlan_features &= ~NETIF_F_TSO6;
if (mgp->fw_ver_tiny < 32)
netdev->vlan_features &= ~NETIF_F_TSO;
/* make sure we can get an irq, and that MSI can be
* setup (if available). Also ensure netdev->irq
* is set to correct value if MSI is enabled */
status = myri10ge_request_irq(mgp);
if (status != 0)
goto abort_with_firmware;
netdev->irq = pdev->irq;
myri10ge_free_irq(mgp);
/* Save configuration space to be restored if the
* nic resets due to a parity error */
pci_save_state(pdev);
/* Setup the watchdog timer */
setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
(unsigned long)mgp);
spin_lock_init(&mgp->stats_lock);
SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
status = register_netdev(netdev);
if (status != 0) {
dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
goto abort_with_state;
}
if (mgp->msix_enabled)
dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
(mgp->wc_enabled ? "Enabled" : "Disabled"));
else
dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
mgp->msi_enabled ? "MSI" : "xPIC",
netdev->irq, mgp->tx_boundary, mgp->fw_name,
(mgp->wc_enabled ? "Enabled" : "Disabled"));
board_number++;
return 0;
abort_with_state:
pci_restore_state(pdev);
abort_with_slices:
myri10ge_free_slices(mgp);
abort_with_firmware:
myri10ge_dummy_rdma(mgp, 0);
abort_with_ioremap:
if (mgp->mac_addr_string != NULL)
dev_err(&pdev->dev,
"myri10ge_probe() failed: MAC=%s, SN=%ld\n",
mgp->mac_addr_string, mgp->serial_number);
iounmap(mgp->sram);
abort_with_mtrr:
#ifdef CONFIG_MTRR
if (mgp->mtrr >= 0)
mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
#endif
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
mgp->cmd, mgp->cmd_bus);
abort_with_enabled:
pci_disable_device(pdev);
abort_with_netdev:
set_fw_name(mgp, NULL, false);
free_netdev(netdev);
return status;
}
/*
* myri10ge_remove
*
* Does what is necessary to shutdown one Myrinet device. Called
* once for each Myrinet card by the kernel when a module is
* unloaded.
*/
static void myri10ge_remove(struct pci_dev *pdev)
{
struct myri10ge_priv *mgp;
struct net_device *netdev;
mgp = pci_get_drvdata(pdev);
if (mgp == NULL)
return;
cancel_work_sync(&mgp->watchdog_work);
netdev = mgp->dev;
unregister_netdev(netdev);
#ifdef CONFIG_MYRI10GE_DCA
myri10ge_teardown_dca(mgp);
#endif
myri10ge_dummy_rdma(mgp, 0);
/* avoid a memory leak */
pci_restore_state(pdev);
iounmap(mgp->sram);
#ifdef CONFIG_MTRR
if (mgp->mtrr >= 0)
mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
#endif
myri10ge_free_slices(mgp);
if (mgp->msix_vectors != NULL)
kfree(mgp->msix_vectors);
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
mgp->cmd, mgp->cmd_bus);
set_fw_name(mgp, NULL, false);
free_netdev(netdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
{PCI_DEVICE
(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
{0},
};
MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);
static struct pci_driver myri10ge_driver = {
.name = "myri10ge",
.probe = myri10ge_probe,
.remove = myri10ge_remove,
.id_table = myri10ge_pci_tbl,
#ifdef CONFIG_PM
.suspend = myri10ge_suspend,
.resume = myri10ge_resume,
#endif
};
#ifdef CONFIG_MYRI10GE_DCA
static int
myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
{
int err = driver_for_each_device(&myri10ge_driver.driver,
NULL, &event,
myri10ge_notify_dca_device);
if (err)
return NOTIFY_BAD;
return NOTIFY_DONE;
}
static struct notifier_block myri10ge_dca_notifier = {
.notifier_call = myri10ge_notify_dca,
.next = NULL,
.priority = 0,
};
#endif /* CONFIG_MYRI10GE_DCA */
static __init int myri10ge_init_module(void)
{
pr_info("Version %s\n", MYRI10GE_VERSION_STR);
if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
pr_err("Illegal rssh hash type %d, defaulting to source port\n",
myri10ge_rss_hash);
myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
}
#ifdef CONFIG_MYRI10GE_DCA
dca_register_notify(&myri10ge_dca_notifier);
#endif
if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
myri10ge_max_slices = MYRI10GE_MAX_SLICES;
return pci_register_driver(&myri10ge_driver);
}
module_init(myri10ge_init_module);
static __exit void myri10ge_cleanup_module(void)
{
#ifdef CONFIG_MYRI10GE_DCA
dca_unregister_notify(&myri10ge_dca_notifier);
#endif
pci_unregister_driver(&myri10ge_driver);
}
module_exit(myri10ge_cleanup_module);
| gpl-2.0 |
hayoung-lee/willow_kernel | drivers/media/video/omap/omap_voutlib.c | 2382 | 9660 | /*
* omap_voutlib.c
*
* Copyright (C) 2005-2010 Texas Instruments.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*
* Based on the OMAP2 camera driver
* Video-for-Linux (Version 2) camera capture driver for
* the OMAP24xx camera controller.
*
* Author: Andy Lowe (source@mvista.com)
*
* Copyright (C) 2004 MontaVista Software, Inc.
* Copyright (C) 2010 Texas Instruments.
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include <plat/cpu.h>
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("OMAP Video library");
MODULE_LICENSE("GPL");
/* Return the default overlay cropping rectangle in crop given the image
* size in pix and the video display size in fbuf. The default
* cropping rectangle is the largest rectangle no larger than the capture size
* that will fit on the display. The default cropping rectangle is centered in
* the image. All dimensions and offsets are rounded down to even numbers.
*/
void omap_vout_default_crop(struct v4l2_pix_format *pix,
struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop)
{
crop->width = (pix->width < fbuf->fmt.width) ?
pix->width : fbuf->fmt.width;
crop->height = (pix->height < fbuf->fmt.height) ?
pix->height : fbuf->fmt.height;
crop->width &= ~1;
crop->height &= ~1;
crop->left = ((pix->width - crop->width) >> 1) & ~1;
crop->top = ((pix->height - crop->height) >> 1) & ~1;
}
EXPORT_SYMBOL_GPL(omap_vout_default_crop);
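/*
 * Illustrative example (not part of the driver): for a 720x576 image on a
 * 640x480 display, omap_vout_default_crop() above yields a 640x480 crop
 * centered at (40, 48), with every value already even. Hypothetical check:
 */
static void example_default_crop_check(void)
{
	struct v4l2_pix_format pix = { .width = 720, .height = 576 };
	struct v4l2_framebuffer fbuf = {
		.fmt = { .width = 640, .height = 480 }
	};
	struct v4l2_rect crop;

	omap_vout_default_crop(&pix, &fbuf, &crop);
	/* crop = { .left = 40, .top = 48, .width = 640, .height = 480 } */
}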
/* Given a new render window in new_win, adjust the window to the
* nearest supported configuration. The adjusted window parameters are
* returned in new_win.
* Returns zero if successful, or -EINVAL if the requested window is
* impossible and cannot reasonably be adjusted.
*/
int omap_vout_try_window(struct v4l2_framebuffer *fbuf,
struct v4l2_window *new_win)
{
struct v4l2_rect try_win;
/* make a working copy of the new_win rectangle */
try_win = new_win->w;
/* adjust the preview window so it fits on the display by clipping any
* offscreen areas
*/
if (try_win.left < 0) {
try_win.width += try_win.left;
try_win.left = 0;
}
if (try_win.top < 0) {
try_win.height += try_win.top;
try_win.top = 0;
}
try_win.width = (try_win.width < fbuf->fmt.width) ?
try_win.width : fbuf->fmt.width;
try_win.height = (try_win.height < fbuf->fmt.height) ?
try_win.height : fbuf->fmt.height;
if (try_win.left + try_win.width > fbuf->fmt.width)
try_win.width = fbuf->fmt.width - try_win.left;
if (try_win.top + try_win.height > fbuf->fmt.height)
try_win.height = fbuf->fmt.height - try_win.top;
try_win.width &= ~1;
try_win.height &= ~1;
if (try_win.width <= 0 || try_win.height <= 0)
return -EINVAL;
/* We now have a valid preview window, so go with it */
new_win->w = try_win;
new_win->field = V4L2_FIELD_ANY;
return 0;
}
EXPORT_SYMBOL_GPL(omap_vout_try_window);
/* Given a new render window in new_win, adjust the window to the
* nearest supported configuration. The image cropping window in crop
* will also be adjusted if necessary. Preference is given to keeping the
* window as close to the requested configuration as possible. If
* successful, new_win, vout->win, and crop are updated.
* Returns zero if successful, or -EINVAL if the requested preview window is
* impossible and cannot reasonably be adjusted.
*/
int omap_vout_new_window(struct v4l2_rect *crop,
struct v4l2_window *win, struct v4l2_framebuffer *fbuf,
struct v4l2_window *new_win)
{
int err;
err = omap_vout_try_window(fbuf, new_win);
if (err)
return err;
/* update our preview window */
win->w = new_win->w;
win->field = new_win->field;
win->chromakey = new_win->chromakey;
/* Adjust the cropping window to allow for resizing limitation */
if (cpu_is_omap24xx()) {
/* For 24xx limit is 8x to 1/2x scaling. */
if ((crop->height/win->w.height) >= 2)
crop->height = win->w.height * 2;
if ((crop->width/win->w.width) >= 2)
crop->width = win->w.width * 2;
if (crop->width > 768) {
/* The OMAP2420 vertical resizing line buffer is 768
* pixels wide. If the cropped image is wider than
* 768 pixels then it cannot be vertically resized.
*/
if (crop->height != win->w.height)
crop->width = 768;
}
} else if (cpu_is_omap34xx()) {
/* For 34xx limit is 8x to 1/4x scaling. */
if ((crop->height/win->w.height) >= 4)
crop->height = win->w.height * 4;
if ((crop->width/win->w.width) >= 4)
crop->width = win->w.width * 4;
}
return 0;
}
EXPORT_SYMBOL_GPL(omap_vout_new_window);
/* Given a new cropping rectangle in new_crop, adjust the cropping rectangle to
* the nearest supported configuration. The image render window in win will
* also be adjusted if necessary. The preview window is adjusted such that the
* horizontal and vertical rescaling ratios stay constant. If the render
* window would fall outside the display boundaries, the cropping rectangle
* will also be adjusted to maintain the rescaling ratios. If successful, crop
* and win are updated.
* Returns zero if successful, or -EINVAL if the requested cropping rectangle is
* impossible and cannot reasonably be adjusted.
*/
int omap_vout_new_crop(struct v4l2_pix_format *pix,
struct v4l2_rect *crop, struct v4l2_window *win,
struct v4l2_framebuffer *fbuf, const struct v4l2_rect *new_crop)
{
struct v4l2_rect try_crop;
unsigned long vresize, hresize;
/* make a working copy of the new_crop rectangle */
try_crop = *new_crop;
/* adjust the cropping rectangle so it fits in the image */
if (try_crop.left < 0) {
try_crop.width += try_crop.left;
try_crop.left = 0;
}
if (try_crop.top < 0) {
try_crop.height += try_crop.top;
try_crop.top = 0;
}
try_crop.width = (try_crop.width < pix->width) ?
try_crop.width : pix->width;
try_crop.height = (try_crop.height < pix->height) ?
try_crop.height : pix->height;
if (try_crop.left + try_crop.width > pix->width)
try_crop.width = pix->width - try_crop.left;
if (try_crop.top + try_crop.height > pix->height)
try_crop.height = pix->height - try_crop.top;
try_crop.width &= ~1;
try_crop.height &= ~1;
if (try_crop.width <= 0 || try_crop.height <= 0)
return -EINVAL;
if (cpu_is_omap24xx()) {
if (try_crop.height != win->w.height) {
/* If we're resizing vertically, we can't support a
* crop width wider than 768 pixels.
*/
if (try_crop.width > 768)
try_crop.width = 768;
}
}
/* vertical resizing */
vresize = (1024 * try_crop.height) / win->w.height;
if (cpu_is_omap24xx() && (vresize > 2048))
vresize = 2048;
else if (cpu_is_omap34xx() && (vresize > 4096))
vresize = 4096;
win->w.height = ((1024 * try_crop.height) / vresize) & ~1;
if (win->w.height == 0)
win->w.height = 2;
if (win->w.height + win->w.top > fbuf->fmt.height) {
/* We made the preview window extend below the bottom of the
* display, so clip it to the display boundary and resize the
* cropping height to maintain the vertical resizing ratio.
*/
win->w.height = (fbuf->fmt.height - win->w.top) & ~1;
if (try_crop.height == 0)
try_crop.height = 2;
}
/* horizontal resizing */
hresize = (1024 * try_crop.width) / win->w.width;
if (cpu_is_omap24xx() && (hresize > 2048))
hresize = 2048;
else if (cpu_is_omap34xx() && (hresize > 4096))
hresize = 4096;
win->w.width = ((1024 * try_crop.width) / hresize) & ~1;
if (win->w.width == 0)
win->w.width = 2;
if (win->w.width + win->w.left > fbuf->fmt.width) {
/* We made the preview window extend past the right side of the
* display, so clip it to the display boundary and resize the
* cropping width to maintain the horizontal resizing ratio.
*/
win->w.width = (fbuf->fmt.width - win->w.left) & ~1;
if (try_crop.width == 0)
try_crop.width = 2;
}
if (cpu_is_omap24xx()) {
if ((try_crop.height/win->w.height) >= 2)
try_crop.height = win->w.height * 2;
if ((try_crop.width/win->w.width) >= 2)
try_crop.width = win->w.width * 2;
if (try_crop.width > 768) {
/* The OMAP2420 vertical resizing line buffer is
* 768 pixels wide. If the cropped image is wider
* than 768 pixels then it cannot be vertically resized.
*/
if (try_crop.height != win->w.height)
try_crop.width = 768;
}
} else if (cpu_is_omap34xx()) {
if ((try_crop.height/win->w.height) >= 4)
try_crop.height = win->w.height * 4;
if ((try_crop.width/win->w.width) >= 4)
try_crop.width = win->w.width * 4;
}
/* update our cropping rectangle and we're done */
*crop = try_crop;
return 0;
}
EXPORT_SYMBOL_GPL(omap_vout_new_crop);
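/*
 * Illustrative sketch (not part of the driver): the resize checks above use
 * a 10-bit fixed-point ratio, ratio = 1024 * src / dst, so 1024 means 1:1
 * and 2048 means 2x downscaling; the clamp to 2048 (24xx) or 4096 (34xx)
 * enforces the 1/2x or 1/4x hardware limit. Hypothetical form for one axis:
 */
static unsigned long example_clamp_resize(unsigned long src,
					  unsigned long dst,
					  unsigned long max_ratio)
{
	unsigned long ratio = (1024 * src) / dst;

	if (ratio > max_ratio)
		ratio = max_ratio;
	/* recompute the window size, forced even, minimum 2 */
	dst = ((1024 * src) / ratio) & ~1UL;
	return dst ? dst : 2;
}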
/* Given a new format in pix and fbuf, crop and win
* structures are initialized to default values. crop
* is initialized to the largest window size that will fit on the display. The
* crop window is centered in the image. win is initialized to
* the same size as crop and is centered on the display.
* All sizes and offsets are constrained to be even numbers.
*/
void omap_vout_new_format(struct v4l2_pix_format *pix,
struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop,
struct v4l2_window *win)
{
/* crop defines the preview source window in the image capture
* buffer
*/
omap_vout_default_crop(pix, fbuf, crop);
/* win defines the preview target window on the display */
win->w.width = crop->width;
win->w.height = crop->height;
win->w.left = ((fbuf->fmt.width - win->w.width) >> 1) & ~1;
win->w.top = ((fbuf->fmt.height - win->w.height) >> 1) & ~1;
}
EXPORT_SYMBOL_GPL(omap_vout_new_format);
| gpl-2.0 |
RittikBhowmik/Project-X5pro-Kernel-u8800pro | drivers/net/wireless/hostap/hostap_hw.c | 3150 | 95479 | /*
* Host AP (software wireless LAN access point) driver for
* Intersil Prism2/2.5/3.
*
* Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
* <j@w1.fi>
* Copyright (c) 2002-2005, Jouni Malinen <j@w1.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. See README and COPYING for
* more details.
*
* FIX:
* - there is currently no way of associating TX packets to correct wds device
* when TX Exc/OK event occurs, so all tx_packets and some
* tx_errors/tx_dropped are added to the main netdevice; using sw_support
* field in txdesc might be used to fix this (using Alloc event to increment
* tx_packets would need some further info in txfid table)
*
* Buffer Access Path (BAP) usage:
* Prism2 cards have two separate BAPs for accessing the card memory. These
* should allow concurrent access to two different frames and the driver
* previously used BAP0 for sending data and BAP1 for receiving data.
* However, there seem to be a number of issues with concurrent access and at
* least one known hardware bug in using BAP0 and BAP1 concurrently with PCI
* Prism2.5. Therefore, the driver now only uses BAP0 for moving data between
* host and card memories. BAP0 accesses are protected with local->baplock
* (spin_lock_bh) to prevent concurrent use.
*/
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <net/lib80211.h>
#include <asm/irq.h>
#include "hostap_80211.h"
#include "hostap.h"
#include "hostap_ap.h"
/* #define final_version */
static int mtu = 1500;
module_param(mtu, int, 0444);
MODULE_PARM_DESC(mtu, "Maximum transfer unit");
static int channel[MAX_PARM_DEVICES] = { 3, DEF_INTS };
module_param_array(channel, int, NULL, 0444);
MODULE_PARM_DESC(channel, "Initial channel");
static char essid[33] = "test";
module_param_string(essid, essid, sizeof(essid), 0444);
MODULE_PARM_DESC(essid, "Host AP's ESSID");
static int iw_mode[MAX_PARM_DEVICES] = { IW_MODE_MASTER, DEF_INTS };
module_param_array(iw_mode, int, NULL, 0444);
MODULE_PARM_DESC(iw_mode, "Initial operation mode");
static int beacon_int[MAX_PARM_DEVICES] = { 100, DEF_INTS };
module_param_array(beacon_int, int, NULL, 0444);
MODULE_PARM_DESC(beacon_int, "Beacon interval (1 = 1024 usec)");
static int dtim_period[MAX_PARM_DEVICES] = { 1, DEF_INTS };
module_param_array(dtim_period, int, NULL, 0444);
MODULE_PARM_DESC(dtim_period, "DTIM period");
static char dev_template[16] = "wlan%d";
module_param_string(dev_template, dev_template, sizeof(dev_template), 0444);
MODULE_PARM_DESC(dev_template, "Prefix for network device name (default: "
"wlan%d)");
#ifdef final_version
#define EXTRA_EVENTS_WTERR 0
#else
/* check WTERR events (Wait Time-out) in development versions */
#define EXTRA_EVENTS_WTERR HFA384X_EV_WTERR
#endif
/* Events that will be using BAP0 */
#define HFA384X_BAP0_EVENTS \
(HFA384X_EV_TXEXC | HFA384X_EV_RX | HFA384X_EV_INFO | HFA384X_EV_TX)
/* event mask, i.e., events that will result in an interrupt */
#define HFA384X_EVENT_MASK \
(HFA384X_BAP0_EVENTS | HFA384X_EV_ALLOC | HFA384X_EV_INFDROP | \
HFA384X_EV_CMD | HFA384X_EV_TICK | \
EXTRA_EVENTS_WTERR)
/* Default TX control flags: use 802.11 headers and request interrupt for
* failed transmits. Frames that request ACK callback, will add
* _TX_OK flag and _ALT_RTRY flag may be used to select different retry policy.
*/
#define HFA384X_TX_CTRL_FLAGS \
(HFA384X_TX_CTRL_802_11 | HFA384X_TX_CTRL_TX_EX)
/* ca. 1 usec */
#define HFA384X_CMD_BUSY_TIMEOUT 5000
#define HFA384X_BAP_BUSY_TIMEOUT 50000
/* ca. 10 usec */
#define HFA384X_CMD_COMPL_TIMEOUT 20000
#define HFA384X_DL_COMPL_TIMEOUT 1000000
/* Wait times for initialization; yield to other processes to avoid busy
* waiting for long time. */
#define HFA384X_INIT_TIMEOUT (HZ / 2) /* 500 ms */
#define HFA384X_ALLOC_COMPL_TIMEOUT (HZ / 20) /* 50 ms */
static void prism2_hw_reset(struct net_device *dev);
static void prism2_check_sta_fw_version(local_info_t *local);
#ifdef PRISM2_DOWNLOAD_SUPPORT
/* hostap_download.c */
static int prism2_download_aux_dump(struct net_device *dev,
unsigned int addr, int len, u8 *buf);
static u8 * prism2_read_pda(struct net_device *dev);
static int prism2_download(local_info_t *local,
struct prism2_download_param *param);
static void prism2_download_free_data(struct prism2_download_data *dl);
static int prism2_download_volatile(local_info_t *local,
struct prism2_download_data *param);
static int prism2_download_genesis(local_info_t *local,
struct prism2_download_data *param);
static int prism2_get_ram_size(local_info_t *local);
#endif /* PRISM2_DOWNLOAD_SUPPORT */
#ifndef final_version
/* magic value written to SWSUPPORT0 reg. for detecting whether card is still
* present */
#define HFA384X_MAGIC 0x8A32
#endif
static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
{
return HFA384X_INW(reg);
}
static void hfa384x_read_regs(struct net_device *dev,
struct hfa384x_regs *regs)
{
regs->cmd = HFA384X_INW(HFA384X_CMD_OFF);
regs->evstat = HFA384X_INW(HFA384X_EVSTAT_OFF);
regs->offset0 = HFA384X_INW(HFA384X_OFFSET0_OFF);
regs->offset1 = HFA384X_INW(HFA384X_OFFSET1_OFF);
regs->swsupport0 = HFA384X_INW(HFA384X_SWSUPPORT0_OFF);
}
/**
* __hostap_cmd_queue_free - Free Prism2 command queue entry (private)
* @local: pointer to private Host AP driver data
* @entry: Prism2 command queue entry to be freed
* @del_req: request the entry to be removed
*
* Internal helper function for freeing Prism2 command queue entries.
* Caller must have acquired local->cmdlock before calling this function.
*/
static inline void __hostap_cmd_queue_free(local_info_t *local,
struct hostap_cmd_queue *entry,
int del_req)
{
if (del_req) {
entry->del_req = 1;
if (!list_empty(&entry->list)) {
list_del_init(&entry->list);
local->cmd_queue_len--;
}
}
if (atomic_dec_and_test(&entry->usecnt) && entry->del_req)
kfree(entry);
}
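/*
 * Illustrative sketch (not part of the driver): the helper above implements
 * deferred freeing - an entry is kfree()'d only once its use count drops to
 * zero AND deletion has been requested, so the IRQ handler and a sleeping
 * caller can both hold short-lived references safely. Hypothetical skeleton:
 */
struct example_refcounted {
	atomic_t usecnt;
	int del_req;
};

static void example_put(struct example_refcounted *e)
{
	/* last reference dropped and deletion requested -> free */
	if (atomic_dec_and_test(&e->usecnt) && e->del_req)
		kfree(e);
}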
/**
* hostap_cmd_queue_free - Free Prism2 command queue entry
* @local: pointer to private Host AP driver data
* @entry: Prism2 command queue entry to be freed
* @del_req: request the entry to be removed
*
* Free a Prism2 command queue entry.
*/
static inline void hostap_cmd_queue_free(local_info_t *local,
struct hostap_cmd_queue *entry,
int del_req)
{
unsigned long flags;
spin_lock_irqsave(&local->cmdlock, flags);
__hostap_cmd_queue_free(local, entry, del_req);
spin_unlock_irqrestore(&local->cmdlock, flags);
}
/**
* prism2_clear_cmd_queue - Free all pending Prism2 command queue entries
* @local: pointer to private Host AP driver data
*/
static void prism2_clear_cmd_queue(local_info_t *local)
{
struct list_head *ptr, *n;
unsigned long flags;
struct hostap_cmd_queue *entry;
spin_lock_irqsave(&local->cmdlock, flags);
list_for_each_safe(ptr, n, &local->cmd_queue) {
entry = list_entry(ptr, struct hostap_cmd_queue, list);
atomic_inc(&entry->usecnt);
printk(KERN_DEBUG "%s: removed pending cmd_queue entry "
"(type=%d, cmd=0x%04x, param0=0x%04x)\n",
local->dev->name, entry->type, entry->cmd,
entry->param0);
__hostap_cmd_queue_free(local, entry, 1);
}
if (local->cmd_queue_len) {
/* This should not happen; print debug message and clear
* queue length. */
printk(KERN_DEBUG "%s: cmd_queue_len (%d) not zero after "
"flush\n", local->dev->name, local->cmd_queue_len);
local->cmd_queue_len = 0;
}
spin_unlock_irqrestore(&local->cmdlock, flags);
}
/**
* hfa384x_cmd_issue - Issue a Prism2 command to the hardware
* @dev: pointer to net_device
* @entry: Prism2 command queue entry to be issued
*/
static int hfa384x_cmd_issue(struct net_device *dev,
struct hostap_cmd_queue *entry)
{
struct hostap_interface *iface;
local_info_t *local;
int tries;
u16 reg;
unsigned long flags;
iface = netdev_priv(dev);
local = iface->local;
if (local->func->card_present && !local->func->card_present(local))
return -ENODEV;
if (entry->issued) {
printk(KERN_DEBUG "%s: driver bug - re-issuing command @%p\n",
dev->name, entry);
}
/* wait until busy bit is clear; this should always be clear since the
* commands are serialized */
tries = HFA384X_CMD_BUSY_TIMEOUT;
while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) {
tries--;
udelay(1);
}
#ifndef final_version
if (tries != HFA384X_CMD_BUSY_TIMEOUT) {
prism2_io_debug_error(dev, 1);
printk(KERN_DEBUG "%s: hfa384x_cmd_issue: cmd reg was busy "
"for %d usec\n", dev->name,
HFA384X_CMD_BUSY_TIMEOUT - tries);
}
#endif
if (tries == 0) {
reg = HFA384X_INW(HFA384X_CMD_OFF);
prism2_io_debug_error(dev, 2);
printk(KERN_DEBUG "%s: hfa384x_cmd_issue - timeout - "
"reg=0x%04x\n", dev->name, reg);
return -ETIMEDOUT;
}
/* write command */
spin_lock_irqsave(&local->cmdlock, flags);
HFA384X_OUTW(entry->param0, HFA384X_PARAM0_OFF);
HFA384X_OUTW(entry->param1, HFA384X_PARAM1_OFF);
HFA384X_OUTW(entry->cmd, HFA384X_CMD_OFF);
entry->issued = 1;
spin_unlock_irqrestore(&local->cmdlock, flags);
return 0;
}
/**
* hfa384x_cmd - Issue a Prism2 command and wait (sleep) for completion
* @dev: pointer to net_device
* @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
* @param0: value for Param0 register
* @param1: value for Param1 register (pointer; %NULL if not used)
* @resp0: pointer for Resp0 data or %NULL if Resp0 is not needed
*
* Issue given command (possibly after waiting in command queue) and sleep
* until the command is completed (or timed out or interrupted). This can be
* called only from user process context.
*/
static int hfa384x_cmd(struct net_device *dev, u16 cmd, u16 param0,
u16 *param1, u16 *resp0)
{
struct hostap_interface *iface;
local_info_t *local;
int err, res, issue, issued = 0;
unsigned long flags;
struct hostap_cmd_queue *entry;
DECLARE_WAITQUEUE(wait, current);
iface = netdev_priv(dev);
local = iface->local;
if (in_interrupt()) {
printk(KERN_DEBUG "%s: hfa384x_cmd called from interrupt "
"context\n", dev->name);
return -1;
}
if (local->cmd_queue_len >= HOSTAP_CMD_QUEUE_MAX_LEN) {
printk(KERN_DEBUG "%s: hfa384x_cmd: cmd_queue full\n",
dev->name);
return -1;
}
if (signal_pending(current))
return -EINTR;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (entry == NULL) {
printk(KERN_DEBUG "%s: hfa384x_cmd - kmalloc failed\n",
dev->name);
return -ENOMEM;
}
atomic_set(&entry->usecnt, 1);
entry->type = CMD_SLEEP;
entry->cmd = cmd;
entry->param0 = param0;
if (param1)
entry->param1 = *param1;
init_waitqueue_head(&entry->compl);
/* prepare to wait for command completion event, but do not sleep yet
*/
add_wait_queue(&entry->compl, &wait);
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&local->cmdlock, flags);
issue = list_empty(&local->cmd_queue);
if (issue)
entry->issuing = 1;
list_add_tail(&entry->list, &local->cmd_queue);
local->cmd_queue_len++;
spin_unlock_irqrestore(&local->cmdlock, flags);
err = 0;
if (!issue)
goto wait_completion;
if (signal_pending(current))
err = -EINTR;
if (!err) {
if (hfa384x_cmd_issue(dev, entry))
err = -ETIMEDOUT;
else
issued = 1;
}
wait_completion:
if (!err && entry->type != CMD_COMPLETED) {
/* sleep until command is completed or timed out */
res = schedule_timeout(2 * HZ);
} else
res = -1;
if (!err && signal_pending(current))
err = -EINTR;
if (err && issued) {
/* the command was issued, so a CmdCompl event should occur
* soon; however, there's a pending signal and
* schedule_timeout() would be interrupted; wait a short period
* of time to avoid removing entry from the list before
* CmdCompl event */
udelay(300);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&entry->compl, &wait);
/* If entry->list is still in the list, it must be removed
* first and in this case prism2_cmd_ev() does not yet have
* local reference to it, and the data can be kfree()'d
* here. If the command completion event is still generated,
* it will be assigned to next (possibly) pending command, but
* the driver will reset the card anyway due to timeout
*
* If the entry is not in the list prism2_cmd_ev() has a local
* reference to it, but keeps cmdlock as long as the data is
* needed, so the data can be kfree()'d here. */
/* FIX: if the entry->list is in the list, it has not been completed
* yet, so removing it here is somewhat wrong.. this could cause
* references to freed memory and next list_del() causing NULL pointer
* dereference.. it would probably be better to leave the entry in the
* list and the list should be emptied during hw reset */
spin_lock_irqsave(&local->cmdlock, flags);
if (!list_empty(&entry->list)) {
printk(KERN_DEBUG "%s: hfa384x_cmd: entry still in list? "
"(entry=%p, type=%d, res=%d)\n", dev->name, entry,
entry->type, res);
list_del_init(&entry->list);
local->cmd_queue_len--;
}
spin_unlock_irqrestore(&local->cmdlock, flags);
if (err) {
printk(KERN_DEBUG "%s: hfa384x_cmd: interrupted; err=%d\n",
dev->name, err);
res = err;
goto done;
}
if (entry->type != CMD_COMPLETED) {
u16 reg = HFA384X_INW(HFA384X_EVSTAT_OFF);
printk(KERN_DEBUG "%s: hfa384x_cmd: command was not "
"completed (res=%d, entry=%p, type=%d, cmd=0x%04x, "
"param0=0x%04x, EVSTAT=%04x INTEN=%04x)\n", dev->name,
res, entry, entry->type, entry->cmd, entry->param0, reg,
HFA384X_INW(HFA384X_INTEN_OFF));
if (reg & HFA384X_EV_CMD) {
/* Command completion event is pending, but the
* interrupt was not delivered - probably an issue
* with pcmcia-cs configuration. */
printk(KERN_WARNING "%s: interrupt delivery does not "
"seem to work\n", dev->name);
}
prism2_io_debug_error(dev, 3);
res = -ETIMEDOUT;
goto done;
}
if (resp0 != NULL)
*resp0 = entry->resp0;
#ifndef final_version
if (entry->res) {
printk(KERN_DEBUG "%s: CMD=0x%04x => res=0x%02x, "
"resp0=0x%04x\n",
dev->name, cmd, entry->res, entry->resp0);
}
#endif /* final_version */
res = entry->res;
done:
hostap_cmd_queue_free(local, entry, 1);
return res;
}
/**
* hfa384x_cmd_callback - Issue a Prism2 command; callback when completed
* @dev: pointer to net_device
* @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
* @param0: value for Param0 register
* @callback: command completion callback function (%NULL = no callback)
* @context: context data to be given to the callback function
*
* Issue given command (possibly after waiting in command queue) and use
* callback function to indicate command completion. This can be called both
* from user and interrupt context. The callback function will be called in
* hardware IRQ context. It may be %NULL, in which case no callback is
* invoked on command completion.
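*
* Example (sketch; mirrors the way prism2_infdrop() below uses this
* helper to inquire CommTallies without blocking):
*
*	hfa384x_cmd_callback(dev, HFA384X_CMDCODE_INQUIRE,
*			     HFA384X_INFO_COMMTALLIES, NULL, 0);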
*/
static int hfa384x_cmd_callback(struct net_device *dev, u16 cmd, u16 param0,
void (*callback)(struct net_device *dev,
long context, u16 resp0,
u16 status),
long context)
{
struct hostap_interface *iface;
local_info_t *local;
int issue, ret;
unsigned long flags;
struct hostap_cmd_queue *entry;
iface = netdev_priv(dev);
local = iface->local;
if (local->cmd_queue_len >= HOSTAP_CMD_QUEUE_MAX_LEN + 2) {
printk(KERN_DEBUG "%s: hfa384x_cmd: cmd_queue full\n",
dev->name);
return -1;
}
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (entry == NULL) {
printk(KERN_DEBUG "%s: hfa384x_cmd_callback - kmalloc "
"failed\n", dev->name);
return -ENOMEM;
}
atomic_set(&entry->usecnt, 1);
entry->type = CMD_CALLBACK;
entry->cmd = cmd;
entry->param0 = param0;
entry->callback = callback;
entry->context = context;
spin_lock_irqsave(&local->cmdlock, flags);
issue = list_empty(&local->cmd_queue);
if (issue)
entry->issuing = 1;
list_add_tail(&entry->list, &local->cmd_queue);
local->cmd_queue_len++;
spin_unlock_irqrestore(&local->cmdlock, flags);
if (issue && hfa384x_cmd_issue(dev, entry))
ret = -ETIMEDOUT;
else
ret = 0;
hostap_cmd_queue_free(local, entry, ret);
return ret;
}
/**
* __hfa384x_cmd_no_wait - Issue a Prism2 command (private)
* @dev: pointer to net_device
* @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
* @param0: value for Param0 register
* @io_debug_num: I/O debug error number
*
* Shared helper function for hfa384x_cmd_wait() and hfa384x_cmd_no_wait().
*/
static int __hfa384x_cmd_no_wait(struct net_device *dev, u16 cmd, u16 param0,
int io_debug_num)
{
int tries;
u16 reg;
/* wait until busy bit is clear; this should always be clear since the
* commands are serialized */
tries = HFA384X_CMD_BUSY_TIMEOUT;
while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) {
tries--;
udelay(1);
}
if (tries == 0) {
reg = HFA384X_INW(HFA384X_CMD_OFF);
prism2_io_debug_error(dev, io_debug_num);
printk(KERN_DEBUG "%s: __hfa384x_cmd_no_wait(%d) - timeout - "
"reg=0x%04x\n", dev->name, io_debug_num, reg);
return -ETIMEDOUT;
}
/* write command */
HFA384X_OUTW(param0, HFA384X_PARAM0_OFF);
HFA384X_OUTW(cmd, HFA384X_CMD_OFF);
return 0;
}
/**
* hfa384x_cmd_wait - Issue a Prism2 command and busy wait for completion
* @dev: pointer to net_device
* @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
* @param0: value for Param0 register
*/
static int hfa384x_cmd_wait(struct net_device *dev, u16 cmd, u16 param0)
{
int res, tries;
u16 reg;
res = __hfa384x_cmd_no_wait(dev, cmd, param0, 4);
if (res)
return res;
/* wait for command completion */
if ((cmd & HFA384X_CMDCODE_MASK) == HFA384X_CMDCODE_DOWNLOAD)
tries = HFA384X_DL_COMPL_TIMEOUT;
else
tries = HFA384X_CMD_COMPL_TIMEOUT;
while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD) &&
tries > 0) {
tries--;
udelay(10);
}
if (tries == 0) {
reg = HFA384X_INW(HFA384X_EVSTAT_OFF);
prism2_io_debug_error(dev, 5);
printk(KERN_DEBUG "%s: hfa384x_cmd_wait - timeout2 - "
"reg=0x%04x\n", dev->name, reg);
return -ETIMEDOUT;
}
res = (HFA384X_INW(HFA384X_STATUS_OFF) &
(BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10) | BIT(9) |
BIT(8))) >> 8;
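/* bits 8..14 of the Status register hold the command result code */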
#ifndef final_version
if (res) {
printk(KERN_DEBUG "%s: CMD=0x%04x => res=0x%02x\n",
dev->name, cmd, res);
}
#endif
HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
return res;
}
/**
* hfa384x_cmd_no_wait - Issue a Prism2 command; do not wait for completion
* @dev: pointer to net_device
* @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
* @param0: value for Param0 register
*/
static inline int hfa384x_cmd_no_wait(struct net_device *dev, u16 cmd,
u16 param0)
{
return __hfa384x_cmd_no_wait(dev, cmd, param0, 6);
}
/**
* prism2_cmd_ev - Prism2 command completion event handler
* @dev: pointer to net_device
*
* Interrupt handler for command completion events. Called by the main
* interrupt handler in hardware IRQ context. Read Resp0 and status registers
* from the hardware and ACK the event. Depending on the issued command type
* either wake up the sleeping process that is waiting for command completion
* or call the callback function. Issue the next command, if one is pending.
*/
static void prism2_cmd_ev(struct net_device *dev)
{
struct hostap_interface *iface;
local_info_t *local;
struct hostap_cmd_queue *entry = NULL;
iface = netdev_priv(dev);
local = iface->local;
spin_lock(&local->cmdlock);
if (!list_empty(&local->cmd_queue)) {
entry = list_entry(local->cmd_queue.next,
struct hostap_cmd_queue, list);
atomic_inc(&entry->usecnt);
list_del_init(&entry->list);
local->cmd_queue_len--;
if (!entry->issued) {
printk(KERN_DEBUG "%s: Command completion event, but "
"cmd not issued\n", dev->name);
__hostap_cmd_queue_free(local, entry, 1);
entry = NULL;
}
}
spin_unlock(&local->cmdlock);
if (!entry) {
HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
printk(KERN_DEBUG "%s: Command completion event, but no "
"pending commands\n", dev->name);
return;
}
entry->resp0 = HFA384X_INW(HFA384X_RESP0_OFF);
entry->res = (HFA384X_INW(HFA384X_STATUS_OFF) &
(BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10) |
BIT(9) | BIT(8))) >> 8;
HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
/* TODO: rest of the CmdEv handling could be moved to tasklet */
if (entry->type == CMD_SLEEP) {
entry->type = CMD_COMPLETED;
wake_up_interruptible(&entry->compl);
} else if (entry->type == CMD_CALLBACK) {
if (entry->callback)
entry->callback(dev, entry->context, entry->resp0,
entry->res);
} else {
printk(KERN_DEBUG "%s: Invalid command completion type %d\n",
dev->name, entry->type);
}
hostap_cmd_queue_free(local, entry, 1);
/* issue next command, if pending */
entry = NULL;
spin_lock(&local->cmdlock);
if (!list_empty(&local->cmd_queue)) {
entry = list_entry(local->cmd_queue.next,
struct hostap_cmd_queue, list);
if (entry->issuing) {
/* hfa384x_cmd() has already started issuing this
* command, so do not start here */
entry = NULL;
}
if (entry)
atomic_inc(&entry->usecnt);
}
spin_unlock(&local->cmdlock);
if (entry) {
/* issue next command; if command issuing fails, remove the
* entry from cmd_queue */
int res = hfa384x_cmd_issue(dev, entry);
spin_lock(&local->cmdlock);
__hostap_cmd_queue_free(local, entry, res);
spin_unlock(&local->cmdlock);
}
}
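/* Busy wait until the BAP offset register's busy bit clears; returns
 * non-zero if the register is still busy after HFA384X_BAP_BUSY_TIMEOUT
 * polling rounds. */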
static int hfa384x_wait_offset(struct net_device *dev, u16 o_off)
{
int tries = HFA384X_BAP_BUSY_TIMEOUT;
int res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY;
while (res && tries > 0) {
tries--;
udelay(1);
res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY;
}
return res;
}
/* Offset must be even */
static int hfa384x_setup_bap(struct net_device *dev, u16 bap, u16 id,
int offset)
{
u16 o_off, s_off;
int ret = 0;
if (offset % 2 || bap > 1)
return -EINVAL;
if (bap == BAP1) {
o_off = HFA384X_OFFSET1_OFF;
s_off = HFA384X_SELECT1_OFF;
} else {
o_off = HFA384X_OFFSET0_OFF;
s_off = HFA384X_SELECT0_OFF;
}
if (hfa384x_wait_offset(dev, o_off)) {
prism2_io_debug_error(dev, 7);
printk(KERN_DEBUG "%s: hfa384x_setup_bap - timeout before\n",
dev->name);
ret = -ETIMEDOUT;
goto out;
}
HFA384X_OUTW(id, s_off);
HFA384X_OUTW(offset, o_off);
if (hfa384x_wait_offset(dev, o_off)) {
prism2_io_debug_error(dev, 8);
printk(KERN_DEBUG "%s: hfa384x_setup_bap - timeout after\n",
dev->name);
ret = -ETIMEDOUT;
goto out;
}
#ifndef final_version
if (HFA384X_INW(o_off) & HFA384X_OFFSET_ERR) {
prism2_io_debug_error(dev, 9);
printk(KERN_DEBUG "%s: hfa384x_setup_bap - offset error "
"(%d,0x04%x,%d); reg=0x%04x\n",
dev->name, bap, id, offset, HFA384X_INW(o_off));
ret = -EINVAL;
}
#endif
out:
return ret;
}
static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len,
int exact_len)
{
struct hostap_interface *iface;
local_info_t *local;
int res, rlen = 0;
struct hfa384x_rid_hdr rec;
iface = netdev_priv(dev);
local = iface->local;
if (local->no_pri) {
printk(KERN_DEBUG "%s: cannot get RID %04x (len=%d) - no PRI "
"f/w\n", dev->name, rid, len);
return -ENOTTY; /* Well.. not really correct, but return
* something unique enough.. */
}
if ((local->func->card_present && !local->func->card_present(local)) ||
local->hw_downloading)
return -ENODEV;
res = mutex_lock_interruptible(&local->rid_bap_mtx);
if (res)
return res;
res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS, rid, NULL, NULL);
if (res) {
printk(KERN_DEBUG "%s: hfa384x_get_rid: CMDCODE_ACCESS failed "
"(res=%d, rid=%04x, len=%d)\n",
dev->name, res, rid, len);
mutex_unlock(&local->rid_bap_mtx);
return res;
}
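/* The Access command has loaded the RID into NIC memory; read the
 * record header back through BAP0 to learn its actual length. */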
spin_lock_bh(&local->baplock);
res = hfa384x_setup_bap(dev, BAP0, rid, 0);
if (!res)
res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
if (!res && le16_to_cpu(rec.len) == 0) {
/* RID not available */
res = -ENODATA;
}
if (!res)
rlen = (le16_to_cpu(rec.len) - 1) * 2;
if (!res && exact_len && rlen != len) {
printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: "
"rid=0x%04x, len=%d (expected %d)\n",
dev->name, rid, rlen, len);
res = -ENODATA;
}
if (!res)
res = hfa384x_from_bap(dev, BAP0, buf, len);
spin_unlock_bh(&local->baplock);
mutex_unlock(&local->rid_bap_mtx);
if (res) {
if (res != -ENODATA)
printk(KERN_DEBUG "%s: hfa384x_get_rid (rid=%04x, "
"len=%d) - failed - res=%d\n", dev->name, rid,
len, res);
if (res == -ETIMEDOUT)
prism2_hw_reset(dev);
return res;
}
return rlen;
}
static int hfa384x_set_rid(struct net_device *dev, u16 rid, void *buf, int len)
{
struct hostap_interface *iface;
local_info_t *local;
struct hfa384x_rid_hdr rec;
int res;
iface = netdev_priv(dev);
local = iface->local;
if (local->no_pri) {
printk(KERN_DEBUG "%s: cannot set RID %04x (len=%d) - no PRI "
"f/w\n", dev->name, rid, len);
return -ENOTTY; /* Well.. not really correct, but return
* something unique enough.. */
}
if ((local->func->card_present && !local->func->card_present(local)) ||
local->hw_downloading)
return -ENODEV;
rec.rid = cpu_to_le16(rid);
/* RID length in 16-bit words, rounded up, plus one word for rec.rid */
rec.len = cpu_to_le16(len / 2 + len % 2 + 1);
res = mutex_lock_interruptible(&local->rid_bap_mtx);
if (res)
return res;
spin_lock_bh(&local->baplock);
res = hfa384x_setup_bap(dev, BAP0, rid, 0);
if (!res)
res = hfa384x_to_bap(dev, BAP0, &rec, sizeof(rec));
if (!res)
res = hfa384x_to_bap(dev, BAP0, buf, len);
spin_unlock_bh(&local->baplock);
if (res) {
printk(KERN_DEBUG "%s: hfa384x_set_rid (rid=%04x, len=%d) - "
"failed - res=%d\n", dev->name, rid, len, res);
mutex_unlock(&local->rid_bap_mtx);
return res;
}
res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS_WRITE, rid, NULL, NULL);
mutex_unlock(&local->rid_bap_mtx);
if (res) {
printk(KERN_DEBUG "%s: hfa384x_set_rid: CMDCODE_ACCESS_WRITE "
"failed (res=%d, rid=%04x, len=%d)\n",
dev->name, res, rid, len);
if (res == -ETIMEDOUT)
prism2_hw_reset(dev);
}
return res;
}
static void hfa384x_disable_interrupts(struct net_device *dev)
{
/* disable interrupts and clear event status */
HFA384X_OUTW(0, HFA384X_INTEN_OFF);
HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
}
static void hfa384x_enable_interrupts(struct net_device *dev)
{
/* ack pending events and enable interrupts from selected events */
HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
HFA384X_OUTW(HFA384X_EVENT_MASK, HFA384X_INTEN_OFF);
}
static void hfa384x_events_no_bap0(struct net_device *dev)
{
HFA384X_OUTW(HFA384X_EVENT_MASK & ~HFA384X_BAP0_EVENTS,
HFA384X_INTEN_OFF);
}
static void hfa384x_events_all(struct net_device *dev)
{
HFA384X_OUTW(HFA384X_EVENT_MASK, HFA384X_INTEN_OFF);
}
static void hfa384x_events_only_cmd(struct net_device *dev)
{
HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_INTEN_OFF);
}
static u16 hfa384x_allocate_fid(struct net_device *dev, int len)
{
u16 fid;
unsigned long delay;
/* FIX: this could be replaced with hfa384x_cmd() if the Alloc event
 * below were handled like the CmdCompl event (sleep here, wake up from
 * the interrupt handler) */
if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_ALLOC, len)) {
printk(KERN_DEBUG "%s: cannot allocate fid, len=%d\n",
dev->name, len);
return 0xffff;
}
delay = jiffies + HFA384X_ALLOC_COMPL_TIMEOUT;
while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_ALLOC) &&
time_before(jiffies, delay))
yield();
if (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_ALLOC)) {
printk("%s: fid allocate, len=%d - timeout\n", dev->name, len);
return 0xffff;
}
fid = HFA384X_INW(HFA384X_ALLOCFID_OFF);
HFA384X_OUTW(HFA384X_EV_ALLOC, HFA384X_EVACK_OFF);
return fid;
}
static int prism2_reset_port(struct net_device *dev)
{
struct hostap_interface *iface;
local_info_t *local;
int res;
iface = netdev_priv(dev);
local = iface->local;
if (!local->dev_enabled)
return 0;
res = hfa384x_cmd(dev, HFA384X_CMDCODE_DISABLE, 0,
NULL, NULL);
if (res)
printk(KERN_DEBUG "%s: reset port failed to disable port\n",
dev->name);
else {
res = hfa384x_cmd(dev, HFA384X_CMDCODE_ENABLE, 0,
NULL, NULL);
if (res)
printk(KERN_DEBUG "%s: reset port failed to enable "
"port\n", dev->name);
}
/* It looks like at least some STA firmware versions reset
* fragmentation threshold back to 2346 after enable command. Restore
* the configured value, if it differs from this default. */
if (local->fragm_threshold != 2346 &&
hostap_set_word(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD,
local->fragm_threshold)) {
printk(KERN_DEBUG "%s: failed to restore fragmentation "
"threshold (%d) after Port0 enable\n",
dev->name, local->fragm_threshold);
}
/* Some firmwares lose antenna selection settings on reset */
(void) hostap_set_antsel(local);
return res;
}
static int prism2_get_version_info(struct net_device *dev, u16 rid,
const char *txt)
{
struct hfa384x_comp_ident comp;
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
if (local->no_pri) {
/* PRI f/w not yet available - cannot read RIDs */
return -1;
}
if (hfa384x_get_rid(dev, rid, &comp, sizeof(comp), 1) < 0) {
printk(KERN_DEBUG "Could not get RID for component %s\n", txt);
return -1;
}
printk(KERN_INFO "%s: %s: id=0x%02x v%d.%d.%d\n", dev->name, txt,
__le16_to_cpu(comp.id), __le16_to_cpu(comp.major),
__le16_to_cpu(comp.minor), __le16_to_cpu(comp.variant));
return 0;
}
static int prism2_setup_rids(struct net_device *dev)
{
struct hostap_interface *iface;
local_info_t *local;
__le16 tmp;
int ret = 0;
iface = netdev_priv(dev);
local = iface->local;
hostap_set_word(dev, HFA384X_RID_TICKTIME, 2000);
if (!local->fw_ap) {
u16 tmp1 = hostap_get_porttype(local);
ret = hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE, tmp1);
if (ret) {
printk("%s: Port type setting to %d failed\n",
dev->name, tmp1);
goto fail;
}
}
/* Setting SSID to empty string seems to kill the card in Host AP mode
*/
if (local->iw_mode != IW_MODE_MASTER || local->essid[0] != '\0') {
ret = hostap_set_string(dev, HFA384X_RID_CNFOWNSSID,
local->essid);
if (ret) {
printk("%s: AP own SSID setting failed\n", dev->name);
goto fail;
}
}
ret = hostap_set_word(dev, HFA384X_RID_CNFMAXDATALEN,
PRISM2_DATA_MAXLEN);
if (ret) {
printk("%s: MAC data length setting to %d failed\n",
dev->name, PRISM2_DATA_MAXLEN);
goto fail;
}
if (hfa384x_get_rid(dev, HFA384X_RID_CHANNELLIST, &tmp, 2, 1) < 0) {
printk("%s: Channel list read failed\n", dev->name);
ret = -EINVAL;
goto fail;
}
local->channel_mask = le16_to_cpu(tmp);
if (local->channel < 1 || local->channel > 14 ||
!(local->channel_mask & (1 << (local->channel - 1)))) {
printk(KERN_WARNING "%s: Channel setting out of range "
"(%d)!\n", dev->name, local->channel);
ret = -EBUSY;
goto fail;
}
ret = hostap_set_word(dev, HFA384X_RID_CNFOWNCHANNEL, local->channel);
if (ret) {
printk("%s: Channel setting to %d failed\n",
dev->name, local->channel);
goto fail;
}
ret = hostap_set_word(dev, HFA384X_RID_CNFBEACONINT,
local->beacon_int);
if (ret) {
printk("%s: Beacon interval setting to %d failed\n",
dev->name, local->beacon_int);
/* this may fail with Symbol/Lucent firmware */
if (ret == -ETIMEDOUT)
goto fail;
}
ret = hostap_set_word(dev, HFA384X_RID_CNFOWNDTIMPERIOD,
local->dtim_period);
if (ret) {
printk("%s: DTIM period setting to %d failed\n",
dev->name, local->dtim_period);
/* this may fail with Symbol/Lucent firmware */
if (ret == -ETIMEDOUT)
goto fail;
}
ret = hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
local->is_promisc);
if (ret)
printk(KERN_INFO "%s: Setting promiscuous mode (%d) failed\n",
dev->name, local->is_promisc);
if (!local->fw_ap) {
ret = hostap_set_string(dev, HFA384X_RID_CNFDESIREDSSID,
local->essid);
if (ret) {
printk("%s: Desired SSID setting failed\n", dev->name);
goto fail;
}
}
/* Setup TXRateControl, defaults to allow use of 1, 2, 5.5, and
* 11 Mbps in automatic TX rate fallback and 1 and 2 Mbps as basic
* rates */
if (local->tx_rate_control == 0) {
local->tx_rate_control =
HFA384X_RATES_1MBPS |
HFA384X_RATES_2MBPS |
HFA384X_RATES_5MBPS |
HFA384X_RATES_11MBPS;
}
if (local->basic_rates == 0)
local->basic_rates = HFA384X_RATES_1MBPS | HFA384X_RATES_2MBPS;
if (!local->fw_ap) {
ret = hostap_set_word(dev, HFA384X_RID_TXRATECONTROL,
local->tx_rate_control);
if (ret) {
printk("%s: TXRateControl setting to %d failed\n",
dev->name, local->tx_rate_control);
goto fail;
}
ret = hostap_set_word(dev, HFA384X_RID_CNFSUPPORTEDRATES,
local->tx_rate_control);
if (ret) {
printk("%s: cnfSupportedRates setting to %d failed\n",
dev->name, local->tx_rate_control);
}
ret = hostap_set_word(dev, HFA384X_RID_CNFBASICRATES,
local->basic_rates);
if (ret) {
printk("%s: cnfBasicRates setting to %d failed\n",
dev->name, local->basic_rates);
}
ret = hostap_set_word(dev, HFA384X_RID_CREATEIBSS, 1);
if (ret) {
printk("%s: Create IBSS setting to 1 failed\n",
dev->name);
}
}
if (local->name_set)
(void) hostap_set_string(dev, HFA384X_RID_CNFOWNNAME,
local->name);
if (hostap_set_encryption(local)) {
printk(KERN_INFO "%s: could not configure encryption\n",
dev->name);
}
(void) hostap_set_antsel(local);
if (hostap_set_roaming(local)) {
printk(KERN_INFO "%s: could not set host roaming\n",
dev->name);
}
if (local->sta_fw_ver >= PRISM2_FW_VER(1,6,3) &&
hostap_set_word(dev, HFA384X_RID_CNFENHSECURITY, local->enh_sec))
printk(KERN_INFO "%s: cnfEnhSecurity setting to 0x%x failed\n",
dev->name, local->enh_sec);
/* 32-bit tallies were added in STA f/w 0.8.0, but they were apparently
* not working correctly (last seven counters report bogus values).
* This has been fixed in 0.8.2, so enable 32-bit tallies only
* beginning with that firmware version. Another bug fix for 32-bit
* tallies in 1.4.0; should 16-bit tallies be used for some other
* versions, too? */
if (local->sta_fw_ver >= PRISM2_FW_VER(0,8,2)) {
if (hostap_set_word(dev, HFA384X_RID_CNFTHIRTY2TALLY, 1)) {
printk(KERN_INFO "%s: cnfThirty2Tally setting "
"failed\n", dev->name);
local->tallies32 = 0;
} else
local->tallies32 = 1;
} else
local->tallies32 = 0;
hostap_set_auth_algs(local);
if (hostap_set_word(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD,
local->fragm_threshold)) {
printk(KERN_INFO "%s: setting FragmentationThreshold to %d "
"failed\n", dev->name, local->fragm_threshold);
}
if (hostap_set_word(dev, HFA384X_RID_RTSTHRESHOLD,
local->rts_threshold)) {
printk(KERN_INFO "%s: setting RTSThreshold to %d failed\n",
dev->name, local->rts_threshold);
}
if (local->manual_retry_count >= 0 &&
hostap_set_word(dev, HFA384X_RID_CNFALTRETRYCOUNT,
local->manual_retry_count)) {
printk(KERN_INFO "%s: setting cnfAltRetryCount to %d failed\n",
dev->name, local->manual_retry_count);
}
if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1) &&
hfa384x_get_rid(dev, HFA384X_RID_CNFDBMADJUST, &tmp, 2, 1) == 2) {
local->rssi_to_dBm = le16_to_cpu(tmp);
}
if (local->sta_fw_ver >= PRISM2_FW_VER(1,7,0) && local->wpa &&
hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE, 1)) {
printk(KERN_INFO "%s: setting ssnHandlingMode to 1 failed\n",
dev->name);
}
if (local->sta_fw_ver >= PRISM2_FW_VER(1,7,0) && local->generic_elem &&
hfa384x_set_rid(dev, HFA384X_RID_GENERICELEMENT,
local->generic_elem, local->generic_elem_len)) {
printk(KERN_INFO "%s: setting genericElement failed\n",
dev->name);
}
fail:
return ret;
}
static int prism2_hw_init(struct net_device *dev, int initial)
{
struct hostap_interface *iface;
local_info_t *local;
int ret, first = 1;
unsigned long start, delay;
PDEBUG(DEBUG_FLOW, "prism2_hw_init()\n");
iface = netdev_priv(dev);
local = iface->local;
clear_bit(HOSTAP_BITS_TRANSMIT, &local->bits);
init:
/* initialize HFA 384x */
ret = hfa384x_cmd_no_wait(dev, HFA384X_CMDCODE_INIT, 0);
if (ret) {
printk(KERN_INFO "%s: first command failed - assuming card "
"does not have primary firmware\n", dev_info);
}
if (first && (HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD)) {
/* EvStat has Cmd bit set in some cases, so retry once if no
* wait was needed */
HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
printk(KERN_DEBUG "%s: init command completed too quickly - "
"retrying\n", dev->name);
first = 0;
goto init;
}
start = jiffies;
delay = jiffies + HFA384X_INIT_TIMEOUT;
while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD) &&
time_before(jiffies, delay))
yield();
if (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD)) {
printk(KERN_DEBUG "%s: assuming no Primary image in "
"flash - card initialization not completed\n",
dev_info);
local->no_pri = 1;
#ifdef PRISM2_DOWNLOAD_SUPPORT
if (local->sram_type == -1)
local->sram_type = prism2_get_ram_size(local);
#endif /* PRISM2_DOWNLOAD_SUPPORT */
return 1;
}
local->no_pri = 0;
printk(KERN_DEBUG "prism2_hw_init: initialized in %lu ms\n",
(jiffies - start) * 1000 / HZ);
HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
return 0;
}
static int prism2_hw_init2(struct net_device *dev, int initial)
{
struct hostap_interface *iface;
local_info_t *local;
int i;
iface = netdev_priv(dev);
local = iface->local;
#ifdef PRISM2_DOWNLOAD_SUPPORT
kfree(local->pda);
if (local->no_pri)
local->pda = NULL;
else
local->pda = prism2_read_pda(dev);
#endif /* PRISM2_DOWNLOAD_SUPPORT */
hfa384x_disable_interrupts(dev);
#ifndef final_version
HFA384X_OUTW(HFA384X_MAGIC, HFA384X_SWSUPPORT0_OFF);
if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != HFA384X_MAGIC) {
printk("SWSUPPORT0 write/read failed: %04X != %04X\n",
HFA384X_INW(HFA384X_SWSUPPORT0_OFF), HFA384X_MAGIC);
goto failed;
}
#endif
if (initial || local->pri_only) {
hfa384x_events_only_cmd(dev);
/* get card version information */
if (prism2_get_version_info(dev, HFA384X_RID_NICID, "NIC") ||
prism2_get_version_info(dev, HFA384X_RID_PRIID, "PRI")) {
hfa384x_disable_interrupts(dev);
goto failed;
}
if (prism2_get_version_info(dev, HFA384X_RID_STAID, "STA")) {
printk(KERN_DEBUG "%s: Failed to read STA f/w version "
"- only Primary f/w present\n", dev->name);
local->pri_only = 1;
return 0;
}
local->pri_only = 0;
hfa384x_disable_interrupts(dev);
}
/* FIX: could convert allocate_fid to use sleeping CmdCompl wait and
* enable interrupts before this. This would also require some sort of
* sleeping AllocEv waiting */
/* allocate TX FIDs */
local->txfid_len = PRISM2_TXFID_LEN;
for (i = 0; i < PRISM2_TXFID_COUNT; i++) {
local->txfid[i] = hfa384x_allocate_fid(dev, local->txfid_len);
if (local->txfid[i] == 0xffff && local->txfid_len > 1600) {
local->txfid[i] = hfa384x_allocate_fid(dev, 1600);
if (local->txfid[i] != 0xffff) {
printk(KERN_DEBUG "%s: Using shorter TX FID "
"(1600 bytes)\n", dev->name);
local->txfid_len = 1600;
}
}
if (local->txfid[i] == 0xffff)
goto failed;
local->intransmitfid[i] = PRISM2_TXFID_EMPTY;
}
hfa384x_events_only_cmd(dev);
if (initial) {
struct list_head *ptr;
prism2_check_sta_fw_version(local);
if (hfa384x_get_rid(dev, HFA384X_RID_CNFOWNMACADDR,
dev->dev_addr, 6, 1) < 0) {
printk("%s: could not get own MAC address\n",
dev->name);
}
list_for_each(ptr, &local->hostap_interfaces) {
iface = list_entry(ptr, struct hostap_interface, list);
memcpy(iface->dev->dev_addr, dev->dev_addr, ETH_ALEN);
}
} else if (local->fw_ap)
prism2_check_sta_fw_version(local);
prism2_setup_rids(dev);
/* MAC is now configured, but port 0 is not yet enabled */
return 0;
failed:
if (!local->no_pri)
printk(KERN_WARNING "%s: Initialization failed\n", dev_info);
return 1;
}
static int prism2_hw_enable(struct net_device *dev, int initial)
{
struct hostap_interface *iface;
local_info_t *local;
int was_resetting;
iface = netdev_priv(dev);
local = iface->local;
was_resetting = local->hw_resetting;
if (hfa384x_cmd(dev, HFA384X_CMDCODE_ENABLE, 0, NULL, NULL)) {
printk("%s: MAC port 0 enabling failed\n", dev->name);
return 1;
}
local->hw_ready = 1;
local->hw_reset_tries = 0;
local->hw_resetting = 0;
hfa384x_enable_interrupts(dev);
/* at least D-Link DWL-650 seems to require an additional port reset
 * before it starts acting as an AP, so reset the port automatically
 * here just in case */
if (initial && prism2_reset_port(dev)) {
printk("%s: MAC port 0 reseting failed\n", dev->name);
return 1;
}
if (was_resetting && netif_queue_stopped(dev)) {
/* If hw_reset() was called during a pending transmit, the netif
 * queue was stopped. Wake it up now since the wlan card has
 * been reset. */
netif_wake_queue(dev);
}
return 0;
}
static int prism2_hw_config(struct net_device *dev, int initial)
{
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
if (local->hw_downloading)
return 1;
if (prism2_hw_init(dev, initial)) {
return local->no_pri ? 0 : 1;
}
if (prism2_hw_init2(dev, initial))
return 1;
/* Enable firmware if secondary image is loaded and at least one of the
* netdevices is up. */
if (!local->pri_only &&
(initial == 0 || (initial == 2 && local->num_dev_open > 0))) {
if (!local->dev_enabled)
prism2_callback(local, PRISM2_CALLBACK_ENABLE);
local->dev_enabled = 1;
return prism2_hw_enable(dev, initial);
}
return 0;
}
static void prism2_hw_shutdown(struct net_device *dev, int no_disable)
{
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
/* Allow only command completion events during disable */
hfa384x_events_only_cmd(dev);
local->hw_ready = 0;
if (local->dev_enabled)
prism2_callback(local, PRISM2_CALLBACK_DISABLE);
local->dev_enabled = 0;
if (local->func->card_present && !local->func->card_present(local)) {
printk(KERN_DEBUG "%s: card already removed or not configured "
"during shutdown\n", dev->name);
return;
}
if ((no_disable & HOSTAP_HW_NO_DISABLE) == 0 &&
hfa384x_cmd(dev, HFA384X_CMDCODE_DISABLE, 0, NULL, NULL))
printk(KERN_WARNING "%s: Shutdown failed\n", dev_info);
hfa384x_disable_interrupts(dev);
if (no_disable & HOSTAP_HW_ENABLE_CMDCOMPL)
hfa384x_events_only_cmd(dev);
else
prism2_clear_cmd_queue(local);
}
static void prism2_hw_reset(struct net_device *dev)
{
struct hostap_interface *iface;
local_info_t *local;
#if 0
static long last_reset = 0;
/* do not reset the card more than once per second to avoid ending up
 * in a busy loop resetting the card */
if (time_before_eq(jiffies, last_reset + HZ))
return;
last_reset = jiffies;
#endif
iface = netdev_priv(dev);
local = iface->local;
if (in_interrupt()) {
printk(KERN_DEBUG "%s: driver bug - prism2_hw_reset() called "
"in interrupt context\n", dev->name);
return;
}
if (local->hw_downloading)
return;
if (local->hw_resetting) {
printk(KERN_WARNING "%s: %s: already resetting card - "
"ignoring reset request\n", dev_info, dev->name);
return;
}
local->hw_reset_tries++;
if (local->hw_reset_tries > 10) {
printk(KERN_WARNING "%s: too many reset tries, skipping\n",
dev->name);
return;
}
printk(KERN_WARNING "%s: %s: resetting card\n", dev_info, dev->name);
hfa384x_disable_interrupts(dev);
local->hw_resetting = 1;
if (local->func->cor_sreset) {
/* Host system seems to hang in some cases with high traffic
* load or shared interrupts during COR sreset. Disable shared
* interrupts during reset to avoid these crashes. COR sreset
* takes quite a long time, so it is unfortunate that this
* seems to be needed. Anyway, I do not know of any better way
* of avoiding the crash. */
disable_irq(dev->irq);
local->func->cor_sreset(local);
enable_irq(dev->irq);
}
prism2_hw_shutdown(dev, 1);
prism2_hw_config(dev, 0);
local->hw_resetting = 0;
#ifdef PRISM2_DOWNLOAD_SUPPORT
if (local->dl_pri) {
printk(KERN_DEBUG "%s: persistent download of primary "
"firmware\n", dev->name);
if (prism2_download_genesis(local, local->dl_pri) < 0)
printk(KERN_WARNING "%s: download (PRI) failed\n",
dev->name);
}
if (local->dl_sec) {
printk(KERN_DEBUG "%s: persistent download of secondary "
"firmware\n", dev->name);
if (prism2_download_volatile(local, local->dl_sec) < 0)
printk(KERN_WARNING "%s: download (SEC) failed\n",
dev->name);
}
#endif /* PRISM2_DOWNLOAD_SUPPORT */
/* TODO: restore beacon TIM bits for STAs that have buffered frames */
}
static void prism2_schedule_reset(local_info_t *local)
{
schedule_work(&local->reset_queue);
}
/* Called only as a scheduled task after noticing a card timeout in
 * interrupt context */
static void handle_reset_queue(struct work_struct *work)
{
local_info_t *local = container_of(work, local_info_t, reset_queue);
printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name);
prism2_hw_reset(local->dev);
if (netif_queue_stopped(local->dev)) {
int i;
for (i = 0; i < PRISM2_TXFID_COUNT; i++)
if (local->intransmitfid[i] == PRISM2_TXFID_EMPTY) {
PDEBUG(DEBUG_EXTRA, "prism2_tx_timeout: "
"wake up queue\n");
netif_wake_queue(local->dev);
break;
}
}
}
static int prism2_get_txfid_idx(local_info_t *local)
{
int idx, end;
unsigned long flags;
spin_lock_irqsave(&local->txfidlock, flags);
end = idx = local->next_txfid;
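/* scan the txfid ring once, starting from the next expected free slot */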
do {
if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY) {
local->intransmitfid[idx] = PRISM2_TXFID_RESERVED;
spin_unlock_irqrestore(&local->txfidlock, flags);
return idx;
}
idx++;
if (idx >= PRISM2_TXFID_COUNT)
idx = 0;
} while (idx != end);
spin_unlock_irqrestore(&local->txfidlock, flags);
PDEBUG(DEBUG_EXTRA2, "prism2_get_txfid_idx: no room in txfid buf: "
"packet dropped\n");
local->dev->stats.tx_dropped++;
return -1;
}
/* Called only from hardware IRQ */
static void prism2_transmit_cb(struct net_device *dev, long context,
u16 resp0, u16 res)
{
struct hostap_interface *iface;
local_info_t *local;
int idx = (int) context;
iface = netdev_priv(dev);
local = iface->local;
if (res) {
printk(KERN_DEBUG "%s: prism2_transmit_cb - res=0x%02x\n",
dev->name, res);
return;
}
if (idx < 0 || idx >= PRISM2_TXFID_COUNT) {
printk(KERN_DEBUG "%s: prism2_transmit_cb called with invalid "
"idx=%d\n", dev->name, idx);
return;
}
if (!test_and_clear_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) {
printk(KERN_DEBUG "%s: driver bug: prism2_transmit_cb called "
"with no pending transmit\n", dev->name);
}
if (netif_queue_stopped(dev)) {
/* ready for next TX, so wake up queue that was stopped in
* prism2_transmit() */
netif_wake_queue(dev);
}
spin_lock(&local->txfidlock);
/* With reclaim, Resp0 contains new txfid for transmit; the old txfid
* will be automatically allocated for the next TX frame */
local->intransmitfid[idx] = resp0;
PDEBUG(DEBUG_FID, "%s: prism2_transmit_cb: txfid[%d]=0x%04x, "
"resp0=0x%04x, transmit_txfid=0x%04x\n",
dev->name, idx, local->txfid[idx],
resp0, local->intransmitfid[local->next_txfid]);
idx++;
if (idx >= PRISM2_TXFID_COUNT)
idx = 0;
local->next_txfid = idx;
/* check if all TX buffers are occupied */
do {
if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY) {
spin_unlock(&local->txfidlock);
return;
}
idx++;
if (idx >= PRISM2_TXFID_COUNT)
idx = 0;
} while (idx != local->next_txfid);
spin_unlock(&local->txfidlock);
/* no empty TX buffers, stop queue */
netif_stop_queue(dev);
}
/* Called only from software IRQ if PCI bus master is not used (with bus master
* this can be called both from software and hardware IRQ) */
static int prism2_transmit(struct net_device *dev, int idx)
{
struct hostap_interface *iface;
local_info_t *local;
int res;
iface = netdev_priv(dev);
local = iface->local;
/* The driver stops the netif queue so that there is never more than
 * one transmit attempt in flight at a time; verify that this really
 * is the case */
if (test_and_set_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) {
printk(KERN_DEBUG "%s: driver bug - prism2_transmit() called "
"when previous TX was pending\n", dev->name);
return -1;
}
/* stop the queue for the time that transmit is pending */
netif_stop_queue(dev);
/* transmit packet */
res = hfa384x_cmd_callback(
dev,
HFA384X_CMDCODE_TRANSMIT | HFA384X_CMD_TX_RECLAIM,
local->txfid[idx],
prism2_transmit_cb, (long) idx);
if (res) {
printk(KERN_DEBUG "%s: prism2_transmit: CMDCODE_TRANSMIT "
"failed (res=%d)\n", dev->name, res);
dev->stats.tx_dropped++;
netif_wake_queue(dev);
return -1;
}
dev->trans_start = jiffies;
/* Since we did not wait for command completion, the card continues
* to process on the background and we will finish handling when
* command completion event is handled (prism2_cmd_ev() function) */
return 0;
}
/* Send IEEE 802.11 frame (convert the header into Prism2 TX descriptor and
* send the payload with this descriptor) */
/* Called only from software IRQ */
static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
{
struct hostap_interface *iface;
local_info_t *local;
struct hfa384x_tx_frame txdesc;
struct hostap_skb_tx_data *meta;
int hdr_len, data_len, idx, res, ret = -1;
u16 tx_control, fc;
iface = netdev_priv(dev);
local = iface->local;
meta = (struct hostap_skb_tx_data *) skb->cb;
prism2_callback(local, PRISM2_CALLBACK_TX_START);
if ((local->func->card_present && !local->func->card_present(local)) ||
!local->hw_ready || local->hw_downloading || local->pri_only) {
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: prism2_tx_80211: hw not ready -"
" skipping\n", dev->name);
}
goto fail;
}
memset(&txdesc, 0, sizeof(txdesc));
/* skb->data starts with txdesc->frame_control */
hdr_len = 24;
skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len);
fc = le16_to_cpu(txdesc.frame_control);
if (ieee80211_is_data(txdesc.frame_control) &&
ieee80211_has_a4(txdesc.frame_control) &&
skb->len >= 30) {
/* Addr4 */
skb_copy_from_linear_data_offset(skb, hdr_len, txdesc.addr4,
ETH_ALEN);
hdr_len += ETH_ALEN;
}
tx_control = local->tx_control;
if (meta->tx_cb_idx) {
tx_control |= HFA384X_TX_CTRL_TX_OK;
txdesc.sw_support = cpu_to_le32(meta->tx_cb_idx);
}
txdesc.tx_control = cpu_to_le16(tx_control);
txdesc.tx_rate = meta->rate;
data_len = skb->len - hdr_len;
txdesc.data_len = cpu_to_le16(data_len);
txdesc.len = cpu_to_be16(data_len);
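/* Note: the Prism2 descriptor's data_len is little-endian, while the
 * 802.3-style length field in the same descriptor is big-endian. */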
idx = prism2_get_txfid_idx(local);
if (idx < 0)
goto fail;
if (local->frame_dump & PRISM2_DUMP_TX_HDR)
hostap_dump_tx_header(dev->name, &txdesc);
spin_lock(&local->baplock);
res = hfa384x_setup_bap(dev, BAP0, local->txfid[idx], 0);
if (!res)
res = hfa384x_to_bap(dev, BAP0, &txdesc, sizeof(txdesc));
if (!res)
res = hfa384x_to_bap(dev, BAP0, skb->data + hdr_len,
skb->len - hdr_len);
spin_unlock(&local->baplock);
if (!res)
res = prism2_transmit(dev, idx);
if (res) {
printk(KERN_DEBUG "%s: prism2_tx_80211 - to BAP0 failed\n",
dev->name);
local->intransmitfid[idx] = PRISM2_TXFID_EMPTY;
schedule_work(&local->reset_queue);
goto fail;
}
ret = 0;
fail:
prism2_callback(local, PRISM2_CALLBACK_TX_END);
return ret;
}
/* A number of odd errors have been reported with hostap_pci on some SMP
 * systems: the fid register has changed values between consecutive reads
 * for an unknown reason. This should really not happen, so more debugging
 * is needed. This test version is a bit slower, but it will detect most
 * such register changes and will try to get the correct fid eventually. */
#define EXTRA_FID_READ_TESTS
static u16 prism2_read_fid_reg(struct net_device *dev, u16 reg)
{
#ifdef EXTRA_FID_READ_TESTS
u16 val, val2, val3;
int i;
for (i = 0; i < 10; i++) {
val = HFA384X_INW(reg);
val2 = HFA384X_INW(reg);
val3 = HFA384X_INW(reg);
if (val == val2 && val == val3)
return val;
printk(KERN_DEBUG "%s: detected fid change (try=%d, reg=%04x):"
" %04x %04x %04x\n",
dev->name, i, reg, val, val2, val3);
if ((val == val2 || val == val3) && val != 0)
return val;
if (val2 == val3 && val2 != 0)
return val2;
}
printk(KERN_WARNING "%s: Uhhuh.. could not read good fid from reg "
"%04x (%04x %04x %04x)\n", dev->name, reg, val, val2, val3);
return val;
#else /* EXTRA_FID_READ_TESTS */
return HFA384X_INW(reg);
#endif /* EXTRA_FID_READ_TESTS */
}
/* Called only as a tasklet (software IRQ) */
static void prism2_rx(local_info_t *local)
{
struct net_device *dev = local->dev;
int res, rx_pending = 0;
u16 len, hdr_len, rxfid, status, macport;
struct hfa384x_rx_frame rxdesc;
struct sk_buff *skb = NULL;
prism2_callback(local, PRISM2_CALLBACK_RX_START);
rxfid = prism2_read_fid_reg(dev, HFA384X_RXFID_OFF);
#ifndef final_version
if (rxfid == 0) {
rxfid = HFA384X_INW(HFA384X_RXFID_OFF);
printk(KERN_DEBUG "prism2_rx: rxfid=0 (next 0x%04x)\n",
rxfid);
if (rxfid == 0) {
schedule_work(&local->reset_queue);
goto rx_dropped;
}
/* try to continue with the new rxfid value */
}
#endif
spin_lock(&local->baplock);
res = hfa384x_setup_bap(dev, BAP0, rxfid, 0);
if (!res)
res = hfa384x_from_bap(dev, BAP0, &rxdesc, sizeof(rxdesc));
if (res) {
spin_unlock(&local->baplock);
printk(KERN_DEBUG "%s: copy from BAP0 failed %d\n", dev->name,
res);
if (res == -ETIMEDOUT) {
schedule_work(&local->reset_queue);
}
goto rx_dropped;
}
len = le16_to_cpu(rxdesc.data_len);
hdr_len = sizeof(rxdesc);
status = le16_to_cpu(rxdesc.status);
macport = (status >> 8) & 0x07;
/* Drop frames with too large a reported payload length. Monitor mode
 * seems to sometimes pass frames (e.g., ctrl::ack) with a signed,
 * negative length, so also allow values 65522 .. 65534 (-14 .. -2) for
 * macport 7 */
if (len > PRISM2_DATA_MAXLEN + 8 /* WEP */) {
if (macport == 7 && local->iw_mode == IW_MODE_MONITOR) {
if (len >= (u16) -14) {
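/* treat len as a negative 16-bit value and trim that many
 * bytes off the descriptor read */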
hdr_len -= 65535 - len;
hdr_len--;
}
len = 0;
} else {
spin_unlock(&local->baplock);
printk(KERN_DEBUG "%s: Received frame with invalid "
"length 0x%04x\n", dev->name, len);
hostap_dump_rx_header(dev->name, &rxdesc);
goto rx_dropped;
}
}
skb = dev_alloc_skb(len + hdr_len);
if (!skb) {
spin_unlock(&local->baplock);
printk(KERN_DEBUG "%s: RX failed to allocate skb\n",
dev->name);
goto rx_dropped;
}
skb->dev = dev;
memcpy(skb_put(skb, hdr_len), &rxdesc, hdr_len);
if (len > 0)
res = hfa384x_from_bap(dev, BAP0, skb_put(skb, len), len);
spin_unlock(&local->baplock);
if (res) {
printk(KERN_DEBUG "%s: RX failed to read "
"frame data\n", dev->name);
goto rx_dropped;
}
skb_queue_tail(&local->rx_list, skb);
tasklet_schedule(&local->rx_tasklet);
rx_exit:
prism2_callback(local, PRISM2_CALLBACK_RX_END);
if (!rx_pending) {
HFA384X_OUTW(HFA384X_EV_RX, HFA384X_EVACK_OFF);
}
return;
rx_dropped:
dev->stats.rx_dropped++;
if (skb)
dev_kfree_skb(skb);
goto rx_exit;
}
/* Called only as a tasklet (software IRQ) */
static void hostap_rx_skb(local_info_t *local, struct sk_buff *skb)
{
struct hfa384x_rx_frame *rxdesc;
struct net_device *dev = skb->dev;
struct hostap_80211_rx_status stats;
int hdrlen, rx_hdrlen;
rx_hdrlen = sizeof(*rxdesc);
if (skb->len < sizeof(*rxdesc)) {
/* Allow monitor mode to receive shorter frames */
if (local->iw_mode == IW_MODE_MONITOR &&
skb->len >= sizeof(*rxdesc) - 30) {
rx_hdrlen = skb->len;
} else {
dev_kfree_skb(skb);
return;
}
}
rxdesc = (struct hfa384x_rx_frame *) skb->data;
if (local->frame_dump & PRISM2_DUMP_RX_HDR &&
skb->len >= sizeof(*rxdesc))
hostap_dump_rx_header(dev->name, rxdesc);
if (le16_to_cpu(rxdesc->status) & HFA384X_RX_STATUS_FCSERR &&
(!local->monitor_allow_fcserr ||
local->iw_mode != IW_MODE_MONITOR))
goto drop;
if (skb->len > PRISM2_DATA_MAXLEN) {
printk(KERN_DEBUG "%s: RX: len(%d) > MAX(%d)\n",
dev->name, skb->len, PRISM2_DATA_MAXLEN);
goto drop;
}
stats.mac_time = le32_to_cpu(rxdesc->time);
stats.signal = rxdesc->signal - local->rssi_to_dBm;
stats.noise = rxdesc->silence - local->rssi_to_dBm;
stats.rate = rxdesc->rate;
/* Convert Prism2 RX structure into IEEE 802.11 header */
hdrlen = hostap_80211_get_hdrlen(rxdesc->frame_control);
if (hdrlen > rx_hdrlen)
hdrlen = rx_hdrlen;
memmove(skb_pull(skb, rx_hdrlen - hdrlen),
&rxdesc->frame_control, hdrlen);
hostap_80211_rx(dev, skb, &stats);
return;
drop:
dev_kfree_skb(skb);
}
/* Called only as a tasklet (software IRQ) */
static void hostap_rx_tasklet(unsigned long data)
{
local_info_t *local = (local_info_t *) data;
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->rx_list)) != NULL)
hostap_rx_skb(local, skb);
}
/* Called only from hardware IRQ */
static void prism2_alloc_ev(struct net_device *dev)
{
struct hostap_interface *iface;
local_info_t *local;
int idx;
u16 fid;
iface = netdev_priv(dev);
local = iface->local;
fid = prism2_read_fid_reg(dev, HFA384X_ALLOCFID_OFF);
PDEBUG(DEBUG_FID, "FID: interrupt: ALLOC - fid=0x%04x\n", fid);
spin_lock(&local->txfidlock);
idx = local->next_alloc;
do {
if (local->txfid[idx] == fid) {
PDEBUG(DEBUG_FID, "FID: found matching txfid[%d]\n",
idx);
#ifndef final_version
if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY)
printk("Already released txfid found at idx "
"%d\n", idx);
if (local->intransmitfid[idx] == PRISM2_TXFID_RESERVED)
printk("Already reserved txfid found at idx "
"%d\n", idx);
#endif
local->intransmitfid[idx] = PRISM2_TXFID_EMPTY;
idx++;
local->next_alloc = idx >= PRISM2_TXFID_COUNT ? 0 :
idx;
if (!test_bit(HOSTAP_BITS_TRANSMIT, &local->bits) &&
netif_queue_stopped(dev))
netif_wake_queue(dev);
spin_unlock(&local->txfidlock);
return;
}
idx++;
if (idx >= PRISM2_TXFID_COUNT)
idx = 0;
} while (idx != local->next_alloc);
printk(KERN_WARNING "%s: could not find matching txfid (0x%04x, new "
"read 0x%04x) for alloc event\n", dev->name, fid,
HFA384X_INW(HFA384X_ALLOCFID_OFF));
printk(KERN_DEBUG "TXFIDs:");
for (idx = 0; idx < PRISM2_TXFID_COUNT; idx++)
printk(" %04x[%04x]", local->txfid[idx],
local->intransmitfid[idx]);
printk("\n");
spin_unlock(&local->txfidlock);
/* FIX: should probably schedule reset; reference to one txfid was lost
* completely.. Bad things will happen if we run out of txfids
* Actually, this will cause netdev watchdog to notice TX timeout and
* then card reset after all txfids have been leaked. */
}
/* Called only as a tasklet (software IRQ) */
static void hostap_tx_callback(local_info_t *local,
struct hfa384x_tx_frame *txdesc, int ok,
char *payload)
{
u16 sw_support, hdrlen, len;
struct sk_buff *skb;
struct hostap_tx_callback_info *cb;
/* Make sure that frame was from us. */
if (memcmp(txdesc->addr2, local->dev->dev_addr, ETH_ALEN)) {
printk(KERN_DEBUG "%s: TX callback - foreign frame\n",
local->dev->name);
return;
}
sw_support = le32_to_cpu(txdesc->sw_support);
spin_lock(&local->lock);
cb = local->tx_callback;
while (cb != NULL && cb->idx != sw_support)
cb = cb->next;
spin_unlock(&local->lock);
if (cb == NULL) {
printk(KERN_DEBUG "%s: could not find TX callback (idx %d)\n",
local->dev->name, sw_support);
return;
}
hdrlen = hostap_80211_get_hdrlen(txdesc->frame_control);
len = le16_to_cpu(txdesc->data_len);
skb = dev_alloc_skb(hdrlen + len);
if (skb == NULL) {
printk(KERN_DEBUG "%s: hostap_tx_callback failed to allocate "
"skb\n", local->dev->name);
return;
}
memcpy(skb_put(skb, hdrlen), (void *) &txdesc->frame_control, hdrlen);
if (payload)
memcpy(skb_put(skb, len), payload, len);
skb->dev = local->dev;
skb_reset_mac_header(skb);
cb->func(skb, ok, cb->data);
}
/* Called only as a tasklet (software IRQ) */
static int hostap_tx_compl_read(local_info_t *local, int error,
struct hfa384x_tx_frame *txdesc,
char **payload)
{
u16 fid, len;
int res, ret = 0;
struct net_device *dev = local->dev;
fid = prism2_read_fid_reg(dev, HFA384X_TXCOMPLFID_OFF);
PDEBUG(DEBUG_FID, "interrupt: TX (err=%d) - fid=0x%04x\n", fid, error);
spin_lock(&local->baplock);
res = hfa384x_setup_bap(dev, BAP0, fid, 0);
if (!res)
res = hfa384x_from_bap(dev, BAP0, txdesc, sizeof(*txdesc));
if (res) {
PDEBUG(DEBUG_EXTRA, "%s: TX (err=%d) - fid=0x%04x - could not "
"read txdesc\n", dev->name, error, fid);
if (res == -ETIMEDOUT) {
schedule_work(&local->reset_queue);
}
ret = -1;
goto fail;
}
if (txdesc->sw_support) {
len = le16_to_cpu(txdesc->data_len);
if (len < PRISM2_DATA_MAXLEN) {
*payload = kmalloc(len, GFP_ATOMIC);
if (*payload == NULL ||
hfa384x_from_bap(dev, BAP0, *payload, len)) {
PDEBUG(DEBUG_EXTRA, "%s: could not read TX "
"frame payload\n", dev->name);
kfree(*payload);
*payload = NULL;
ret = -1;
goto fail;
}
}
}
fail:
spin_unlock(&local->baplock);
return ret;
}
/* Called only as a tasklet (software IRQ) */
static void prism2_tx_ev(local_info_t *local)
{
struct net_device *dev = local->dev;
char *payload = NULL;
struct hfa384x_tx_frame txdesc;
if (hostap_tx_compl_read(local, 0, &txdesc, &payload))
goto fail;
if (local->frame_dump & PRISM2_DUMP_TX_HDR) {
PDEBUG(DEBUG_EXTRA, "%s: TX - status=0x%04x "
"retry_count=%d tx_rate=%d seq_ctrl=%d "
"duration_id=%d\n",
dev->name, le16_to_cpu(txdesc.status),
txdesc.retry_count, txdesc.tx_rate,
le16_to_cpu(txdesc.seq_ctrl),
le16_to_cpu(txdesc.duration_id));
}
if (txdesc.sw_support)
hostap_tx_callback(local, &txdesc, 1, payload);
kfree(payload);
fail:
HFA384X_OUTW(HFA384X_EV_TX, HFA384X_EVACK_OFF);
}
/* Called only as a tasklet (software IRQ) */
static void hostap_sta_tx_exc_tasklet(unsigned long data)
{
local_info_t *local = (local_info_t *) data;
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->sta_tx_exc_list)) != NULL) {
struct hfa384x_tx_frame *txdesc =
(struct hfa384x_tx_frame *) skb->data;
if (skb->len >= sizeof(*txdesc)) {
/* Convert the Prism2 TX frame structure into an IEEE 802.11
 * header */
int hdrlen = hostap_80211_get_hdrlen(txdesc->frame_control);
memmove(skb_pull(skb, sizeof(*txdesc) - hdrlen),
&txdesc->frame_control, hdrlen);
hostap_handle_sta_tx_exc(local, skb);
}
dev_kfree_skb(skb);
}
}
/* Called only as a tasklet (software IRQ) */
static void prism2_txexc(local_info_t *local)
{
struct net_device *dev = local->dev;
u16 status, fc;
int show_dump, res;
char *payload = NULL;
struct hfa384x_tx_frame txdesc;
show_dump = local->frame_dump & PRISM2_DUMP_TXEXC_HDR;
dev->stats.tx_errors++;
res = hostap_tx_compl_read(local, 1, &txdesc, &payload);
HFA384X_OUTW(HFA384X_EV_TXEXC, HFA384X_EVACK_OFF);
if (res)
return;
status = le16_to_cpu(txdesc.status);
/* We produce a TXDROP event only for retry or lifetime
 * exceeded, because those are the only statuses that really mean
 * that this particular node went away.
 * Other errors mean that *we* screwed up. - Jean II */
if (status & (HFA384X_TX_STATUS_RETRYERR | HFA384X_TX_STATUS_AGEDERR))
{
union iwreq_data wrqu;
/* Copy 802.11 dest address. */
memcpy(wrqu.addr.sa_data, txdesc.addr1, ETH_ALEN);
wrqu.addr.sa_family = ARPHRD_ETHER;
wireless_send_event(dev, IWEVTXDROP, &wrqu, NULL);
} else
show_dump = 1;
if (local->iw_mode == IW_MODE_MASTER ||
local->iw_mode == IW_MODE_REPEAT ||
local->wds_type & HOSTAP_WDS_AP_CLIENT) {
struct sk_buff *skb;
skb = dev_alloc_skb(sizeof(txdesc));
if (skb) {
memcpy(skb_put(skb, sizeof(txdesc)), &txdesc,
sizeof(txdesc));
skb_queue_tail(&local->sta_tx_exc_list, skb);
tasklet_schedule(&local->sta_tx_exc_tasklet);
}
}
if (txdesc.sw_support)
hostap_tx_callback(local, &txdesc, 0, payload);
kfree(payload);
if (!show_dump)
return;
PDEBUG(DEBUG_EXTRA, "%s: TXEXC - status=0x%04x (%s%s%s%s)"
" tx_control=%04x\n",
dev->name, status,
status & HFA384X_TX_STATUS_RETRYERR ? "[RetryErr]" : "",
status & HFA384X_TX_STATUS_AGEDERR ? "[AgedErr]" : "",
status & HFA384X_TX_STATUS_DISCON ? "[Discon]" : "",
status & HFA384X_TX_STATUS_FORMERR ? "[FormErr]" : "",
le16_to_cpu(txdesc.tx_control));
fc = le16_to_cpu(txdesc.frame_control);
PDEBUG(DEBUG_EXTRA, " retry_count=%d tx_rate=%d fc=0x%04x "
"(%s%s%s::%d%s%s)\n",
txdesc.retry_count, txdesc.tx_rate, fc,
ieee80211_is_mgmt(txdesc.frame_control) ? "Mgmt" : "",
ieee80211_is_ctl(txdesc.frame_control) ? "Ctrl" : "",
ieee80211_is_data(txdesc.frame_control) ? "Data" : "",
(fc & IEEE80211_FCTL_STYPE) >> 4,
ieee80211_has_tods(txdesc.frame_control) ? " ToDS" : "",
ieee80211_has_fromds(txdesc.frame_control) ? " FromDS" : "");
PDEBUG(DEBUG_EXTRA, " A1=%pM A2=%pM A3=%pM A4=%pM\n",
txdesc.addr1, txdesc.addr2,
txdesc.addr3, txdesc.addr4);
}
/* Called only as a tasklet (software IRQ) */
static void hostap_info_tasklet(unsigned long data)
{
local_info_t *local = (local_info_t *) data;
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->info_list)) != NULL) {
hostap_info_process(local, skb);
dev_kfree_skb(skb);
}
}
/* Called only as a tasklet (software IRQ) */
static void prism2_info(local_info_t *local)
{
struct net_device *dev = local->dev;
u16 fid;
int res, left;
struct hfa384x_info_frame info;
struct sk_buff *skb;
fid = HFA384X_INW(HFA384X_INFOFID_OFF);
spin_lock(&local->baplock);
res = hfa384x_setup_bap(dev, BAP0, fid, 0);
if (!res)
res = hfa384x_from_bap(dev, BAP0, &info, sizeof(info));
if (res) {
spin_unlock(&local->baplock);
printk(KERN_DEBUG "Could not get info frame (fid=0x%04x)\n",
fid);
if (res == -ETIMEDOUT) {
schedule_work(&local->reset_queue);
}
goto out;
}
left = (le16_to_cpu(info.len) - 1) * 2;
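/* info.len counts 16-bit words including the type field, hence the -1 */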
if (info.len & cpu_to_le16(0x8000) || info.len == 0 || left > 2060) {
/* data register seems to give 0x8000 in some error cases even
* though busy bit is not set in offset register;
* in addition, length must be at least 1 due to type field */
spin_unlock(&local->baplock);
printk(KERN_DEBUG "%s: Received info frame with invalid "
"length 0x%04x (type 0x%04x)\n", dev->name,
le16_to_cpu(info.len), le16_to_cpu(info.type));
goto out;
}
skb = dev_alloc_skb(sizeof(info) + left);
if (skb == NULL) {
spin_unlock(&local->baplock);
printk(KERN_DEBUG "%s: Could not allocate skb for info "
"frame\n", dev->name);
goto out;
}
memcpy(skb_put(skb, sizeof(info)), &info, sizeof(info));
if (left > 0 && hfa384x_from_bap(dev, BAP0, skb_put(skb, left), left))
{
spin_unlock(&local->baplock);
printk(KERN_WARNING "%s: Info frame read failed (fid=0x%04x, "
"len=0x%04x, type=0x%04x\n", dev->name, fid,
le16_to_cpu(info.len), le16_to_cpu(info.type));
dev_kfree_skb(skb);
goto out;
}
spin_unlock(&local->baplock);
skb_queue_tail(&local->info_list, skb);
tasklet_schedule(&local->info_tasklet);
out:
HFA384X_OUTW(HFA384X_EV_INFO, HFA384X_EVACK_OFF);
}
/* Called only as a tasklet (software IRQ) */
static void hostap_bap_tasklet(unsigned long data)
{
local_info_t *local = (local_info_t *) data;
struct net_device *dev = local->dev;
u16 ev;
int frames = 30;
if (local->func->card_present && !local->func->card_present(local))
return;
set_bit(HOSTAP_BITS_BAP_TASKLET, &local->bits);
/* Process all pending BAP events without generating new interrupts
* for them */
while (frames-- > 0) {
ev = HFA384X_INW(HFA384X_EVSTAT_OFF);
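/* a 0xffff read typically means the card has been removed */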
if (ev == 0xffff || !(ev & HFA384X_BAP0_EVENTS))
break;
if (ev & HFA384X_EV_RX)
prism2_rx(local);
if (ev & HFA384X_EV_INFO)
prism2_info(local);
if (ev & HFA384X_EV_TX)
prism2_tx_ev(local);
if (ev & HFA384X_EV_TXEXC)
prism2_txexc(local);
}
set_bit(HOSTAP_BITS_BAP_TASKLET2, &local->bits);
clear_bit(HOSTAP_BITS_BAP_TASKLET, &local->bits);
/* Enable interrupts for new BAP events */
hfa384x_events_all(dev);
clear_bit(HOSTAP_BITS_BAP_TASKLET2, &local->bits);
}
/* Called only from hardware IRQ */
static void prism2_infdrop(struct net_device *dev)
{
static unsigned long last_inquire = 0;
PDEBUG(DEBUG_EXTRA, "%s: INFDROP event\n", dev->name);
/* some firmware versions seem to get stuck with
 * full CommTallies under high traffic load; every
 * packet will then cause an INFDROP event and the CommTallies
 * info frame will not be sent automatically. Try to
 * get out of this state by inquiring CommTallies. */
if (!last_inquire || time_after(jiffies, last_inquire + HZ)) {
hfa384x_cmd_callback(dev, HFA384X_CMDCODE_INQUIRE,
HFA384X_INFO_COMMTALLIES, NULL, 0);
last_inquire = jiffies;
}
}
/* Called only from hardware IRQ */
static void prism2_ev_tick(struct net_device *dev)
{
struct hostap_interface *iface;
local_info_t *local;
u16 evstat, inten;
static int prev_stuck = 0;
iface = netdev_priv(dev);
local = iface->local;
if (time_after(jiffies, local->last_tick_timer + 5 * HZ) &&
local->last_tick_timer) {
evstat = HFA384X_INW(HFA384X_EVSTAT_OFF);
inten = HFA384X_INW(HFA384X_INTEN_OFF);
if (!prev_stuck) {
printk(KERN_INFO "%s: SW TICK stuck? "
"bits=0x%lx EvStat=%04x IntEn=%04x\n",
dev->name, local->bits, evstat, inten);
}
local->sw_tick_stuck++;
if ((evstat & HFA384X_BAP0_EVENTS) &&
(inten & HFA384X_BAP0_EVENTS)) {
printk(KERN_INFO "%s: trying to recover from IRQ "
"hang\n", dev->name);
hfa384x_events_no_bap0(dev);
}
prev_stuck = 1;
} else
prev_stuck = 0;
}
/* Called only from hardware IRQ */
static void prism2_check_magic(local_info_t *local)
{
/* at least PCI Prism2.5 with bus mastering seems to sometimes
* return 0x0000 in SWSUPPORT0 for unknown reason, but re-reading the
* register once or twice seems to get the correct value.. PCI cards
* cannot be removed during normal operation anyway, so there is not
* really any need for this verification with them. */
#ifndef PRISM2_PCI
#ifndef final_version
static unsigned long last_magic_err = 0;
struct net_device *dev = local->dev;
if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != HFA384X_MAGIC) {
if (!local->hw_ready)
return;
HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
if (time_after(jiffies, last_magic_err + 10 * HZ)) {
printk("%s: Interrupt, but SWSUPPORT0 does not match: "
"%04X != %04X - card removed?\n", dev->name,
HFA384X_INW(HFA384X_SWSUPPORT0_OFF),
HFA384X_MAGIC);
last_magic_err = jiffies;
} else if (net_ratelimit()) {
printk(KERN_DEBUG "%s: interrupt - SWSUPPORT0=%04x "
"MAGIC=%04x\n", dev->name,
HFA384X_INW(HFA384X_SWSUPPORT0_OFF),
HFA384X_MAGIC);
}
if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != 0xffff)
schedule_work(&local->reset_queue);
return;
}
#endif /* final_version */
#endif /* !PRISM2_PCI */
}
/* Called only from hardware IRQ */
static irqreturn_t prism2_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct hostap_interface *iface;
local_info_t *local;
int events = 0;
u16 ev;
iface = netdev_priv(dev);
local = iface->local;
/* Detect early interrupt before driver is fully configured */
spin_lock(&local->irq_init_lock);
if (!dev->base_addr) {
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
dev->name);
}
spin_unlock(&local->irq_init_lock);
return IRQ_HANDLED;
}
spin_unlock(&local->irq_init_lock);
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 0);
if (local->func->card_present && !local->func->card_present(local)) {
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: Interrupt, but dev not OK\n",
dev->name);
}
return IRQ_HANDLED;
}
prism2_check_magic(local);
for (;;) {
ev = HFA384X_INW(HFA384X_EVSTAT_OFF);
if (ev == 0xffff) {
if (local->shutdown)
return IRQ_HANDLED;
HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
printk(KERN_DEBUG "%s: prism2_interrupt: ev=0xffff\n",
dev->name);
return IRQ_HANDLED;
}
ev &= HFA384X_INW(HFA384X_INTEN_OFF);
if (ev == 0)
break;
if (ev & HFA384X_EV_CMD) {
prism2_cmd_ev(dev);
}
/* Above events are needed even before hw is ready, but other
* events should be skipped during initialization. This may
* change for AllocEv if allocate_fid is implemented without
* busy waiting. */
if (!local->hw_ready || local->hw_resetting ||
!local->dev_enabled) {
ev = HFA384X_INW(HFA384X_EVSTAT_OFF);
if (ev & HFA384X_EV_CMD)
goto next_event;
if ((ev & HFA384X_EVENT_MASK) == 0)
return IRQ_HANDLED;
if (local->dev_enabled && (ev & ~HFA384X_EV_TICK) &&
net_ratelimit()) {
printk(KERN_DEBUG "%s: prism2_interrupt: hw "
"not ready; skipping events 0x%04x "
"(IntEn=0x%04x)%s%s%s\n",
dev->name, ev,
HFA384X_INW(HFA384X_INTEN_OFF),
!local->hw_ready ? " (!hw_ready)" : "",
local->hw_resetting ?
" (hw_resetting)" : "",
!local->dev_enabled ?
" (!dev_enabled)" : "");
}
HFA384X_OUTW(ev, HFA384X_EVACK_OFF);
return IRQ_HANDLED;
}
if (ev & HFA384X_EV_TICK) {
prism2_ev_tick(dev);
HFA384X_OUTW(HFA384X_EV_TICK, HFA384X_EVACK_OFF);
}
if (ev & HFA384X_EV_ALLOC) {
prism2_alloc_ev(dev);
HFA384X_OUTW(HFA384X_EV_ALLOC, HFA384X_EVACK_OFF);
}
/* Reading data from the card is quite time consuming, so do it
* in tasklets. TX, TXEXC, RX, and INFO events will be ACKed
* and unmasked after needed data has been read completely. */
if (ev & HFA384X_BAP0_EVENTS) {
hfa384x_events_no_bap0(dev);
tasklet_schedule(&local->bap_tasklet);
}
#ifndef final_version
if (ev & HFA384X_EV_WTERR) {
PDEBUG(DEBUG_EXTRA, "%s: WTERR event\n", dev->name);
HFA384X_OUTW(HFA384X_EV_WTERR, HFA384X_EVACK_OFF);
}
#endif /* final_version */
if (ev & HFA384X_EV_INFDROP) {
prism2_infdrop(dev);
HFA384X_OUTW(HFA384X_EV_INFDROP, HFA384X_EVACK_OFF);
}
next_event:
events++;
if (events >= PRISM2_MAX_INTERRUPT_EVENTS) {
PDEBUG(DEBUG_EXTRA, "prism2_interrupt: >%d events "
"(EvStat=0x%04x)\n",
PRISM2_MAX_INTERRUPT_EVENTS,
HFA384X_INW(HFA384X_EVSTAT_OFF));
break;
}
}
prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 1);
return IRQ_RETVAL(events);
}
static void prism2_check_sta_fw_version(local_info_t *local)
{
struct hfa384x_comp_ident comp;
int id, variant, major, minor;
if (hfa384x_get_rid(local->dev, HFA384X_RID_STAID,
&comp, sizeof(comp), 1) < 0)
return;
local->fw_ap = 0;
id = le16_to_cpu(comp.id);
if (id != HFA384X_COMP_ID_STA) {
if (id == HFA384X_COMP_ID_FW_AP)
local->fw_ap = 1;
return;
}
major = __le16_to_cpu(comp.major);
minor = __le16_to_cpu(comp.minor);
variant = __le16_to_cpu(comp.variant);
local->sta_fw_ver = PRISM2_FW_VER(major, minor, variant);
/* Station firmware versions before 1.4.x seem to have a bug in
* firmware-based WEP encryption when using Host AP mode, so use
* host_encrypt as a default for them. Firmware version 1.4.9 is the
* first one that has been seen to produce correct encryption, but the
* bug might be fixed before that (although, at least 1.4.2 is broken).
*/
local->fw_encrypt_ok = local->sta_fw_ver >= PRISM2_FW_VER(1,4,9);
if (local->iw_mode == IW_MODE_MASTER && !local->host_encrypt &&
!local->fw_encrypt_ok) {
printk(KERN_DEBUG "%s: defaulting to host-based encryption as "
"a workaround for firmware bug in Host AP mode WEP\n",
local->dev->name);
local->host_encrypt = 1;
}
/* IEEE 802.11 standard compliant WDS frames (4 addresses) were broken
* in station firmware versions before 1.5.x. With these versions, the
* driver uses a workaround with bogus frame format (4th address after
* the payload). This is not compatible with other AP devices. Since
* the firmware bug is fixed in the latest station firmware versions,
* automatically enable standard compliant mode for cards using station
* firmware version 1.5.0 or newer. */
if (local->sta_fw_ver >= PRISM2_FW_VER(1,5,0))
local->wds_type |= HOSTAP_WDS_STANDARD_FRAME;
else {
printk(KERN_DEBUG "%s: defaulting to bogus WDS frame as a "
"workaround for firmware bug in Host AP mode WDS\n",
local->dev->name);
}
hostap_check_sta_fw_version(local->ap, local->sta_fw_ver);
}
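/* A minimal worked example of the version comparison above, assuming
* PRISM2_FW_VER() packs the components as ((major << 16) | (minor << 8) |
* variant) (check the driver header for the exact definition):
*/
#if 0 /* illustrative sketch only, not compiled */
u32 ver_1_4_2 = PRISM2_FW_VER(1, 4, 2); /* 0x010402 - known-broken WEP */
u32 ver_1_4_9 = PRISM2_FW_VER(1, 4, 9); /* 0x010409 - first known-good */
int fw_encrypt_ok = ver_1_4_2 >= ver_1_4_9; /* 0: forces host_encrypt */
#endif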
static void hostap_passive_scan(unsigned long data)
{
local_info_t *local = (local_info_t *) data;
struct net_device *dev = local->dev;
u16 chan;
if (local->passive_scan_interval <= 0)
return;
if (local->passive_scan_state == PASSIVE_SCAN_LISTEN) {
int max_tries = 16;
/* Even though the host system does not really know when the WLAN
* MAC is sending frames, try to avoid changing channels for
* passive scanning while a host-generated frame is being
* transmitted */
if (test_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) {
printk(KERN_DEBUG "%s: passive scan detected pending "
"TX - delaying\n", dev->name);
local->passive_scan_timer.expires = jiffies + HZ / 10;
add_timer(&local->passive_scan_timer);
return;
}
do {
local->passive_scan_channel++;
if (local->passive_scan_channel > 14)
local->passive_scan_channel = 1;
max_tries--;
} while (!(local->channel_mask &
(1 << (local->passive_scan_channel - 1))) &&
max_tries > 0);
if (!(local->channel_mask &
(1 << (local->passive_scan_channel - 1)))) {
printk(KERN_INFO "%s: no allowed passive scan channels"
" found\n", dev->name);
return;
}
printk(KERN_DEBUG "%s: passive scan channel %d\n",
dev->name, local->passive_scan_channel);
chan = local->passive_scan_channel;
local->passive_scan_state = PASSIVE_SCAN_WAIT;
local->passive_scan_timer.expires = jiffies + HZ / 10;
} else {
chan = local->channel;
local->passive_scan_state = PASSIVE_SCAN_LISTEN;
local->passive_scan_timer.expires = jiffies +
local->passive_scan_interval * HZ;
}
if (hfa384x_cmd_callback(dev, HFA384X_CMDCODE_TEST |
(HFA384X_TEST_CHANGE_CHANNEL << 8),
chan, NULL, 0))
printk(KERN_ERR "%s: passive scan channel set %d "
"failed\n", dev->name, chan);
add_timer(&local->passive_scan_timer);
}
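/* A short worked example of the channel test above: channels are numbered
* 1..14 and channel N maps to bit (N - 1) of channel_mask. With a
* hypothetical mask of 0x07ff (channels 1-11 allowed):
*/
#if 0 /* illustrative sketch only, not compiled */
u16 mask = 0x07ff;
int ch11_ok = mask & (1 << (11 - 1)); /* bit 10 set -> allowed */
int ch12_ok = mask & (1 << (12 - 1)); /* bit 11 clear -> skipped */
#endif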
/* Called only as a scheduled task when communications quality values should
* be updated. */
static void handle_comms_qual_update(struct work_struct *work)
{
local_info_t *local =
container_of(work, local_info_t, comms_qual_update);
prism2_update_comms_qual(local->dev);
}
/* Software watchdog - called as a timer. Hardware interrupt (Tick event) is
* used to monitor that local->last_tick_timer is being updated. If not,
* interrupt busy-loop is assumed and driver tries to recover by masking out
* some events. */
static void hostap_tick_timer(unsigned long data)
{
static unsigned long last_inquire;
local_info_t *local = (local_info_t *) data;
local->last_tick_timer = jiffies;
/* Inquire CommTallies every 10 seconds to keep the statistics updated
* more often during low load and when using 32-bit tallies. */
if ((!last_inquire || time_after(jiffies, last_inquire + 10 * HZ)) &&
!local->hw_downloading && local->hw_ready &&
!local->hw_resetting && local->dev_enabled) {
hfa384x_cmd_callback(local->dev, HFA384X_CMDCODE_INQUIRE,
HFA384X_INFO_COMMTALLIES, NULL, 0);
last_inquire = jiffies;
}
if ((local->last_comms_qual_update == 0 ||
time_after(jiffies, local->last_comms_qual_update + 10 * HZ)) &&
(local->iw_mode == IW_MODE_INFRA ||
local->iw_mode == IW_MODE_ADHOC)) {
schedule_work(&local->comms_qual_update);
}
local->tick_timer.expires = jiffies + 2 * HZ;
add_timer(&local->tick_timer);
}
#ifndef PRISM2_NO_PROCFS_DEBUG
static int prism2_registers_proc_read(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
char *p = page;
local_info_t *local = (local_info_t *) data;
if (off != 0) {
*eof = 1;
return 0;
}
#define SHOW_REG(n) \
p += sprintf(p, #n "=%04x\n", hfa384x_read_reg(local->dev, HFA384X_##n##_OFF))
SHOW_REG(CMD);
SHOW_REG(PARAM0);
SHOW_REG(PARAM1);
SHOW_REG(PARAM2);
SHOW_REG(STATUS);
SHOW_REG(RESP0);
SHOW_REG(RESP1);
SHOW_REG(RESP2);
SHOW_REG(INFOFID);
SHOW_REG(CONTROL);
SHOW_REG(SELECT0);
SHOW_REG(SELECT1);
SHOW_REG(OFFSET0);
SHOW_REG(OFFSET1);
SHOW_REG(RXFID);
SHOW_REG(ALLOCFID);
SHOW_REG(TXCOMPLFID);
SHOW_REG(SWSUPPORT0);
SHOW_REG(SWSUPPORT1);
SHOW_REG(SWSUPPORT2);
SHOW_REG(EVSTAT);
SHOW_REG(INTEN);
SHOW_REG(EVACK);
/* Do not read data registers, because they change the state of the
* MAC (offset += 2) */
/* SHOW_REG(DATA0); */
/* SHOW_REG(DATA1); */
SHOW_REG(AUXPAGE);
SHOW_REG(AUXOFFSET);
/* SHOW_REG(AUXDATA); */
#ifdef PRISM2_PCI
SHOW_REG(PCICOR);
SHOW_REG(PCIHCR);
SHOW_REG(PCI_M0_ADDRH);
SHOW_REG(PCI_M0_ADDRL);
SHOW_REG(PCI_M0_LEN);
SHOW_REG(PCI_M0_CTL);
SHOW_REG(PCI_STATUS);
SHOW_REG(PCI_M1_ADDRH);
SHOW_REG(PCI_M1_ADDRL);
SHOW_REG(PCI_M1_LEN);
SHOW_REG(PCI_M1_CTL);
#endif /* PRISM2_PCI */
return (p - page);
}
#endif /* PRISM2_NO_PROCFS_DEBUG */
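/* For reference, a sketch of what SHOW_REG() above expands to: #n
* stringizes the argument and ## pastes it into the register-offset
* identifier, so SHOW_REG(CMD) becomes:
*/
#if 0 /* illustrative sketch only, not compiled */
p += sprintf(p, "CMD" "=%04x\n",
hfa384x_read_reg(local->dev, HFA384X_CMD_OFF));
#endif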
struct set_tim_data {
struct list_head list;
int aid;
int set;
};
static int prism2_set_tim(struct net_device *dev, int aid, int set)
{
struct list_head *ptr;
struct set_tim_data *new_entry;
struct hostap_interface *iface;
local_info_t *local;
iface = netdev_priv(dev);
local = iface->local;
new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
if (new_entry == NULL) {
printk(KERN_DEBUG "%s: prism2_set_tim: kmalloc failed\n",
local->dev->name);
return -ENOMEM;
}
new_entry->aid = aid;
new_entry->set = set;
spin_lock_bh(&local->set_tim_lock);
list_for_each(ptr, &local->set_tim_list) {
struct set_tim_data *entry =
list_entry(ptr, struct set_tim_data, list);
if (entry->aid == aid) {
PDEBUG(DEBUG_PS2, "%s: prism2_set_tim: aid=%d "
"set=%d ==> %d\n",
local->dev->name, aid, entry->set, set);
entry->set = set;
kfree(new_entry);
new_entry = NULL;
break;
}
}
if (new_entry)
list_add_tail(&new_entry->list, &local->set_tim_list);
spin_unlock_bh(&local->set_tim_lock);
schedule_work(&local->set_tim_queue);
return 0;
}
static void handle_set_tim_queue(struct work_struct *work)
{
local_info_t *local = container_of(work, local_info_t, set_tim_queue);
struct set_tim_data *entry;
u16 val;
for (;;) {
entry = NULL;
spin_lock_bh(&local->set_tim_lock);
if (!list_empty(&local->set_tim_list)) {
entry = list_entry(local->set_tim_list.next,
struct set_tim_data, list);
list_del(&entry->list);
}
spin_unlock_bh(&local->set_tim_lock);
if (!entry)
break;
PDEBUG(DEBUG_PS2, "%s: handle_set_tim_queue: aid=%d set=%d\n",
local->dev->name, entry->aid, entry->set);
val = entry->aid;
if (entry->set)
val |= 0x8000;
if (hostap_set_word(local->dev, HFA384X_RID_CNFTIMCTRL, val)) {
printk(KERN_DEBUG "%s: set_tim failed (aid=%d "
"set=%d)\n",
local->dev->name, entry->aid, entry->set);
}
kfree(entry);
}
}
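/* A small worked example of the cnfTIMCtrl value built above: the low bits
* carry the association ID and bit 15 (0x8000) requests setting the TIM
* bit, so for aid = 5:
*/
#if 0 /* illustrative sketch only, not compiled */
u16 val_set = 5 | 0x8000; /* 0x8005: set TIM bit for AID 5 */
u16 val_clear = 5; /* 0x0005: clear TIM bit for AID 5 */
#endif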
static void prism2_clear_set_tim_queue(local_info_t *local)
{
struct list_head *ptr, *n;
list_for_each_safe(ptr, n, &local->set_tim_list) {
struct set_tim_data *entry;
entry = list_entry(ptr, struct set_tim_data, list);
list_del(&entry->list);
kfree(entry);
}
}
/*
* HostAP uses two layers of net devices, where the inner
* layer gets called all the time from the outer layer.
* This is a natural nesting, which needs a split lock type.
*/
static struct lock_class_key hostap_netdev_xmit_lock_key;
static struct lock_class_key hostap_netdev_addr_lock_key;
static void prism2_set_lockdep_class_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
{
lockdep_set_class(&txq->_xmit_lock,
&hostap_netdev_xmit_lock_key);
}
static void prism2_set_lockdep_class(struct net_device *dev)
{
lockdep_set_class(&dev->addr_list_lock,
&hostap_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
}
static struct net_device *
prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
struct device *sdev)
{
struct net_device *dev;
struct hostap_interface *iface;
struct local_info *local;
int len, i, ret;
if (funcs == NULL)
return NULL;
len = strlen(dev_template);
if (len >= IFNAMSIZ || strstr(dev_template, "%d") == NULL) {
printk(KERN_WARNING "hostap: Invalid dev_template='%s'\n",
dev_template);
return NULL;
}
len = sizeof(struct hostap_interface) +
3 + sizeof(struct local_info) +
3 + sizeof(struct ap_data);
dev = alloc_etherdev(len);
if (dev == NULL)
return NULL;
iface = netdev_priv(dev);
local = (struct local_info *) ((((long) (iface + 1)) + 3) & ~3);
local->ap = (struct ap_data *) ((((long) (local + 1)) + 3) & ~3);
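/* The masking above rounds each pointer up to a 4-byte boundary:
* ((x + 3) & ~3), e.g. x = 0x1005 -> 0x1008, while an already-aligned
* x = 0x1008 is unchanged. The "3 +" padding in the allocation size
* above reserves room for this rounding. */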
local->dev = iface->dev = dev;
iface->local = local;
iface->type = HOSTAP_INTERFACE_MASTER;
INIT_LIST_HEAD(&local->hostap_interfaces);
local->hw_module = THIS_MODULE;
#ifdef PRISM2_IO_DEBUG
local->io_debug_enabled = 1;
#endif /* PRISM2_IO_DEBUG */
local->func = funcs;
local->func->cmd = hfa384x_cmd;
local->func->read_regs = hfa384x_read_regs;
local->func->get_rid = hfa384x_get_rid;
local->func->set_rid = hfa384x_set_rid;
local->func->hw_enable = prism2_hw_enable;
local->func->hw_config = prism2_hw_config;
local->func->hw_reset = prism2_hw_reset;
local->func->hw_shutdown = prism2_hw_shutdown;
local->func->reset_port = prism2_reset_port;
local->func->schedule_reset = prism2_schedule_reset;
#ifdef PRISM2_DOWNLOAD_SUPPORT
local->func->read_aux = prism2_download_aux_dump;
local->func->download = prism2_download;
#endif /* PRISM2_DOWNLOAD_SUPPORT */
local->func->tx = prism2_tx_80211;
local->func->set_tim = prism2_set_tim;
local->func->need_tx_headroom = 0; /* no need to add txdesc in
* skb->data (FIX: maybe for DMA bus
* mastering?) */
local->mtu = mtu;
rwlock_init(&local->iface_lock);
spin_lock_init(&local->txfidlock);
spin_lock_init(&local->cmdlock);
spin_lock_init(&local->baplock);
spin_lock_init(&local->lock);
spin_lock_init(&local->irq_init_lock);
mutex_init(&local->rid_bap_mtx);
if (card_idx < 0 || card_idx >= MAX_PARM_DEVICES)
card_idx = 0;
local->card_idx = card_idx;
len = strlen(essid);
memcpy(local->essid, essid,
len > MAX_SSID_LEN ? MAX_SSID_LEN : len);
local->essid[MAX_SSID_LEN] = '\0';
i = GET_INT_PARM(iw_mode, card_idx);
if ((i >= IW_MODE_ADHOC && i <= IW_MODE_REPEAT) ||
i == IW_MODE_MONITOR) {
local->iw_mode = i;
} else {
printk(KERN_WARNING "prism2: Unknown iw_mode %d; using "
"IW_MODE_MASTER\n", i);
local->iw_mode = IW_MODE_MASTER;
}
local->channel = GET_INT_PARM(channel, card_idx);
local->beacon_int = GET_INT_PARM(beacon_int, card_idx);
local->dtim_period = GET_INT_PARM(dtim_period, card_idx);
local->wds_max_connections = 16;
local->tx_control = HFA384X_TX_CTRL_FLAGS;
local->manual_retry_count = -1;
local->rts_threshold = 2347;
local->fragm_threshold = 2346;
local->rssi_to_dBm = 100; /* default; to be overridden by
* cnfDbmAdjust, if available */
local->auth_algs = PRISM2_AUTH_OPEN | PRISM2_AUTH_SHARED_KEY;
local->sram_type = -1;
local->scan_channel_mask = 0xffff;
local->monitor_type = PRISM2_MONITOR_RADIOTAP;
/* Initialize task queue structures */
INIT_WORK(&local->reset_queue, handle_reset_queue);
INIT_WORK(&local->set_multicast_list_queue,
hostap_set_multicast_list_queue);
INIT_WORK(&local->set_tim_queue, handle_set_tim_queue);
INIT_LIST_HEAD(&local->set_tim_list);
spin_lock_init(&local->set_tim_lock);
INIT_WORK(&local->comms_qual_update, handle_comms_qual_update);
/* Initialize tasklets for handling hardware IRQ related operations
* outside hw IRQ handler */
#define HOSTAP_TASKLET_INIT(q, f, d) \
do { memset((q), 0, sizeof(*(q))); (q)->func = (f); (q)->data = (d); } \
while (0)
HOSTAP_TASKLET_INIT(&local->bap_tasklet, hostap_bap_tasklet,
(unsigned long) local);
HOSTAP_TASKLET_INIT(&local->info_tasklet, hostap_info_tasklet,
(unsigned long) local);
hostap_info_init(local);
HOSTAP_TASKLET_INIT(&local->rx_tasklet,
hostap_rx_tasklet, (unsigned long) local);
skb_queue_head_init(&local->rx_list);
HOSTAP_TASKLET_INIT(&local->sta_tx_exc_tasklet,
hostap_sta_tx_exc_tasklet, (unsigned long) local);
skb_queue_head_init(&local->sta_tx_exc_list);
INIT_LIST_HEAD(&local->cmd_queue);
init_waitqueue_head(&local->hostscan_wq);
lib80211_crypt_info_init(&local->crypt_info, dev->name, &local->lock);
init_timer(&local->passive_scan_timer);
local->passive_scan_timer.data = (unsigned long) local;
local->passive_scan_timer.function = hostap_passive_scan;
init_timer(&local->tick_timer);
local->tick_timer.data = (unsigned long) local;
local->tick_timer.function = hostap_tick_timer;
local->tick_timer.expires = jiffies + 2 * HZ;
add_timer(&local->tick_timer);
INIT_LIST_HEAD(&local->bss_list);
hostap_setup_dev(dev, local, HOSTAP_INTERFACE_MASTER);
dev->type = ARPHRD_IEEE80211;
dev->header_ops = &hostap_80211_ops;
rtnl_lock();
ret = dev_alloc_name(dev, "wifi%d");
SET_NETDEV_DEV(dev, sdev);
if (ret >= 0)
ret = register_netdevice(dev);
prism2_set_lockdep_class(dev);
rtnl_unlock();
if (ret < 0) {
printk(KERN_WARNING "%s: register netdevice failed!\n",
dev_info);
goto fail;
}
printk(KERN_INFO "%s: Registered netdevice %s\n", dev_info, dev->name);
hostap_init_data(local);
return dev;
fail:
free_netdev(dev);
return NULL;
}
static int hostap_hw_ready(struct net_device *dev)
{
struct hostap_interface *iface;
struct local_info *local;
iface = netdev_priv(dev);
local = iface->local;
local->ddev = hostap_add_interface(local, HOSTAP_INTERFACE_MAIN, 0,
"", dev_template);
if (local->ddev) {
if (local->iw_mode == IW_MODE_INFRA ||
local->iw_mode == IW_MODE_ADHOC) {
netif_carrier_off(local->dev);
netif_carrier_off(local->ddev);
}
hostap_init_proc(local);
#ifndef PRISM2_NO_PROCFS_DEBUG
create_proc_read_entry("registers", 0, local->proc,
prism2_registers_proc_read, local);
#endif /* PRISM2_NO_PROCFS_DEBUG */
hostap_init_ap_proc(local);
return 0;
}
return -1;
}
static void prism2_free_local_data(struct net_device *dev)
{
struct hostap_tx_callback_info *tx_cb, *tx_cb_prev;
int i;
struct hostap_interface *iface;
struct local_info *local;
struct list_head *ptr, *n;
if (dev == NULL)
return;
iface = netdev_priv(dev);
local = iface->local;
/* Unregister all netdevs before freeing local data. */
list_for_each_safe(ptr, n, &local->hostap_interfaces) {
iface = list_entry(ptr, struct hostap_interface, list);
if (iface->type == HOSTAP_INTERFACE_MASTER) {
/* special handling for this interface below */
continue;
}
hostap_remove_interface(iface->dev, 0, 1);
}
unregister_netdev(local->dev);
flush_work_sync(&local->reset_queue);
flush_work_sync(&local->set_multicast_list_queue);
flush_work_sync(&local->set_tim_queue);
#ifndef PRISM2_NO_STATION_MODES
flush_work_sync(&local->info_queue);
#endif
flush_work_sync(&local->comms_qual_update);
lib80211_crypt_info_free(&local->crypt_info);
if (timer_pending(&local->passive_scan_timer))
del_timer(&local->passive_scan_timer);
if (timer_pending(&local->tick_timer))
del_timer(&local->tick_timer);
prism2_clear_cmd_queue(local);
skb_queue_purge(&local->info_list);
skb_queue_purge(&local->rx_list);
skb_queue_purge(&local->sta_tx_exc_list);
if (local->dev_enabled)
prism2_callback(local, PRISM2_CALLBACK_DISABLE);
if (local->ap != NULL)
hostap_free_data(local->ap);
#ifndef PRISM2_NO_PROCFS_DEBUG
if (local->proc != NULL)
remove_proc_entry("registers", local->proc);
#endif /* PRISM2_NO_PROCFS_DEBUG */
hostap_remove_proc(local);
tx_cb = local->tx_callback;
while (tx_cb != NULL) {
tx_cb_prev = tx_cb;
tx_cb = tx_cb->next;
kfree(tx_cb_prev);
}
hostap_set_hostapd(local, 0, 0);
hostap_set_hostapd_sta(local, 0, 0);
for (i = 0; i < PRISM2_FRAG_CACHE_LEN; i++) {
if (local->frag_cache[i].skb != NULL)
dev_kfree_skb(local->frag_cache[i].skb);
}
#ifdef PRISM2_DOWNLOAD_SUPPORT
prism2_download_free_data(local->dl_pri);
prism2_download_free_data(local->dl_sec);
#endif /* PRISM2_DOWNLOAD_SUPPORT */
prism2_clear_set_tim_queue(local);
list_for_each_safe(ptr, n, &local->bss_list) {
struct hostap_bss_info *bss =
list_entry(ptr, struct hostap_bss_info, list);
kfree(bss);
}
kfree(local->pda);
kfree(local->last_scan_results);
kfree(local->generic_elem);
free_netdev(local->dev);
}
#if (defined(PRISM2_PCI) && defined(CONFIG_PM)) || defined(PRISM2_PCCARD)
static void prism2_suspend(struct net_device *dev)
{
struct hostap_interface *iface;
struct local_info *local;
union iwreq_data wrqu;
iface = netdev_priv(dev);
local = iface->local;
/* Send disconnect event, e.g., to trigger reassociation after resume
* if wpa_supplicant is used. */
memset(&wrqu, 0, sizeof(wrqu));
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL);
/* Disable hardware and firmware */
prism2_hw_shutdown(dev, 0);
}
#endif /* (PRISM2_PCI && CONFIG_PM) || PRISM2_PCCARD */
/* These might at some point be compiled separately and used as separate
* kernel modules or linked into one */
#ifdef PRISM2_DOWNLOAD_SUPPORT
#include "hostap_download.c"
#endif /* PRISM2_DOWNLOAD_SUPPORT */
#ifdef PRISM2_CALLBACK
/* External hostap_callback.c file can be used, e.g., to blink an activity
* LED. This can use platform specific code and must define the
* prism2_callback() function (if PRISM2_CALLBACK is not defined, these
* function calls are not used). */
#include "hostap_callback.c"
#endif /* PRISM2_CALLBACK */
| gpl-2.0 |
netico-solutions/linux-am335x | drivers/infiniband/hw/nes/nes_nic.c | 3150 | 63363 | /*
* Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/inet.h>
#include "nes.h"
static struct nic_qp_map nic_qp_mapping_0[] = {
{16,0,0,1},{24,4,0,0},{28,8,0,0},{32,12,0,0},
{20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0},
{18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
{22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
};
static struct nic_qp_map nic_qp_mapping_1[] = {
{18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
{22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
};
static struct nic_qp_map nic_qp_mapping_2[] = {
{20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0}
};
static struct nic_qp_map nic_qp_mapping_3[] = {
{22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
};
static struct nic_qp_map nic_qp_mapping_4[] = {
{28,8,0,0},{32,12,0,0}
};
static struct nic_qp_map nic_qp_mapping_5[] = {
{29,9,1,0},{33,13,1,0}
};
static struct nic_qp_map nic_qp_mapping_6[] = {
{30,10,2,0},{34,14,2,0}
};
static struct nic_qp_map nic_qp_mapping_7[] = {
{31,11,3,0},{35,15,3,0}
};
static struct nic_qp_map *nic_qp_mapping_per_function[] = {
nic_qp_mapping_0, nic_qp_mapping_1, nic_qp_mapping_2, nic_qp_mapping_3,
nic_qp_mapping_4, nic_qp_mapping_5, nic_qp_mapping_6, nic_qp_mapping_7
};
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
static int debug = -1;
static int nics_per_function = 1;
/**
* nes_netdev_poll - NAPI poll handler: drain the NIC CQ and re-arm
* completion notifications once no CQEs remain pending.
*/
static int nes_netdev_poll(struct napi_struct *napi, int budget)
{
struct nes_vnic *nesvnic = container_of(napi, struct nes_vnic, napi);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_hw_nic_cq *nescq = &nesvnic->nic_cq;
nesvnic->budget = budget;
nescq->cqes_pending = 0;
nescq->rx_cqes_completed = 0;
nescq->cqe_allocs_pending = 0;
nescq->rx_pkts_indicated = 0;
nes_nic_ce_handler(nesdev, nescq);
if (nescq->cqes_pending == 0) {
napi_complete(napi);
/* clear out completed cqes and arm */
nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
nescq->cq_number | (nescq->cqe_allocs_pending << 16));
nes_read32(nesdev->regs+NES_CQE_ALLOC);
} else {
/* clear out completed cqes but don't arm */
nes_write32(nesdev->regs+NES_CQE_ALLOC,
nescq->cq_number | (nescq->cqe_allocs_pending << 16));
nes_debug(NES_DBG_NETDEV, "%s: exiting with work pending\n",
nesvnic->netdev->name);
}
return nescq->rx_pkts_indicated;
}
/**
* nes_netdev_open - Activate the network interface; ifconfig
* ethx up.
*/
static int nes_netdev_open(struct net_device *netdev)
{
u32 macaddr_low;
u16 macaddr_high;
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
int ret;
int i;
struct nes_vnic *first_nesvnic = NULL;
u32 nic_active_bit;
u32 nic_active;
struct list_head *list_pos, *list_temp;
unsigned long flags;
assert(nesdev != NULL);
if (nesvnic->netdev_open == 1)
return 0;
if (netif_msg_ifup(nesvnic))
printk(KERN_INFO PFX "%s: enabling interface\n", netdev->name);
ret = nes_init_nic_qp(nesdev, netdev);
if (ret) {
return ret;
}
netif_carrier_off(netdev);
netif_stop_queue(netdev);
if ((!nesvnic->of_device_registered) && (nesvnic->rdma_enabled)) {
nesvnic->nesibdev = nes_init_ofa_device(netdev);
if (nesvnic->nesibdev == NULL) {
printk(KERN_ERR PFX "%s: nesvnic->nesibdev alloc failed", netdev->name);
} else {
nesvnic->nesibdev->nesvnic = nesvnic;
ret = nes_register_ofa_device(nesvnic->nesibdev);
if (ret) {
printk(KERN_ERR PFX "%s: Unable to register RDMA device, ret = %d\n",
netdev->name, ret);
}
}
}
/* Set packet filters */
nic_active_bit = 1 << nesvnic->nic_index;
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
macaddr_high += (u16)netdev->dev_addr[1];
macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
macaddr_low += (u32)netdev->dev_addr[5];
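/* Worked example of the packing above: for dev_addr 00:11:22:33:44:55,
* macaddr_high = 0x0011 (bytes 0-1) and macaddr_low = 0x22334455
* (bytes 2-5). */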
/* Program the various MAC regs */
for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
if (nesvnic->qp_nic_index[i] == 0xf) {
break;
}
nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW"
" (Addr:%08X) = %08X, HIGH = %08X.\n",
i, nesvnic->qp_nic_index[i],
NES_IDX_PERFECT_FILTER_LOW+
(nesvnic->qp_nic_index[i] * 8),
macaddr_low,
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)nesvnic->nic_index) << 16)));
nes_write_indexed(nesdev,
NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
macaddr_low);
nes_write_indexed(nesdev,
NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)nesvnic->nic_index) << 16)));
}
nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
nesvnic->nic_cq.cq_number);
nes_read32(nesdev->regs+NES_CQE_ALLOC);
list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
first_nesvnic = container_of(list_pos, struct nes_vnic, list);
if (first_nesvnic->netdev_open == 1)
break;
}
if (first_nesvnic->netdev_open == 0) {
nes_debug(NES_DBG_INIT, "Setting up MAC interrupt mask.\n");
nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK + (0x200 * nesdev->mac_index),
~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
first_nesvnic = nesvnic;
}
if (first_nesvnic->linkup) {
/* Enable network packets */
nesvnic->linkup = 1;
netif_start_queue(netdev);
netif_carrier_on(netdev);
}
spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
nesdev->link_recheck = 1;
mod_delayed_work(system_wq, &nesdev->work,
NES_LINK_RECHECK_DELAY);
}
spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
if (nesvnic->of_device_registered) {
nesdev->nesadapter->send_term_ok = 1;
if (nesvnic->linkup == 1) {
if (nesdev->iw_status == 0) {
nesdev->iw_status = 1;
nes_port_ibevent(nesvnic);
}
} else {
nesdev->iw_status = 0;
}
}
spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
napi_enable(&nesvnic->napi);
nesvnic->netdev_open = 1;
return 0;
}
/**
* nes_netdev_stop
*/
static int nes_netdev_stop(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
u32 nic_active_mask;
u32 nic_active;
struct nes_vnic *first_nesvnic = NULL;
struct list_head *list_pos, *list_temp;
unsigned long flags;
nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
nesvnic, nesdev, netdev, netdev->name);
if (nesvnic->netdev_open == 0)
return 0;
if (netif_msg_ifdown(nesvnic))
printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name);
netif_carrier_off(netdev);
/* Disable network packets */
napi_disable(&nesvnic->napi);
netif_stop_queue(netdev);
list_for_each_safe(list_pos, list_temp, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]) {
first_nesvnic = container_of(list_pos, struct nes_vnic, list);
if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic))
break;
}
if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic) &&
(PCI_FUNC(first_nesvnic->nesdev->pcidev->devfn) !=
PCI_FUNC(nesvnic->nesdev->pcidev->devfn))) {
nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+
(0x200*nesdev->mac_index), 0xffffffff);
nes_write_indexed(first_nesvnic->nesdev,
NES_IDX_MAC_INT_MASK+
(0x200*first_nesvnic->nesdev->mac_index),
~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
} else {
nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff);
}
nic_active_mask = ~((u32)(1 << nesvnic->nic_index));
nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_HIGH+
(nesvnic->perfect_filter_index*8), 0);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
nic_active &= nic_active_mask;
nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
spin_lock_irqsave(&nesvnic->port_ibevent_lock, flags);
if (nesvnic->of_device_registered) {
nesdev->nesadapter->send_term_ok = 0;
nesdev->iw_status = 0;
if (nesvnic->linkup == 1)
nes_port_ibevent(nesvnic);
}
del_timer_sync(&nesvnic->event_timer);
nesvnic->event_timer.function = NULL;
spin_unlock_irqrestore(&nesvnic->port_ibevent_lock, flags);
nes_destroy_nic_qp(nesvnic);
nesvnic->netdev_open = 0;
return 0;
}
/**
* nes_nic_send - post one skb to the NIC send queue (the first
* NES_FIRST_FRAG_SIZE bytes go to the copy buffer, up to four more
* fragments follow); the caller rings the doorbell.
*/
static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_hw_nic *nesnic = &nesvnic->nic;
struct nes_hw_nic_sq_wqe *nic_sqe;
struct tcphdr *tcph;
__le16 *wqe_fragment_length;
u32 wqe_misc;
u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */
u16 skb_fragment_index;
dma_addr_t bus_address;
nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
if (vlan_tx_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
netdev->name, vlan_tx_tag_get(skb));
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
} else
wqe_misc = 0;
/* bump past the vlan tag */
wqe_fragment_length++;
/* wqe_fragment_address = (u64 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX]; */
wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (skb_is_gso(skb)) {
tcph = tcp_hdr(skb);
/* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... is_gso = %u seg size = %u\n",
netdev->name, skb_is_gso(skb), skb_shinfo(skb)->gso_size); */
wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE | (u16)skb_shinfo(skb)->gso_size;
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
((u32)tcph->doff) |
(((u32)(((unsigned char *)tcph) - skb->data)) << 4));
}
} else { /* CHECKSUM_HW */
wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM;
}
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
skb->len);
memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE), skb_headlen(skb)));
wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
skb_headlen(skb)));
wqe_fragment_length[1] = 0;
if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
if ((skb_shinfo(skb)->nr_frags + 1) > 4) {
nes_debug(NES_DBG_NIC_TX, "%s: Packet with %u fragments not sent, skb_headlen=%u\n",
netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb));
kfree_skb(skb);
nesvnic->tx_sw_dropped++;
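/* The skb has already been freed at this point; NETDEV_TX_LOCKED serves
* only as an internal sentinel - nes_netdev_start_xmit() maps any
* non-NETDEV_TX_OK return to NETDEV_TX_OK so the stack does not requeue
* the freed skb. */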
return NETDEV_TX_LOCKED;
}
set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE,
skb_headlen(skb) - NES_FIRST_FRAG_SIZE, PCI_DMA_TODEVICE);
wqe_fragment_length[wqe_fragment_index++] =
cpu_to_le16(skb_headlen(skb) - NES_FIRST_FRAG_SIZE);
wqe_fragment_length[wqe_fragment_index] = 0;
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
((u64)(bus_address)));
nesnic->tx_skb[nesnic->sq_head] = skb;
}
if (skb_headlen(skb) == skb->len) {
if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) {
nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0;
nesnic->tx_skb[nesnic->sq_head] = skb;
}
} else {
/* Deal with Fragments */
nesnic->tx_skb[nesnic->sq_head] = skb;
for (skb_fragment_index = 0; skb_fragment_index < skb_shinfo(skb)->nr_frags;
skb_fragment_index++) {
skb_frag_t *frag =
&skb_shinfo(skb)->frags[skb_fragment_index];
bus_address = skb_frag_dma_map(&nesdev->pcidev->dev,
frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
wqe_fragment_length[wqe_fragment_index] =
cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[skb_fragment_index]));
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
bus_address);
wqe_fragment_index++;
if (wqe_fragment_index < 5)
wqe_fragment_length[wqe_fragment_index] = 0;
}
}
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
nesnic->sq_head++;
nesnic->sq_head &= nesnic->sq_size - 1;
return NETDEV_TX_OK;
}
/**
* nes_netdev_start_xmit - ndo_start_xmit handler: segments oversized TSO
* requests across multiple WQEs, linearizes other multi-fragment skbs,
* otherwise hands off to nes_nic_send().
*/
static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_hw_nic *nesnic = &nesvnic->nic;
struct nes_hw_nic_sq_wqe *nic_sqe;
struct tcphdr *tcph;
/* struct udphdr *udph; */
#define NES_MAX_TSO_FRAGS MAX_SKB_FRAGS
/* 64K segment plus overflow on each side */
dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS];
dma_addr_t bus_address;
u32 tso_frag_index;
u32 tso_frag_count;
u32 tso_wqe_length;
u32 curr_tcp_seq;
u32 wqe_count=1;
u32 send_rc;
struct iphdr *iph;
__le16 *wqe_fragment_length;
u32 nr_frags;
u32 original_first_length;
/* u64 *wqe_fragment_address; */
/* first fragment (0) is used by copy buffer */
u16 wqe_fragment_index=1;
u16 hoffset;
u16 nhoffset;
u16 wqes_needed;
u16 wqes_available;
u32 wqe_misc;
/*
* nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
* " (%u frags), tso_size=%u\n",
* netdev->name, skb->len, skb_headlen(skb),
* skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
*/
if (!netif_carrier_ok(netdev))
return NETDEV_TX_OK;
if (netif_queue_stopped(netdev))
return NETDEV_TX_BUSY;
/* Check if SQ is full */
if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) {
if (!netif_queue_stopped(netdev)) {
netif_stop_queue(netdev);
barrier();
if ((((((volatile u16)nesnic->sq_tail)+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) != 1) {
netif_start_queue(netdev);
goto sq_no_longer_full;
}
}
nesvnic->sq_full++;
return NETDEV_TX_BUSY;
}
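/* Worked example of the SQ-full test above (sq_size is a power of two):
* ((sq_tail + 2*sq_size) - sq_head) & (sq_size - 1) is the distance from
* the producer index sq_head forward to the consumer index sq_tail,
* modulo sq_size; it reaches 1 when sq_head sits one slot behind sq_tail,
* i.e. the ring is full. E.g. sq_size = 256, sq_head = 200, sq_tail = 201:
* (201 + 512 - 200) & 255 = 1 -> stop the queue. */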
sq_no_longer_full:
nr_frags = skb_shinfo(skb)->nr_frags;
if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
nr_frags++;
}
/* Check if too many fragments */
if (unlikely((nr_frags > 4))) {
if (skb_is_gso(skb)) {
nesvnic->segmented_tso_requests++;
nesvnic->tso_requests++;
/* Basically 4 fragments available per WQE with extended fragments */
wqes_needed = nr_frags >> 2;
wqes_needed += (nr_frags&3)?1:0;
wqes_available = (((nesnic->sq_tail+nesnic->sq_size)-nesnic->sq_head) - 1) &
(nesnic->sq_size - 1);
if (unlikely(wqes_needed > wqes_available)) {
if (!netif_queue_stopped(netdev)) {
netif_stop_queue(netdev);
barrier();
wqes_available = (((((volatile u16)nesnic->sq_tail)+nesnic->sq_size)-nesnic->sq_head) - 1) &
(nesnic->sq_size - 1);
if (wqes_needed <= wqes_available) {
netif_start_queue(netdev);
goto tso_sq_no_longer_full;
}
}
nesvnic->sq_full++;
nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n",
netdev->name);
return NETDEV_TX_BUSY;
}
tso_sq_no_longer_full:
/* Map all the buffers */
for (tso_frag_count=0; tso_frag_count < skb_shinfo(skb)->nr_frags;
tso_frag_count++) {
skb_frag_t *frag =
&skb_shinfo(skb)->frags[tso_frag_count];
tso_bus_address[tso_frag_count] =
skb_frag_dma_map(&nesdev->pcidev->dev,
frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
}
tso_frag_index = 0;
curr_tcp_seq = ntohl(tcp_hdr(skb)->seq);
hoffset = skb_transport_header(skb) - skb->data;
nhoffset = skb_network_header(skb) - skb->data;
original_first_length = hoffset + ((((struct tcphdr *)skb_transport_header(skb))->doff)<<2);
for (wqe_count=0; wqe_count<((u32)wqes_needed); wqe_count++) {
tso_wqe_length = 0;
nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
wqe_fragment_length =
(__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
/* setup the VLAN tag if present */
if (vlan_tx_tag_present(skb)) {
nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
netdev->name, vlan_tx_tag_get(skb) );
wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
} else
wqe_misc = 0;
/* bump past the vlan tag */
wqe_fragment_length++;
/* Assumes header totally fits in allocated buffer and is in first fragment */
if (original_first_length > NES_FIRST_FRAG_SIZE) {
nes_debug(NES_DBG_NIC_TX, "ERROR: SKB header too big, headlen=%u, FIRST_FRAG_SIZE=%u\n",
original_first_length, NES_FIRST_FRAG_SIZE);
nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
" (%u frags), is_gso = %u tso_size=%u\n",
netdev->name,
skb->len, skb_headlen(skb),
skb_shinfo(skb)->nr_frags, skb_is_gso(skb), skb_shinfo(skb)->gso_size);
}
memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE),
original_first_length));
iph = (struct iphdr *)
(&nesnic->first_frag_vbase[nesnic->sq_head].buffer[nhoffset]);
tcph = (struct tcphdr *)
(&nesnic->first_frag_vbase[nesnic->sq_head].buffer[hoffset]);
if ((wqe_count+1)!=(u32)wqes_needed) {
tcph->fin = 0;
tcph->psh = 0;
tcph->rst = 0;
tcph->urg = 0;
}
if (wqe_count) {
tcph->syn = 0;
}
tcph->seq = htonl(curr_tcp_seq);
wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
original_first_length));
wqe_fragment_index = 1;
if ((wqe_count==0) && (skb_headlen(skb) > original_first_length)) {
set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
bus_address = pci_map_single(nesdev->pcidev, skb->data + original_first_length,
skb_headlen(skb) - original_first_length, PCI_DMA_TODEVICE);
wqe_fragment_length[wqe_fragment_index++] =
cpu_to_le16(skb_headlen(skb) - original_first_length);
wqe_fragment_length[wqe_fragment_index] = 0;
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
bus_address);
tso_wqe_length += skb_headlen(skb) -
original_first_length;
}
while (wqe_fragment_index < 5) {
wqe_fragment_length[wqe_fragment_index] =
cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index]));
set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
(u64)tso_bus_address[tso_frag_index]);
wqe_fragment_index++;
tso_wqe_length += skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index++]);
if (wqe_fragment_index < 5)
wqe_fragment_length[wqe_fragment_index] = 0;
if (tso_frag_index == tso_frag_count)
break;
}
if ((wqe_count+1) == (u32)wqes_needed) {
nesnic->tx_skb[nesnic->sq_head] = skb;
} else {
nesnic->tx_skb[nesnic->sq_head] = NULL;
}
wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_shinfo(skb)->gso_size;
if ((tso_wqe_length + original_first_length) > skb_shinfo(skb)->gso_size) {
wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE;
} else {
iph->tot_len = htons(tso_wqe_length + original_first_length - nhoffset);
}
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX,
wqe_misc);
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
((u32)tcph->doff) | (((u32)hoffset) << 4));
set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
tso_wqe_length + original_first_length);
curr_tcp_seq += tso_wqe_length;
nesnic->sq_head++;
nesnic->sq_head &= nesnic->sq_size-1;
}
} else {
nesvnic->linearized_skbs++;
hoffset = skb_transport_header(skb) - skb->data;
nhoffset = skb_network_header(skb) - skb->data;
skb_linearize(skb);
skb_set_transport_header(skb, hoffset);
skb_set_network_header(skb, nhoffset);
send_rc = nes_nic_send(skb, netdev);
if (send_rc != NETDEV_TX_OK)
return NETDEV_TX_OK;
}
} else {
send_rc = nes_nic_send(skb, netdev);
if (send_rc != NETDEV_TX_OK)
return NETDEV_TX_OK;
}
barrier();
if (wqe_count)
nes_write32(nesdev->regs+NES_WQE_ALLOC,
(wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
netdev->trans_start = jiffies;
return NETDEV_TX_OK;
}
/**
* nes_netdev_get_stats
*/
static struct net_device_stats *nes_netdev_get_stats(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
u64 u64temp;
u32 u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_DISCARD + (nesvnic->nic_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->endnode_nstat_rx_discard += u32temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
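/* The adapter exposes 64-bit counters as LO/HI register pairs; the pattern
* above reassembles them as LO | (HI << 32), e.g. HI = 0x2, LO = 0x10 ->
* 0x0000000200000010. */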
nesvnic->endnode_nstat_rx_octets += u64temp;
nesvnic->netstats.rx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_rx_frames += u64temp;
nesvnic->netstats.rx_packets += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_tx_octets += u64temp;
nesvnic->netstats.tx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO + (nesvnic->nic_index*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
nesvnic->endnode_nstat_tx_frames += u64temp;
nesvnic->netstats.tx_packets += u64temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_short_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_length_errors += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_crc_errors += u32temp;
nesvnic->netstats.rx_crc_errors += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_tx_errors += u32temp;
nesvnic->netstats.tx_errors += u32temp;
return &nesvnic->netstats;
}
/**
* nes_netdev_tx_timeout
*/
static void nes_netdev_tx_timeout(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
if (netif_msg_timer(nesvnic))
nes_debug(NES_DBG_NIC_TX, "%s: tx timeout\n", netdev->name);
}
/**
* nes_netdev_set_mac_address
*/
static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct sockaddr *mac_addr = p;
int i;
u32 macaddr_low;
u16 macaddr_high;
if (!is_valid_ether_addr(mac_addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
printk(PFX "%s: Address length = %d, Address = %pM\n",
__func__, netdev->addr_len, mac_addr->sa_data);
macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
macaddr_high += (u16)netdev->dev_addr[1];
macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
macaddr_low += (u32)netdev->dev_addr[5];
for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
if (nesvnic->qp_nic_index[i] == 0xf) {
break;
}
nes_write_indexed(nesdev,
NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
macaddr_low);
nes_write_indexed(nesdev,
NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)nesvnic->nic_index) << 16)));
}
return 0;
}
static void set_allmulti(struct nes_device *nesdev, u32 nic_active_bit)
{
u32 nic_active;
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active &= ~nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}
#define get_addr(addrs, index) ((addrs) + (index) * ETH_ALEN)
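/* get_addr() indexes a flat array of 6-byte MAC addresses: get_addr(addrs, 2)
* expands to (addrs + 2 * ETH_ALEN), i.e. addrs + 12. */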
/**
* nes_netdev_set_multicast_list
*/
static void nes_netdev_set_multicast_list(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
u32 nic_active_bit;
u32 nic_active;
u32 perfect_filter_register_address;
u32 macaddr_low;
u16 macaddr_high;
u8 mc_all_on = 0;
u8 mc_index;
int mc_nic_index = -1;
u8 pft_entries_preallocated = max(nesadapter->adapter_fcn_count *
nics_per_function, 4);
u8 max_pft_entries_available = NES_PFT_SIZE - pft_entries_preallocated;
unsigned long flags;
int mc_count = netdev_mc_count(netdev);
spin_lock_irqsave(&nesadapter->resource_lock, flags);
nic_active_bit = 1 << nesvnic->nic_index;
if (netdev->flags & IFF_PROMISC) {
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active |= nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
mc_all_on = 1;
} else if ((netdev->flags & IFF_ALLMULTI) ||
(nesvnic->nic_index > 3)) {
set_allmulti(nesdev, nic_active_bit);
mc_all_on = 1;
} else {
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
nic_active &= ~nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active &= ~nic_active_bit;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}
nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscuous = %d, All Multicast = %d.\n",
mc_count, !!(netdev->flags & IFF_PROMISC),
!!(netdev->flags & IFF_ALLMULTI));
if (!mc_all_on) {
char *addrs;
int i;
struct netdev_hw_addr *ha;
addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC);
if (!addrs) {
set_allmulti(nesdev, nic_active_bit);
goto unlock;
}
i = 0;
netdev_for_each_mc_addr(ha, netdev)
memcpy(get_addr(addrs, i++), ha->addr, ETH_ALEN);
perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
pft_entries_preallocated * 0x8;
for (i = 0, mc_index = 0; mc_index < max_pft_entries_available;
mc_index++) {
while (i < mc_count && nesvnic->mcrq_mcast_filter &&
((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic,
get_addr(addrs, i++))) == 0));
if (mc_nic_index < 0)
mc_nic_index = nesvnic->nic_index;
while (nesadapter->pft_mcast_map[mc_index] < 16 &&
nesadapter->pft_mcast_map[mc_index] !=
nesvnic->nic_index &&
mc_index < max_pft_entries_available) {
nes_debug(NES_DBG_NIC_RX,
"mc_index=%d skipping nic_index=%d, "
"used for=%d \n", mc_index,
nesvnic->nic_index,
nesadapter->pft_mcast_map[mc_index]);
mc_index++;
}
if (mc_index >= max_pft_entries_available)
break;
if (i < mc_count) {
char *addr = get_addr(addrs, i++);
nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %pM to register 0x%04X nic_idx=%d\n",
addr,
perfect_filter_register_address+(mc_index * 8),
mc_nic_index);
macaddr_high = ((u8) addr[0]) << 8;
macaddr_high += (u8) addr[1];
macaddr_low = ((u8) addr[2]) << 24;
macaddr_low += ((u8) addr[3]) << 16;
macaddr_low += ((u8) addr[4]) << 8;
macaddr_low += (u8) addr[5];
nes_write_indexed(nesdev,
perfect_filter_register_address+(mc_index * 8),
macaddr_low);
nes_write_indexed(nesdev,
perfect_filter_register_address+4+(mc_index * 8),
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)(1<<mc_nic_index)) << 16)));
nesadapter->pft_mcast_map[mc_index] =
nesvnic->nic_index;
} else {
nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n",
perfect_filter_register_address+(mc_index * 8));
nes_write_indexed(nesdev,
perfect_filter_register_address+4+(mc_index * 8),
0);
nesadapter->pft_mcast_map[mc_index] = 255;
}
}
kfree(addrs);
/* PFT is not large enough */
if (i < mc_count)
set_allmulti(nesdev, nic_active_bit);
}
unlock:
spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
}
/**
* nes_netdev_change_mtu
*/
static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
int ret = 0;
u8 jumbomode = 0;
u32 nic_active;
u32 nic_active_bit;
u32 uc_all_active;
u32 mc_all_active;
if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
return -EINVAL;
netdev->mtu = new_mtu;
nesvnic->max_frame_size = new_mtu + VLAN_ETH_HLEN;
if (netdev->mtu > 1500) {
jumbomode=1;
}
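/* E.g. new_mtu = 9000 -> max_frame_size = 9000 + VLAN_ETH_HLEN (18) = 9018
* and jumbo timer defaults; the standard 1500-byte MTU yields 1518 with
* jumbomode left at 0. */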
nes_nic_init_timer_defaults(nesdev, jumbomode);
if (netif_running(netdev)) {
nic_active_bit = 1 << nesvnic->nic_index;
mc_all_active = nes_read_indexed(nesdev,
NES_IDX_NIC_MULTICAST_ALL) & nic_active_bit;
uc_all_active = nes_read_indexed(nesdev,
NES_IDX_NIC_UNICAST_ALL) & nic_active_bit;
nes_netdev_stop(netdev);
nes_netdev_open(netdev);
nic_active = nes_read_indexed(nesdev,
NES_IDX_NIC_MULTICAST_ALL);
nic_active |= mc_all_active;
nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL,
nic_active);
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
nic_active |= uc_all_active;
nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
}
return ret;
}
static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
"Link Change Interrupts",
"Linearized SKBs",
"T/GSO Requests",
"Pause Frames Sent",
"Pause Frames Received",
"Internal Routing Errors",
"SQ SW Dropped SKBs",
"SQ Full",
"Segmented TSO Requests",
"Rx Symbol Errors",
"Rx Jabber Errors",
"Rx Oversized Frames",
"Rx Short Frames",
"Rx Length Errors",
"Rx CRC Errors",
"Rx Port Discard",
"Endnode Rx Discards",
"Endnode Rx Octets",
"Endnode Rx Frames",
"Endnode Tx Octets",
"Endnode Tx Frames",
"Tx Errors",
"mh detected",
"mh pauses",
"Retransmission Count",
"CM Connects",
"CM Accepts",
"Disconnects",
"Connected Events",
"Connect Requests",
"CM Rejects",
"ModifyQP Timeouts",
"CreateQPs",
"SW DestroyQPs",
"DestroyQPs",
"CM Closes",
"CM Packets Sent",
"CM Packets Bounced",
"CM Packets Created",
"CM Packets Rcvd",
"CM Packets Dropped",
"CM Packets Retrans",
"CM Listens Created",
"CM Listens Destroyed",
"CM Backlog Drops",
"CM Loopbacks",
"CM Nodes Created",
"CM Nodes Destroyed",
"CM Accel Drops",
"CM Resets Received",
"Free 4Kpbls",
"Free 256pbls",
"Timer Inits",
"LRO aggregated",
"LRO flushed",
"LRO no_desc",
"PAU CreateQPs",
"PAU DestroyQPs",
};
#define NES_ETHTOOL_STAT_COUNT ARRAY_SIZE(nes_ethtool_stringset)
/**
* nes_netdev_get_sset_count
*/
static int nes_netdev_get_sset_count(struct net_device *netdev, int stringset)
{
if (stringset == ETH_SS_STATS)
return NES_ETHTOOL_STAT_COUNT;
else
return -EINVAL;
}
/**
* nes_netdev_get_strings
*/
static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
u8 *ethtool_strings)
{
if (stringset == ETH_SS_STATS)
memcpy(ethtool_strings,
&nes_ethtool_stringset,
sizeof(nes_ethtool_stringset));
}
/**
* nes_netdev_get_ethtool_stats
*/
static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
{
u64 u64temp;
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 nic_count;
u32 u32temp;
u32 index = 0;
target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT;
target_stat_values[index] = nesvnic->nesdev->link_status_interrupts;
target_stat_values[++index] = nesvnic->linearized_skbs;
target_stat_values[++index] = nesvnic->tso_requests;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_pause_frames_sent += u32temp;
target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_sent;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_pause_frames_received += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_PORT_RX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
nesvnic->nesdev->port_rx_discards += u32temp;
nesvnic->netstats.rx_dropped += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_PORT_TX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
nesvnic->nesdev->port_tx_discards += u32temp;
nesvnic->netstats.tx_dropped += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_short_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->netstats.rx_length_errors += u32temp;
nesvnic->nesdev->mac_rx_errors += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_rx_errors += u32temp;
nesvnic->nesdev->mac_rx_crc_errors += u32temp;
nesvnic->netstats.rx_crc_errors += u32temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
nesvnic->nesdev->mac_tx_errors += u32temp;
nesvnic->netstats.tx_errors += u32temp;
for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
if (nesvnic->qp_nic_index[nic_count] == 0xf)
break;
u32temp = nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_DISCARD +
(nesvnic->qp_nic_index[nic_count]*0x200));
nesvnic->netstats.rx_dropped += u32temp;
nesvnic->endnode_nstat_rx_discard += u32temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO +
(nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI +
(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_rx_octets += u64temp;
nesvnic->netstats.rx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO +
(nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI +
(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_rx_frames += u64temp;
nesvnic->netstats.rx_packets += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO +
(nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI +
(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_tx_octets += u64temp;
nesvnic->netstats.tx_bytes += u64temp;
u64temp = (u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO +
(nesvnic->qp_nic_index[nic_count]*0x200));
u64temp += ((u64)nes_read_indexed(nesdev,
NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI +
(nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
nesvnic->endnode_nstat_tx_frames += u64temp;
nesvnic->netstats.tx_packets += u64temp;
u32temp = nes_read_indexed(nesdev,
NES_IDX_IPV4_TCP_REXMITS + (nesvnic->qp_nic_index[nic_count]*0x200));
nesvnic->endnode_ipv4_tcp_retransmits += u32temp;
}
target_stat_values[++index] = nesvnic->nesdev->mac_pause_frames_received;
target_stat_values[++index] = nesdev->nesadapter->nic_rx_eth_route_err;
target_stat_values[++index] = nesvnic->tx_sw_dropped;
target_stat_values[++index] = nesvnic->sq_full;
target_stat_values[++index] = nesvnic->segmented_tso_requests;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_symbol_err_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_jabber_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_oversized_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_short_frames;
target_stat_values[++index] = nesvnic->netstats.rx_length_errors;
target_stat_values[++index] = nesvnic->nesdev->mac_rx_crc_errors;
target_stat_values[++index] = nesvnic->nesdev->port_rx_discards;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_discard;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_rx_frames;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_octets;
target_stat_values[++index] = nesvnic->endnode_nstat_tx_frames;
target_stat_values[++index] = nesvnic->nesdev->mac_tx_errors;
target_stat_values[++index] = mh_detected;
target_stat_values[++index] = mh_pauses_sent;
target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
target_stat_values[++index] = atomic_read(&cm_connects);
target_stat_values[++index] = atomic_read(&cm_accepts);
target_stat_values[++index] = atomic_read(&cm_disconnects);
target_stat_values[++index] = atomic_read(&cm_connecteds);
target_stat_values[++index] = atomic_read(&cm_connect_reqs);
target_stat_values[++index] = atomic_read(&cm_rejects);
target_stat_values[++index] = atomic_read(&mod_qp_timouts);
target_stat_values[++index] = atomic_read(&qps_created);
target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
target_stat_values[++index] = atomic_read(&qps_destroyed);
target_stat_values[++index] = atomic_read(&cm_closes);
target_stat_values[++index] = cm_packets_sent;
target_stat_values[++index] = cm_packets_bounced;
target_stat_values[++index] = cm_packets_created;
target_stat_values[++index] = cm_packets_received;
target_stat_values[++index] = cm_packets_dropped;
target_stat_values[++index] = cm_packets_retrans;
target_stat_values[++index] = atomic_read(&cm_listens_created);
target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
target_stat_values[++index] = cm_backlog_drops;
target_stat_values[++index] = atomic_read(&cm_loopbacks);
target_stat_values[++index] = atomic_read(&cm_nodes_created);
target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
target_stat_values[++index] = atomic_read(&cm_resets_recvd);
target_stat_values[++index] = nesadapter->free_4kpbl;
target_stat_values[++index] = nesadapter->free_256pbl;
target_stat_values[++index] = int_mod_timer_init;
target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
target_stat_values[++index] = atomic_read(&pau_qps_created);
target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
}
/**
* nes_netdev_get_drvinfo
*/
static void nes_netdev_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u", nesadapter->firmware_version >> 16,
nesadapter->firmware_version & 0x000000ff);
strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
drvinfo->testinfo_len = 0;
drvinfo->eedump_len = 0;
drvinfo->regdump_len = 0;
}
/**
* nes_netdev_set_coalesce
*/
static int nes_netdev_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et_coalesce)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
unsigned long flags;
spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
if (et_coalesce->rx_max_coalesced_frames_low) {
shared_timer->threshold_low = et_coalesce->rx_max_coalesced_frames_low;
}
if (et_coalesce->rx_max_coalesced_frames_irq) {
shared_timer->threshold_target = et_coalesce->rx_max_coalesced_frames_irq;
}
if (et_coalesce->rx_max_coalesced_frames_high) {
shared_timer->threshold_high = et_coalesce->rx_max_coalesced_frames_high;
}
if (et_coalesce->rx_coalesce_usecs_low) {
shared_timer->timer_in_use_min = et_coalesce->rx_coalesce_usecs_low;
}
if (et_coalesce->rx_coalesce_usecs_high) {
shared_timer->timer_in_use_max = et_coalesce->rx_coalesce_usecs_high;
}
spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
/* using this to drive total interrupt moderation */
nesadapter->et_rx_coalesce_usecs_irq = et_coalesce->rx_coalesce_usecs_irq;
if (et_coalesce->use_adaptive_rx_coalesce) {
nesadapter->et_use_adaptive_rx_coalesce = 1;
nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
nesadapter->et_rx_coalesce_usecs_irq = 0;
if (et_coalesce->pkt_rate_low) {
nesadapter->et_pkt_rate_low = et_coalesce->pkt_rate_low;
}
} else {
nesadapter->et_use_adaptive_rx_coalesce = 0;
nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
if (nesadapter->et_rx_coalesce_usecs_irq) {
nes_write32(nesdev->regs+NES_PERIODIC_CONTROL,
0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq*8)));
}
}
return 0;
}
/**
* nes_netdev_get_coalesce
*/
static int nes_netdev_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *et_coalesce)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
struct ethtool_coalesce temp_et_coalesce;
struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
unsigned long flags;
memset(&temp_et_coalesce, 0, sizeof(temp_et_coalesce));
temp_et_coalesce.rx_coalesce_usecs_irq = nesadapter->et_rx_coalesce_usecs_irq;
temp_et_coalesce.use_adaptive_rx_coalesce = nesadapter->et_use_adaptive_rx_coalesce;
temp_et_coalesce.rate_sample_interval = nesadapter->et_rate_sample_interval;
temp_et_coalesce.pkt_rate_low = nesadapter->et_pkt_rate_low;
spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
temp_et_coalesce.rx_max_coalesced_frames_low = shared_timer->threshold_low;
temp_et_coalesce.rx_max_coalesced_frames_irq = shared_timer->threshold_target;
temp_et_coalesce.rx_max_coalesced_frames_high = shared_timer->threshold_high;
temp_et_coalesce.rx_coalesce_usecs_low = shared_timer->timer_in_use_min;
temp_et_coalesce.rx_coalesce_usecs_high = shared_timer->timer_in_use_max;
if (nesadapter->et_use_adaptive_rx_coalesce) {
temp_et_coalesce.rx_coalesce_usecs_irq = shared_timer->timer_in_use;
}
spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
memcpy(et_coalesce, &temp_et_coalesce, sizeof(*et_coalesce));
return 0;
}
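/*
 * Note on the pattern above (comment added for clarity, not in the
 * original driver): the handler fills a local ethtool_coalesce under
 * periodic_timer_lock and memcpy()s it to the caller only after the
 * lock is dropped, so the spinlock is never held while writing into
 * the caller's buffer.
 */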
/**
* nes_netdev_get_pauseparam
*/
static void nes_netdev_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *et_pauseparam)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
et_pauseparam->autoneg = 0;
et_pauseparam->rx_pause = (nesvnic->nesdev->disable_rx_flow_control == 0) ? 1:0;
et_pauseparam->tx_pause = (nesvnic->nesdev->disable_tx_flow_control == 0) ? 1:0;
}
/**
* nes_netdev_set_pauseparam
*/
static int nes_netdev_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *et_pauseparam)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
u32 u32temp;
if (et_pauseparam->autoneg) {
/* TODO: should return unsupported */
return 0;
}
if ((et_pauseparam->tx_pause == 1) && (nesdev->disable_tx_flow_control == 1)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 0;
} else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
nes_write_indexed(nesdev,
NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
nesdev->disable_tx_flow_control = 1;
}
if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
u32temp &= ~NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
nes_write_indexed(nesdev,
NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
nesdev->disable_rx_flow_control = 0;
} else if ((et_pauseparam->rx_pause == 0) && (nesdev->disable_rx_flow_control == 0)) {
u32temp = nes_read_indexed(nesdev,
NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
u32temp |= NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
nes_write_indexed(nesdev,
NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
nesdev->disable_rx_flow_control = 1;
}
return 0;
}
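/*
 * Illustrative sketch only (not part of the original driver): the four
 * branches in nes_netdev_set_pauseparam() share a read-modify-write
 * pattern on indexed registers. A hypothetical helper such as the one
 * below could factor it out; the name nes_rmw_indexed is invented here
 * for illustration.
 */
static inline void nes_rmw_indexed(struct nes_device *nesdev, u32 reg,
		u32 clear_bits, u32 set_bits)
{
	/* read the current register value, clear then set the given bits */
	u32 val = nes_read_indexed(nesdev, reg);

	val &= ~clear_bits;
	val |= set_bits;
	nes_write_indexed(nesdev, reg, val);
}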
/**
* nes_netdev_get_settings
*/
static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 mac_index = nesdev->mac_index;
u8 phy_type = nesadapter->phy_type[mac_index];
u8 phy_index = nesadapter->phy_index[mac_index];
u16 phy_data;
et_cmd->duplex = DUPLEX_FULL;
et_cmd->port = PORT_MII;
et_cmd->maxtxpkt = 511;
et_cmd->maxrxpkt = 511;
if (nesadapter->OneG_Mode) {
ethtool_cmd_speed_set(et_cmd, SPEED_1000);
if (phy_type == NES_PHY_TYPE_PUMA_1G) {
et_cmd->supported = SUPPORTED_1000baseT_Full;
et_cmd->advertising = ADVERTISED_1000baseT_Full;
et_cmd->autoneg = AUTONEG_DISABLE;
et_cmd->transceiver = XCVR_INTERNAL;
et_cmd->phy_address = mac_index;
} else {
unsigned long flags;
et_cmd->supported = SUPPORTED_1000baseT_Full
| SUPPORTED_Autoneg;
et_cmd->advertising = ADVERTISED_1000baseT_Full
| ADVERTISED_Autoneg;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
if (phy_data & 0x1000)
et_cmd->autoneg = AUTONEG_ENABLE;
else
et_cmd->autoneg = AUTONEG_DISABLE;
et_cmd->transceiver = XCVR_EXTERNAL;
et_cmd->phy_address = phy_index;
}
return 0;
}
if ((phy_type == NES_PHY_TYPE_ARGUS) ||
(phy_type == NES_PHY_TYPE_SFP_D) ||
(phy_type == NES_PHY_TYPE_KR)) {
et_cmd->transceiver = XCVR_EXTERNAL;
et_cmd->port = PORT_FIBRE;
et_cmd->supported = SUPPORTED_FIBRE;
et_cmd->advertising = ADVERTISED_FIBRE;
et_cmd->phy_address = phy_index;
} else {
et_cmd->transceiver = XCVR_INTERNAL;
et_cmd->supported = SUPPORTED_10000baseT_Full;
et_cmd->advertising = ADVERTISED_10000baseT_Full;
et_cmd->phy_address = mac_index;
}
ethtool_cmd_speed_set(et_cmd, SPEED_10000);
et_cmd->autoneg = AUTONEG_DISABLE;
return 0;
}
/**
* nes_netdev_set_settings
*/
static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
if ((nesadapter->OneG_Mode) &&
(nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) {
unsigned long flags;
u16 phy_data;
u8 phy_index = nesadapter->phy_index[nesdev->mac_index];
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
if (et_cmd->autoneg) {
/* Turn on Full duplex, Autoneg, and restart autonegotiation */
phy_data |= 0x1300;
} else {
/* Turn off autoneg */
phy_data &= ~0x1000;
}
nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
return 0;
}
static const struct ethtool_ops nes_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_settings = nes_netdev_get_settings,
.set_settings = nes_netdev_set_settings,
.get_strings = nes_netdev_get_strings,
.get_sset_count = nes_netdev_get_sset_count,
.get_ethtool_stats = nes_netdev_get_ethtool_stats,
.get_drvinfo = nes_netdev_get_drvinfo,
.get_coalesce = nes_netdev_get_coalesce,
.set_coalesce = nes_netdev_set_coalesce,
.get_pauseparam = nes_netdev_get_pauseparam,
.set_pauseparam = nes_netdev_set_pauseparam,
};
static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)
{
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
unsigned long flags;
spin_lock_irqsave(&nesadapter->phy_lock, flags);
nes_debug(NES_DBG_NETDEV, "%s: %s\n", __func__, netdev->name);
/* Enable/Disable VLAN Stripping */
u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
if (features & NETIF_F_HW_VLAN_CTAG_RX)
u32temp &= 0xfdffffff;
else
u32temp |= 0x02000000;
nes_write_indexed(nesdev, NES_IDX_PCIX_DIAG, u32temp);
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
* enable/disable, make sure the tx flag is always in the same state as rx.
*/
if (features & NETIF_F_HW_VLAN_CTAG_RX)
features |= NETIF_F_HW_VLAN_CTAG_TX;
else
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return features;
}
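/*
 * For clarity (added comment): nes_fix_features() keeps the two VLAN
 * flags in lockstep. E.g. requesting CTAG_RX alone yields
 * CTAG_RX | CTAG_TX, and clearing CTAG_RX also clears CTAG_TX, since
 * the hardware cannot enable rx and tx VLAN acceleration independently.
 */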
static int nes_set_features(struct net_device *netdev, netdev_features_t features)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
u32 changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_CTAG_RX)
nes_vlan_mode(netdev, nesdev, features);
return 0;
}
static const struct net_device_ops nes_netdev_ops = {
.ndo_open = nes_netdev_open,
.ndo_stop = nes_netdev_stop,
.ndo_start_xmit = nes_netdev_start_xmit,
.ndo_get_stats = nes_netdev_get_stats,
.ndo_tx_timeout = nes_netdev_tx_timeout,
.ndo_set_mac_address = nes_netdev_set_mac_address,
.ndo_set_rx_mode = nes_netdev_set_multicast_list,
.ndo_change_mtu = nes_netdev_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_fix_features = nes_fix_features,
.ndo_set_features = nes_set_features,
};
/**
* nes_netdev_init - initialize network device
*/
struct net_device *nes_netdev_init(struct nes_device *nesdev,
void __iomem *mmio_addr)
{
u64 u64temp;
struct nes_vnic *nesvnic;
struct net_device *netdev;
struct nic_qp_map *curr_qp_map;
u8 phy_type = nesdev->nesadapter->phy_type[nesdev->mac_index];
netdev = alloc_etherdev(sizeof(struct nes_vnic));
if (!netdev) {
printk(KERN_ERR PFX "nesvnic etherdev alloc failed");
return NULL;
}
nesvnic = netdev_priv(netdev);
nes_debug(NES_DBG_INIT, "netdev = %p, %s\n", netdev, netdev->name);
SET_NETDEV_DEV(netdev, &nesdev->pcidev->dev);
netdev->watchdog_timeo = NES_TX_TIMEOUT;
netdev->irq = nesdev->pcidev->irq;
netdev->mtu = ETH_DATA_LEN;
netdev->hard_header_len = ETH_HLEN;
netdev->addr_len = ETH_ALEN;
netdev->type = ARPHRD_ETHER;
netdev->netdev_ops = &nes_netdev_ops;
netdev->ethtool_ops = &nes_ethtool_ops;
netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
/* Fill in the port structure */
nesvnic->netdev = netdev;
nesvnic->nesdev = nesdev;
nesvnic->msg_enable = netif_msg_init(debug, default_msg);
nesvnic->netdev_index = nesdev->netdev_count;
nesvnic->perfect_filter_index = nesdev->nesadapter->netdev_count;
nesvnic->max_frame_size = netdev->mtu + netdev->hard_header_len + VLAN_HLEN;
curr_qp_map = nic_qp_mapping_per_function[PCI_FUNC(nesdev->pcidev->devfn)];
nesvnic->nic.qp_id = curr_qp_map[nesdev->netdev_count].qpid;
nesvnic->nic_index = curr_qp_map[nesdev->netdev_count].nic_index;
nesvnic->logical_port = curr_qp_map[nesdev->netdev_count].logical_port;
/* Setup the burned in MAC address */
u64temp = (u64)nesdev->nesadapter->mac_addr_low;
u64temp += ((u64)nesdev->nesadapter->mac_addr_high) << 32;
u64temp += nesvnic->nic_index;
netdev->dev_addr[0] = (u8)(u64temp>>40);
netdev->dev_addr[1] = (u8)(u64temp>>32);
netdev->dev_addr[2] = (u8)(u64temp>>24);
netdev->dev_addr[3] = (u8)(u64temp>>16);
netdev->dev_addr[4] = (u8)(u64temp>>8);
netdev->dev_addr[5] = (u8)u64temp;
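/*
 * Worked example (comment added for clarity): with mac_addr_high =
 * 0x00001234, mac_addr_low = 0x56789abc and nic_index = 1, u64temp is
 * 0x123456789abd, giving dev_addr 12:34:56:78:9a:bd (the burned-in
 * base address plus the per-NIC offset).
 */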
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV))
netdev->hw_features |= NETIF_F_TSO;
netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_LRO;
nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
" nic_index = %d, logical_port = %d, mac_index = %d.\n",
nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id,
nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index);
if (nesvnic->nesdev->nesadapter->port_count == 1 &&
nesvnic->nesdev->nesadapter->adapter_fcn_count == 1) {
nesvnic->qp_nic_index[0] = nesvnic->nic_index;
nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1;
if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) {
nesvnic->qp_nic_index[2] = 0xf;
nesvnic->qp_nic_index[3] = 0xf;
} else {
nesvnic->qp_nic_index[2] = nesvnic->nic_index + 2;
nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3;
}
} else {
if (nesvnic->nesdev->nesadapter->port_count == 2 ||
(nesvnic->nesdev->nesadapter->port_count == 1 &&
nesvnic->nesdev->nesadapter->adapter_fcn_count == 2)) {
nesvnic->qp_nic_index[0] = nesvnic->nic_index;
nesvnic->qp_nic_index[1] = nesvnic->nic_index
+ 2;
nesvnic->qp_nic_index[2] = 0xf;
nesvnic->qp_nic_index[3] = 0xf;
} else {
nesvnic->qp_nic_index[0] = nesvnic->nic_index;
nesvnic->qp_nic_index[1] = 0xf;
nesvnic->qp_nic_index[2] = 0xf;
nesvnic->qp_nic_index[3] = 0xf;
}
}
nesvnic->next_qp_nic_index = 0;
if (nesdev->netdev_count == 0) {
nesvnic->rdma_enabled = 1;
} else {
nesvnic->rdma_enabled = 0;
}
nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
init_timer(&nesvnic->event_timer);
nesvnic->event_timer.function = NULL;
spin_lock_init(&nesvnic->tx_lock);
spin_lock_init(&nesvnic->port_ibevent_lock);
nesdev->netdev[nesdev->netdev_count] = netdev;
nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n",
nesvnic, nesdev->mac_index);
list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]);
if ((nesdev->netdev_count == 0) &&
((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) ||
((phy_type == NES_PHY_TYPE_PUMA_1G) &&
(((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
u32 u32temp;
u32 link_mask = 0;
u32 link_val = 0;
u16 temp_phy_data;
u16 phy_data = 0;
unsigned long flags;
u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)));
if (phy_type != NES_PHY_TYPE_PUMA_1G) {
u32temp |= 0x00200000;
nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)), u32temp);
}
/* Check and set linkup here. This is for back to back */
/* configuration where second port won't get link interrupt */
switch (phy_type) {
case NES_PHY_TYPE_PUMA_1G:
if (nesdev->mac_index < 2) {
link_mask = 0x01010000;
link_val = 0x01010000;
} else {
link_mask = 0x02020000;
link_val = 0x02020000;
}
break;
case NES_PHY_TYPE_SFP_D:
spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
nes_read_10G_phy_reg(nesdev,
nesdev->nesadapter->phy_index[nesdev->mac_index],
1, 0x9003);
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
nes_read_10G_phy_reg(nesdev,
nesdev->nesadapter->phy_index[nesdev->mac_index],
3, 0x0021);
nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
nes_read_10G_phy_reg(nesdev,
nesdev->nesadapter->phy_index[nesdev->mac_index],
3, 0x0021);
phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0;
break;
default:
link_mask = 0x0f1f0000;
link_val = 0x0f0f0000;
break;
}
u32temp = nes_read_indexed(nesdev,
NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)));
if (phy_type == NES_PHY_TYPE_SFP_D) {
if (phy_data & 0x0004)
nesvnic->linkup = 1;
} else {
if ((u32temp & link_mask) == link_val)
nesvnic->linkup = 1;
}
/* clear the MAC interrupt status, assumes direct logical to physical mapping */
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp);
nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp);
nes_init_phy(nesdev);
}
nes_vlan_mode(netdev, nesdev, netdev->features);
return netdev;
}
/**
* nes_netdev_destroy - destroy network device structure
*/
void nes_netdev_destroy(struct net_device *netdev)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
/* make sure 'stop' method is called by Linux stack */
/* nes_netdev_stop(netdev); */
list_del(&nesvnic->list);
if (nesvnic->of_device_registered) {
nes_destroy_ofa_device(nesvnic->nesibdev);
}
free_netdev(netdev);
}
/**
* nes_nic_cm_xmit -- CM calls this to send out pkts
*/
int nes_nic_cm_xmit(struct sk_buff *skb, struct net_device *netdev)
{
int ret;
skb->dev = netdev;
ret = dev_queue_xmit(skb);
if (ret) {
nes_debug(NES_DBG_CM, "Bad return code from dev_queue_xmit %d\n", ret);
}
return ret;
}
| gpl-2.0 |
xlfjn/kernel_htc_hima | drivers/hid/hid-gyration.c | 4686 | 2888 | /*
* HID driver for some gyration "special" devices
*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
* Copyright (c) 2008 Jiri Slaby
* Copyright (c) 2006-2008 Jiri Kosina
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"
#define gy_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
EV_KEY, (c))
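/*
 * For reference (added comment): gy_map_key_clear(KEY_HOME) expands to
 * hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_HOME), i.e. it
 * maps the current HID usage to the given input key code and clears
 * any previous mapping for it.
 */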
static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
return 0;
set_bit(EV_REP, hi->input->evbit);
switch (usage->hid & HID_USAGE) {
/* Reported on Gyration MCE Remote */
case 0x00d: gy_map_key_clear(KEY_HOME); break;
case 0x024: gy_map_key_clear(KEY_DVD); break;
case 0x025: gy_map_key_clear(KEY_PVR); break;
case 0x046: gy_map_key_clear(KEY_MEDIA); break;
case 0x047: gy_map_key_clear(KEY_MP3); break;
case 0x048: gy_map_key_clear(KEY_MEDIA); break;
case 0x049: gy_map_key_clear(KEY_CAMERA); break;
case 0x04a: gy_map_key_clear(KEY_VIDEO); break;
case 0x05a: gy_map_key_clear(KEY_TEXT); break;
case 0x05b: gy_map_key_clear(KEY_RED); break;
case 0x05c: gy_map_key_clear(KEY_GREEN); break;
case 0x05d: gy_map_key_clear(KEY_YELLOW); break;
case 0x05e: gy_map_key_clear(KEY_BLUE); break;
default:
return 0;
}
return 1;
}
static int gyration_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
return 0;
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
(usage->hid & 0xff) == 0x82) {
struct input_dev *input = field->hidinput->input;
input_event(input, usage->type, usage->code, 1);
input_sync(input);
input_event(input, usage->type, usage->code, 0);
input_sync(input);
return 1;
}
return 0;
}
static const struct hid_device_id gyration_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ }
};
MODULE_DEVICE_TABLE(hid, gyration_devices);
static struct hid_driver gyration_driver = {
.name = "gyration",
.id_table = gyration_devices,
.input_mapping = gyration_input_mapping,
.event = gyration_event,
};
module_hid_driver(gyration_driver);
MODULE_LICENSE("GPL");
| gpl-2.0 |
penhoi/linux-3.14.56 | drivers/hid/hid-gyration.c | 4686 | 2888 | /*
* HID driver for some gyration "special" devices
*
* Copyright (c) 1999 Andreas Gal
* Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
* Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
* Copyright (c) 2008 Jiri Slaby
* Copyright (c) 2006-2008 Jiri Kosina
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/device.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"
#define gy_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \
EV_KEY, (c))
static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR)
return 0;
set_bit(EV_REP, hi->input->evbit);
switch (usage->hid & HID_USAGE) {
/* Reported on Gyration MCE Remote */
case 0x00d: gy_map_key_clear(KEY_HOME); break;
case 0x024: gy_map_key_clear(KEY_DVD); break;
case 0x025: gy_map_key_clear(KEY_PVR); break;
case 0x046: gy_map_key_clear(KEY_MEDIA); break;
case 0x047: gy_map_key_clear(KEY_MP3); break;
case 0x048: gy_map_key_clear(KEY_MEDIA); break;
case 0x049: gy_map_key_clear(KEY_CAMERA); break;
case 0x04a: gy_map_key_clear(KEY_VIDEO); break;
case 0x05a: gy_map_key_clear(KEY_TEXT); break;
case 0x05b: gy_map_key_clear(KEY_RED); break;
case 0x05c: gy_map_key_clear(KEY_GREEN); break;
case 0x05d: gy_map_key_clear(KEY_YELLOW); break;
case 0x05e: gy_map_key_clear(KEY_BLUE); break;
default:
return 0;
}
return 1;
}
static int gyration_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
return 0;
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
(usage->hid & 0xff) == 0x82) {
struct input_dev *input = field->hidinput->input;
input_event(input, usage->type, usage->code, 1);
input_sync(input);
input_event(input, usage->type, usage->code, 0);
input_sync(input);
return 1;
}
return 0;
}
static const struct hid_device_id gyration_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ }
};
MODULE_DEVICE_TABLE(hid, gyration_devices);
static struct hid_driver gyration_driver = {
.name = "gyration",
.id_table = gyration_devices,
.input_mapping = gyration_input_mapping,
.event = gyration_event,
};
module_hid_driver(gyration_driver);
MODULE_LICENSE("GPL");
| gpl-2.0 |
CyanogenMod/android_kernel_sony_msm8x27 | drivers/staging/tidspbridge/rmgr/dspdrv.c | 4942 | 3947 | /*
* dspdrv.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Interface to allocate and free bridge resources.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- Host OS */
#include <linux/types.h>
#include <dspbridge/host_os.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>
#include <dspbridge/dspapi.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/mgr.h>
/* ----------------------------------- This */
#include <dspbridge/dspdrv.h>
/*
* ======== dsp_init ========
* Allocates bridge resources. Loads a base image onto DSP, if specified.
*/
u32 dsp_init(u32 *init_status)
{
char dev_node[MAXREGPATHLENGTH] = "TIOMAP1510";
int status = -EPERM;
struct drv_object *drv_obj = NULL;
u32 device_node;
u32 device_node_string;
if (!api_init())
goto func_cont;
status = drv_create(&drv_obj);
if (status) {
api_exit();
goto func_cont;
}
/* End drv_create */
/* Request Resources */
status = drv_request_resources((u32) &dev_node, &device_node_string);
if (!status) {
/* Attempt to Start the Device */
status = dev_start_device((struct cfg_devnode *)
device_node_string);
if (status)
(void)drv_release_resources
((u32) device_node_string, drv_obj);
} else {
dev_dbg(bridge, "%s: drv_request_resources Failed\n", __func__);
status = -EPERM;
}
/* Unwind whatever was loaded */
if (status) {
/* irrespective of the status of dev_remove_device we continue
* unloading. Get the Driver Object, iterate through, and remove.
* Reset the status to E_FAIL to avoid going through
* api_init_complete2. */
for (device_node = drv_get_first_dev_extension();
device_node != 0;
device_node = drv_get_next_dev_extension(device_node)) {
(void)dev_remove_device((struct cfg_devnode *)
device_node);
(void)drv_release_resources((u32) device_node, drv_obj);
}
/* Remove the Driver Object */
(void)drv_destroy(drv_obj);
drv_obj = NULL;
api_exit();
dev_dbg(bridge, "%s: Logical device failed init\n", __func__);
} /* Unwinding the loaded drivers */
func_cont:
/* Attempt to Start the Board */
if (!status) {
/* BRD_AutoStart could fail if the dsp executable is not the
* correct one. We should not propagate that error
* into the device loader. */
(void)api_init_complete2();
} else {
dev_dbg(bridge, "%s: Failed\n", __func__);
} /* End api_init_complete2 */
*init_status = status;
/* Return the Driver Object */
return (u32) drv_obj;
}
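/*
 * Usage sketch (added comment, not in the original source): dsp_init()
 * returns the driver object handle as a u32, which is later passed
 * back as the device_context argument of dsp_deinit():
 *
 *	u32 init_status;
 *	u32 ctx = dsp_init(&init_status);
 *	...
 *	dsp_deinit(ctx);
 */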
/*
* ======== dsp_deinit ========
* Frees the resources allocated for bridge.
*/
bool dsp_deinit(u32 device_context)
{
bool ret = true;
u32 device_node;
struct mgr_object *mgr_obj = NULL;
struct drv_data *drv_datap = dev_get_drvdata(bridge);
while ((device_node = drv_get_first_dev_extension()) != 0) {
(void)dev_remove_device((struct cfg_devnode *)device_node);
(void)drv_release_resources((u32) device_node,
(struct drv_object *)device_context);
}
(void)drv_destroy((struct drv_object *)device_context);
/* Get the Manager Object from driver data
* MGR Destroy will unload the DCD dll */
if (drv_datap && drv_datap->mgr_object) {
mgr_obj = drv_datap->mgr_object;
(void)mgr_destroy(mgr_obj);
} else {
pr_err("%s: Failed to retrieve the object handle\n", __func__);
}
api_exit();
return ret;
}
| gpl-2.0 |
rex-xxx/mt6572_x201 | kernel/drivers/staging/tidspbridge/rmgr/node.c | 4942 | 84642 | /*
* node.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* DSP/BIOS Bridge Node Manager.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/list.h>
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/memdefs.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/sync.h>
#include <dspbridge/ntfy.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/cmm.h>
#include <dspbridge/cod.h>
#include <dspbridge/dev.h>
#include <dspbridge/msg.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/dbdcd.h>
#include <dspbridge/disp.h>
#include <dspbridge/rms_sh.h>
/* ----------------------------------- Link Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspioctl.h>
/* ----------------------------------- Others */
#include <dspbridge/uuidutil.h>
/* ----------------------------------- This */
#include <dspbridge/nodepriv.h>
#include <dspbridge/node.h>
#include <dspbridge/dmm.h>
/* Static/Dynamic Loader includes */
#include <dspbridge/dbll.h>
#include <dspbridge/nldr.h>
#include <dspbridge/drv.h>
#include <dspbridge/resourcecleanup.h>
#include <_tiomap.h>
#include <dspbridge/dspdeh.h>
#define HOSTPREFIX "/host"
#define PIPEPREFIX "/dbpipe"
#define MAX_INPUTS(h) \
((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
#define MAX_OUTPUTS(h) \
((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)
#define NODE_GET_PRIORITY(h) ((h)->prio)
#define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
#define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)
#define MAXPIPES 100 /* Max # of /pipe connections (CSL limit) */
#define MAXDEVSUFFIXLEN 2 /* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */
#define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
#define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)
#define MAXDEVNAMELEN 32 /* dsp_ndbprops.ac_name size */
#define CREATEPHASE 1
#define EXECUTEPHASE 2
#define DELETEPHASE 3
/* Define default STRM parameters */
/*
* TBD: Put in header file, make global DSP_STRMATTRS with defaults,
* or make defaults configurable.
*/
#define DEFAULTBUFSIZE 32
#define DEFAULTNBUFS 2
#define DEFAULTSEGID 0
#define DEFAULTALIGNMENT 0
#define DEFAULTTIMEOUT 10000
#define RMSQUERYSERVER 0
#define RMSCONFIGURESERVER 1
#define RMSCREATENODE 2
#define RMSEXECUTENODE 3
#define RMSDELETENODE 4
#define RMSCHANGENODEPRIORITY 5
#define RMSREADMEMORY 6
#define RMSWRITEMEMORY 7
#define RMSCOPY 8
#define MAXTIMEOUT 2000
#define NUMRMSFXNS 9
#define PWR_TIMEOUT 500 /* default PWR timeout in msec */
#define STACKSEGLABEL "L1DSRAM_HEAP" /* Label for DSP Stack Segment Addr */
/*
* ======== node_mgr ========
*/
struct node_mgr {
struct dev_object *dev_obj; /* Device object */
/* Function interface to Bridge driver */
struct bridge_drv_interface *intf_fxns;
struct dcd_manager *dcd_mgr; /* Proc/Node data manager */
struct disp_object *disp_obj; /* Node dispatcher */
struct list_head node_list; /* List of all allocated nodes */
u32 num_nodes; /* Number of nodes in node_list */
u32 num_created; /* Number of nodes *created* on DSP */
DECLARE_BITMAP(pipe_map, MAXPIPES); /* Pipe connection bitmap */
DECLARE_BITMAP(pipe_done_map, MAXPIPES); /* Pipes that are half free */
/* Channel allocation bitmap */
DECLARE_BITMAP(chnl_map, CHNL_MAXCHANNELS);
/* DMA Channel allocation bitmap */
DECLARE_BITMAP(dma_chnl_map, CHNL_MAXCHANNELS);
/* Zero-Copy Channel alloc bitmap */
DECLARE_BITMAP(zc_chnl_map, CHNL_MAXCHANNELS);
struct ntfy_object *ntfy_obj; /* Manages registered notifications */
struct mutex node_mgr_lock; /* For critical sections */
u32 fxn_addrs[NUMRMSFXNS]; /* RMS function addresses */
struct msg_mgr *msg_mgr_obj;
/* Processor properties needed by Node Dispatcher */
u32 num_chnls; /* Total number of channels */
u32 chnl_offset; /* Offset of chnl ids rsvd for RMS */
u32 chnl_buf_size; /* Buffer size for data to RMS */
int proc_family; /* eg, 5000 */
int proc_type; /* eg, 5510 */
u32 dsp_word_size; /* Size of DSP word on host bytes */
u32 dsp_data_mau_size; /* Size of DSP data MAU */
u32 dsp_mau_size; /* Size of MAU */
s32 min_pri; /* Minimum runtime priority for node */
s32 max_pri; /* Maximum runtime priority for node */
struct strm_mgr *strm_mgr_obj; /* STRM manager */
/* Loader properties */
struct nldr_object *nldr_obj; /* Handle to loader */
struct node_ldr_fxns nldr_fxns; /* Handle to loader functions */
};
/*
* ======== connecttype ========
*/
enum connecttype {
NOTCONNECTED = 0,
NODECONNECT,
HOSTCONNECT,
DEVICECONNECT,
};
/*
* ======== stream_chnl ========
*/
struct stream_chnl {
enum connecttype type; /* Type of stream connection */
u32 dev_id; /* pipe or channel id */
};
/*
* ======== node_object ========
*/
struct node_object {
struct list_head list_elem;
struct node_mgr *node_mgr; /* The manager of this node */
struct proc_object *processor; /* Back pointer to processor */
struct dsp_uuid node_uuid; /* Node's ID */
s32 prio; /* Node's current priority */
u32 timeout; /* Timeout for blocking NODE calls */
u32 heap_size; /* Heap Size */
u32 dsp_heap_virt_addr; /* Heap Size */
u32 gpp_heap_virt_addr; /* Heap Size */
enum node_type ntype; /* Type of node: message, task, etc */
enum node_state node_state; /* NODE_ALLOCATED, NODE_CREATED, ... */
u32 num_inputs; /* Current number of inputs */
u32 num_outputs; /* Current number of outputs */
u32 max_input_index; /* Current max input stream index */
u32 max_output_index; /* Current max output stream index */
struct stream_chnl *inputs; /* Node's input streams */
struct stream_chnl *outputs; /* Node's output streams */
struct node_createargs create_args; /* Args for node create func */
nodeenv node_env; /* Environment returned by RMS */
struct dcd_genericobj dcd_props; /* Node properties from DCD */
struct dsp_cbdata *args; /* Optional args to pass to node */
struct ntfy_object *ntfy_obj; /* Manages registered notifications */
char *str_dev_name; /* device name, if device node */
struct sync_object *sync_done; /* Synchronize node_terminate */
s32 exit_status; /* execute function return status */
/* Information needed for node_get_attr() */
void *device_owner; /* If dev node, task that owns it */
u32 num_gpp_inputs; /* Current # of from GPP streams */
u32 num_gpp_outputs; /* Current # of to GPP streams */
/* Current stream connections */
struct dsp_streamconnect *stream_connect;
/* Message queue */
struct msg_queue *msg_queue_obj;
/* These fields used for SM messaging */
struct cmm_xlatorobject *xlator; /* Node's SM addr translator */
/* Handle to pass to dynamic loader */
struct nldr_nodeobject *nldr_node_obj;
bool loaded; /* Code is (dynamically) loaded */
bool phase_split; /* Phases split in many libs or ovly */
};
/* Default buffer attributes */
static struct dsp_bufferattr node_dfltbufattrs = {
.cb_struct = 0,
.segment_id = 1,
.buf_alignment = 0,
};
static void delete_node(struct node_object *hnode,
struct process_context *pr_ctxt);
static void delete_node_mgr(struct node_mgr *hnode_mgr);
static void fill_stream_connect(struct node_object *node1,
struct node_object *node2, u32 stream1,
u32 stream2);
static void fill_stream_def(struct node_object *hnode,
struct node_strmdef *pstrm_def,
struct dsp_strmattr *pattrs);
static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
u32 phase);
static int get_node_props(struct dcd_manager *hdcd_mgr,
struct node_object *hnode,
const struct dsp_uuid *node_uuid,
struct dcd_genericobj *dcd_prop);
static int get_proc_props(struct node_mgr *hnode_mgr,
struct dev_object *hdev_obj);
static int get_rms_fxns(struct node_mgr *hnode_mgr);
static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
u32 ul_num_bytes, u32 mem_space);
static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
u32 ul_num_bytes, u32 mem_space);
/* Dynamic loader functions. */
static struct node_ldr_fxns nldr_fxns = {
nldr_allocate,
nldr_create,
nldr_delete,
nldr_get_fxn_addr,
nldr_load,
nldr_unload,
};
enum node_state node_get_state(void *hnode)
{
struct node_object *pnode = (struct node_object *)hnode;
if (!pnode)
return -1;
return pnode->node_state;
}
/*
* ======== node_allocate ========
* Purpose:
* Allocate GPP resources to manage a node on the DSP.
*/
int node_allocate(struct proc_object *hprocessor,
const struct dsp_uuid *node_uuid,
const struct dsp_cbdata *pargs,
const struct dsp_nodeattrin *attr_in,
struct node_res_object **noderes,
struct process_context *pr_ctxt)
{
struct node_mgr *hnode_mgr;
struct dev_object *hdev_obj;
struct node_object *pnode = NULL;
enum node_type node_type = NODE_TASK;
struct node_msgargs *pmsg_args;
struct node_taskargs *ptask_args;
u32 num_streams;
struct bridge_drv_interface *intf_fxns;
int status = 0;
struct cmm_object *hcmm_mgr = NULL; /* Shared memory manager hndl */
u32 proc_id;
u32 pul_value;
u32 dynext_base;
u32 off_set = 0;
u32 ul_stack_seg_addr, ul_stack_seg_val;
u32 ul_gpp_mem_base;
struct cfg_hostres *host_res;
struct bridge_dev_context *pbridge_context;
u32 mapped_addr = 0;
u32 map_attrs = 0x0;
struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
struct dmm_object *dmm_mgr;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif
void *node_res;
*noderes = NULL;
status = proc_get_processor_id(hprocessor, &proc_id);
if (proc_id != DSP_UNIT)
goto func_end;
status = proc_get_dev_object(hprocessor, &hdev_obj);
if (!status) {
status = dev_get_node_manager(hdev_obj, &hnode_mgr);
if (hnode_mgr == NULL)
status = -EPERM;
}
if (status)
goto func_end;
status = dev_get_bridge_context(hdev_obj, &pbridge_context);
if (!pbridge_context) {
status = -EFAULT;
goto func_end;
}
status = proc_get_state(hprocessor, &proc_state,
sizeof(struct dsp_processorstate));
if (status)
goto func_end;
/* If processor is in error state then don't attempt
to send the message */
if (proc_state.proc_state == PROC_ERROR) {
status = -EPERM;
goto func_end;
}
/* Assuming that 0 is not a valid function address */
if (hnode_mgr->fxn_addrs[0] == 0) {
/* No RMS on target - we currently can't handle this */
pr_err("%s: Failed, no RMS in base image\n", __func__);
status = -EPERM;
} else {
/* Validate attr_in fields, if non-NULL */
if (attr_in) {
/* Check if attr_in->prio is within range */
if (attr_in->prio < hnode_mgr->min_pri ||
attr_in->prio > hnode_mgr->max_pri)
status = -EDOM;
}
}
/* Allocate node object and fill in */
if (status)
goto func_end;
pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
if (pnode == NULL) {
status = -ENOMEM;
goto func_end;
}
pnode->node_mgr = hnode_mgr;
/* This critical section protects get_node_props */
mutex_lock(&hnode_mgr->node_mgr_lock);
/* Get dsp_ndbprops from node database */
status = get_node_props(hnode_mgr->dcd_mgr, pnode, node_uuid,
&(pnode->dcd_props));
if (status)
goto func_cont;
pnode->node_uuid = *node_uuid;
pnode->processor = hprocessor;
pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
pnode->timeout = pnode->dcd_props.obj_data.node_obj.ndb_props.timeout;
pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;
/* Currently only C64 DSP builds support Node Dynamic * heaps */
/* Allocate memory for node heap */
pnode->create_args.asa.task_arg_obj.heap_size = 0;
pnode->create_args.asa.task_arg_obj.dsp_heap_addr = 0;
pnode->create_args.asa.task_arg_obj.dsp_heap_res_addr = 0;
pnode->create_args.asa.task_arg_obj.gpp_heap_addr = 0;
if (!attr_in)
goto func_cont;
/* Check if we have a user allocated node heap */
if (!(attr_in->pgpp_virt_addr))
goto func_cont;
/* check for page aligned Heap size */
if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
__func__, attr_in->heap_size);
status = -EINVAL;
} else {
pnode->create_args.asa.task_arg_obj.heap_size =
attr_in->heap_size;
pnode->create_args.asa.task_arg_obj.gpp_heap_addr =
(u32) attr_in->pgpp_virt_addr;
}
if (status)
goto func_cont;
status = proc_reserve_memory(hprocessor,
pnode->create_args.asa.task_arg_obj.
heap_size + PAGE_SIZE,
(void **)&(pnode->create_args.asa.
task_arg_obj.dsp_heap_res_addr),
pr_ctxt);
if (status) {
pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
__func__, status);
goto func_cont;
}
#ifdef DSP_DMM_DEBUG
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (!dmm_mgr) {
status = DSP_EHANDLE;
goto func_cont;
}
dmm_mem_map_dump(dmm_mgr);
#endif
map_attrs |= DSP_MAPLITTLEENDIAN;
map_attrs |= DSP_MAPELEMSIZE32;
map_attrs |= DSP_MAPVIRTUALADDR;
status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
pnode->create_args.asa.task_arg_obj.heap_size,
(void *)pnode->create_args.asa.task_arg_obj.
dsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
pr_ctxt);
if (status)
pr_err("%s: Failed to map memory for Heap: 0x%x\n",
__func__, status);
else
pnode->create_args.asa.task_arg_obj.dsp_heap_addr =
(u32) mapped_addr;
func_cont:
mutex_unlock(&hnode_mgr->node_mgr_lock);
if (attr_in != NULL) {
/* Overrides of NBD properties */
pnode->timeout = attr_in->timeout;
pnode->prio = attr_in->prio;
}
/* Create object to manage notifications */
if (!status) {
pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
GFP_KERNEL);
if (pnode->ntfy_obj)
ntfy_init(pnode->ntfy_obj);
else
status = -ENOMEM;
}
if (!status) {
node_type = node_get_type(pnode);
/* Allocate dsp_streamconnect array for device, task, and
* dais socket nodes. */
if (node_type != NODE_MESSAGE) {
num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
pnode->stream_connect = kzalloc(num_streams *
sizeof(struct dsp_streamconnect),
GFP_KERNEL);
if (num_streams > 0 && pnode->stream_connect == NULL)
status = -ENOMEM;
}
if (!status && (node_type == NODE_TASK ||
node_type == NODE_DAISSOCKET)) {
/* Allocate arrays for maintaining stream connections */
pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
sizeof(struct stream_chnl), GFP_KERNEL);
pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
sizeof(struct stream_chnl), GFP_KERNEL);
ptask_args = &(pnode->create_args.asa.task_arg_obj);
ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
sizeof(struct node_strmdef),
GFP_KERNEL);
ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
sizeof(struct node_strmdef),
GFP_KERNEL);
if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
ptask_args->strm_in_def
== NULL))
|| (MAX_OUTPUTS(pnode) > 0
&& (pnode->outputs == NULL
|| ptask_args->strm_out_def == NULL)))
status = -ENOMEM;
}
}
if (!status && (node_type != NODE_DEVICE)) {
/* Create an event that will be posted when RMS_EXIT is
* received. */
pnode->sync_done = kzalloc(sizeof(struct sync_object),
GFP_KERNEL);
if (pnode->sync_done)
sync_init_event(pnode->sync_done);
else
status = -ENOMEM;
if (!status) {
/*Get the shared mem mgr for this nodes dev object */
status = cmm_get_handle(hprocessor, &hcmm_mgr);
if (!status) {
/* Allocate a SM addr translator for this node
* w/ deflt attr */
status = cmm_xlator_create(&pnode->xlator,
hcmm_mgr, NULL);
}
}
if (!status) {
/* Fill in message args */
if ((pargs != NULL) && (pargs->cb_data > 0)) {
pmsg_args =
&(pnode->create_args.asa.node_msg_args);
pmsg_args->pdata = kzalloc(pargs->cb_data,
GFP_KERNEL);
if (pmsg_args->pdata == NULL) {
status = -ENOMEM;
} else {
pmsg_args->arg_length = pargs->cb_data;
memcpy(pmsg_args->pdata,
pargs->node_data,
pargs->cb_data);
}
}
}
}
if (!status && node_type != NODE_DEVICE) {
/* Create a message queue for this node */
intf_fxns = hnode_mgr->intf_fxns;
status =
(*intf_fxns->msg_create_queue) (hnode_mgr->msg_mgr_obj,
&pnode->msg_queue_obj,
0,
pnode->create_args.asa.
node_msg_args.max_msgs,
pnode);
}
if (!status) {
/* Create object for dynamic loading */
status = hnode_mgr->nldr_fxns.allocate(hnode_mgr->nldr_obj,
(void *)pnode,
&pnode->dcd_props.
obj_data.node_obj,
&pnode->
nldr_node_obj,
&pnode->phase_split);
}
/* Compare the value read from the node properties with STACKSEGLABEL;
* if they match, read the address of STACKSEGLABEL, calculate the
* GPP address, read the value at that address, and override the
* stack_seg value in the task args */
if (!status &&
(char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
stack_seg_name != NULL) {
if (strcmp((char *)
pnode->dcd_props.obj_data.node_obj.ndb_props.
stack_seg_name, STACKSEGLABEL) == 0) {
status =
hnode_mgr->nldr_fxns.
get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
&dynext_base);
if (status)
pr_err("%s: Failed to get addr for DYNEXT_BEG"
" status = 0x%x\n", __func__, status);
status =
hnode_mgr->nldr_fxns.
get_fxn_addr(pnode->nldr_node_obj,
"L1DSRAM_HEAP", &pul_value);
if (status)
pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
" status = 0x%x\n", __func__, status);
host_res = pbridge_context->resources;
if (!host_res)
status = -EPERM;
if (status) {
pr_err("%s: Failed to get host resource, status"
" = 0x%x\n", __func__, status);
goto func_end;
}
ul_gpp_mem_base = (u32) host_res->mem_base[1];
off_set = pul_value - dynext_base;
ul_stack_seg_addr = ul_gpp_mem_base + off_set;
ul_stack_seg_val = readl(ul_stack_seg_addr);
dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
" 0x%x\n", __func__, ul_stack_seg_val,
ul_stack_seg_addr);
pnode->create_args.asa.task_arg_obj.stack_seg =
ul_stack_seg_val;
}
}
if (!status) {
/* Add the node to the node manager's list of allocated
* nodes. */
NODE_SET_STATE(pnode, NODE_ALLOCATED);
mutex_lock(&hnode_mgr->node_mgr_lock);
list_add_tail(&pnode->list_elem, &hnode_mgr->node_list);
++(hnode_mgr->num_nodes);
/* Exit critical section */
mutex_unlock(&hnode_mgr->node_mgr_lock);
/* Preset this to assume phases are split
* (for overlay and dll) */
pnode->phase_split = true;
/* Notify all clients registered for DSP_NODESTATECHANGE. */
proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
} else {
/* Cleanup */
if (pnode)
delete_node(pnode, pr_ctxt);
}
if (!status) {
status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
if (status) {
delete_node(pnode, pr_ctxt);
goto func_end;
}
*noderes = (struct node_res_object *)node_res;
drv_proc_node_update_heap_status(node_res, true);
drv_proc_node_update_status(node_res, true);
}
func_end:
dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
"node_res: %p status: 0x%x\n", __func__, hprocessor,
node_uuid, pargs, attr_in, noderes, status);
return status;
}
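/*
 * Hedged usage sketch (not part of the original file): a minimal
 * caller of node_allocate(), passing NULL for the optional args so the
 * defaults from the node database apply. example_alloc_node is an
 * invented name used only for illustration.
 */
static int example_alloc_node(struct proc_object *hprocessor,
		const struct dsp_uuid *node_uuid,
		struct process_context *pr_ctxt)
{
	struct node_res_object *node_res;

	/* NULL pargs and attr_in: use the node database defaults */
	return node_allocate(hprocessor, node_uuid, NULL, NULL,
			&node_res, pr_ctxt);
}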
/*
* ======== node_alloc_msg_buf ========
* Purpose:
* Allocates buffer for zero copy messaging.
*/
DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
struct dsp_bufferattr *pattr,
u8 **pbuffer)
{
struct node_object *pnode = (struct node_object *)hnode;
int status = 0;
bool va_flag = false;
bool set_info;
u32 proc_id;
if (!pnode)
status = -EFAULT;
else if (node_get_type(pnode) == NODE_DEVICE)
status = -EPERM;
if (status)
goto func_end;
if (pattr == NULL)
pattr = &node_dfltbufattrs; /* set defaults */
status = proc_get_processor_id(pnode->processor, &proc_id);
if (proc_id != DSP_UNIT) {
goto func_end;
}
/* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
* virt address, so set this info in this node's translator
* object for future ref. If MEM_GETVIRTUALSEGID then retrieve
* virtual address from node's translator. */
if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
(pattr->segment_id & MEM_GETVIRTUALSEGID)) {
va_flag = true;
set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
true : false;
/* Clear mask bits */
pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
/* Set/get this node's translators virtual address base/size */
status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
pattr->segment_id, set_info);
}
if (!status && (!va_flag)) {
if (pattr->segment_id != 1) {
/* Node supports single SM segment only. */
status = -EBADR;
}
/* Arbitrary SM buffer alignment not supported for host side
* allocs, but guaranteed for the following alignment
* values. */
switch (pattr->buf_alignment) {
case 0:
case 1:
case 2:
case 4:
break;
default:
/* alignment value not supported */
status = -EPERM;
break;
}
if (!status) {
/* allocate physical buffer from seg_id in node's
* translator */
(void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
usize);
if (*pbuffer == NULL) {
pr_err("%s: error - Out of shared memory\n",
__func__);
status = -ENOMEM;
}
}
}
func_end:
return status;
}
/*
* ======== node_change_priority ========
* Purpose:
* Change the priority of a node in the allocated state, or that is
* currently running or paused on the target.
*/
int node_change_priority(struct node_object *hnode, s32 prio)
{
struct node_object *pnode = (struct node_object *)hnode;
struct node_mgr *hnode_mgr = NULL;
enum node_type node_type;
enum node_state state;
int status = 0;
u32 proc_id;
if (!hnode || !hnode->node_mgr) {
status = -EFAULT;
} else {
hnode_mgr = hnode->node_mgr;
node_type = node_get_type(hnode);
if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
status = -EPERM;
else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
status = -EDOM;
}
if (status)
goto func_end;
/* Enter critical section */
mutex_lock(&hnode_mgr->node_mgr_lock);
state = node_get_state(hnode);
if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
NODE_SET_PRIORITY(hnode, prio);
} else {
if (state != NODE_RUNNING) {
status = -EBADR;
goto func_cont;
}
status = proc_get_processor_id(pnode->processor, &proc_id);
if (proc_id == DSP_UNIT) {
status =
disp_node_change_priority(hnode_mgr->disp_obj,
hnode,
hnode_mgr->fxn_addrs
[RMSCHANGENODEPRIORITY],
hnode->node_env, prio);
}
if (status >= 0)
NODE_SET_PRIORITY(hnode, prio);
}
func_cont:
/* Leave critical section */
mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
return status;
}
/*
* ======== node_connect ========
* Purpose:
* Connect two nodes on the DSP, or a node on the DSP to the GPP.
*/
int node_connect(struct node_object *node1, u32 stream1,
struct node_object *node2,
u32 stream2, struct dsp_strmattr *pattrs,
struct dsp_cbdata *conn_param)
{
struct node_mgr *hnode_mgr;
char *pstr_dev_name = NULL;
enum node_type node1_type = NODE_TASK;
enum node_type node2_type = NODE_TASK;
enum dsp_strmmode strm_mode;
struct node_strmdef *pstrm_def;
struct node_strmdef *input = NULL;
struct node_strmdef *output = NULL;
struct node_object *dev_node_obj;
struct node_object *hnode;
struct stream_chnl *pstream;
u32 pipe_id;
u32 chnl_id;
s8 chnl_mode;
u32 dw_length;
int status = 0;
if (!node1 || !node2)
return -EFAULT;
/* The two nodes must be on the same processor */
if (node1 != (struct node_object *)DSP_HGPPNODE &&
node2 != (struct node_object *)DSP_HGPPNODE &&
node1->node_mgr != node2->node_mgr)
return -EPERM;
/* Cannot connect a node to itself */
if (node1 == node2)
return -EPERM;
/* node_get_type() will return NODE_GPP if hnode = DSP_HGPPNODE. */
node1_type = node_get_type(node1);
node2_type = node_get_type(node2);
/* Check stream indices ranges */
if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
stream1 >= MAX_OUTPUTS(node1)) ||
(node2_type != NODE_GPP && node2_type != NODE_DEVICE &&
stream2 >= MAX_INPUTS(node2)))
return -EINVAL;
/*
* Only the following types of connections are allowed:
* task/dais socket < == > task/dais socket
* task/dais socket < == > device
* task/dais socket < == > GPP
*
* i.e., no message nodes, and at least one task or dais
* socket node.
*/
if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
(node1_type != NODE_TASK &&
node1_type != NODE_DAISSOCKET &&
node2_type != NODE_TASK &&
node2_type != NODE_DAISSOCKET))
return -EPERM;
/*
* Check stream mode. Default is STRMMODE_PROCCOPY.
*/
if (pattrs && pattrs->strm_mode != STRMMODE_PROCCOPY)
return -EPERM; /* illegal stream mode */
if (node1_type != NODE_GPP) {
hnode_mgr = node1->node_mgr;
} else {
hnode_mgr = node2->node_mgr;
}
/* Enter critical section */
mutex_lock(&hnode_mgr->node_mgr_lock);
/* Nodes must be in the allocated state */
if (node1_type != NODE_GPP &&
node_get_state(node1) != NODE_ALLOCATED) {
status = -EBADR;
goto out_unlock;
}
if (node2_type != NODE_GPP &&
node_get_state(node2) != NODE_ALLOCATED) {
status = -EBADR;
goto out_unlock;
}
/*
* Check that stream indices for task and dais socket nodes
* are not already in use. (Device nodes are checked later.)
*/
if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
output = &(node1->create_args.asa.
task_arg_obj.strm_out_def[stream1]);
if (output->sz_device) {
status = -EISCONN;
goto out_unlock;
}
}
if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
input = &(node2->create_args.asa.
task_arg_obj.strm_in_def[stream2]);
if (input->sz_device) {
status = -EISCONN;
goto out_unlock;
}
}
/* Connecting two task nodes? */
if ((node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) &&
(node2_type == NODE_TASK ||
node2_type == NODE_DAISSOCKET)) {
/* Find available pipe */
pipe_id = find_first_zero_bit(hnode_mgr->pipe_map, MAXPIPES);
if (pipe_id == MAXPIPES) {
status = -ECONNREFUSED;
goto out_unlock;
}
set_bit(pipe_id, hnode_mgr->pipe_map);
node1->outputs[stream1].type = NODECONNECT;
node2->inputs[stream2].type = NODECONNECT;
node1->outputs[stream1].dev_id = pipe_id;
node2->inputs[stream2].dev_id = pipe_id;
output->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
if (!output->sz_device || !input->sz_device) {
/* Undo the connection */
kfree(output->sz_device);
kfree(input->sz_device);
clear_bit(pipe_id, hnode_mgr->pipe_map);
status = -ENOMEM;
goto out_unlock;
}
/* Copy "/dbpipe<pipe_id>" name to the device names */
sprintf(output->sz_device, "%s%d", PIPEPREFIX, pipe_id);
strcpy(input->sz_device, output->sz_device);
}
/* Connecting task node to host? */
if (node1_type == NODE_GPP || node2_type == NODE_GPP) {
pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
if (!pstr_dev_name) {
status = -ENOMEM;
goto out_unlock;
}
chnl_mode = (node1_type == NODE_GPP) ?
CHNL_MODETODSP : CHNL_MODEFROMDSP;
/*
* Reserve a channel id. We need to put the name "/host<id>"
* in the node's create_args, but the host
* side channel will not be opened until DSPStream_Open is
* called for this node.
*/
strm_mode = pattrs ? pattrs->strm_mode : STRMMODE_PROCCOPY;
switch (strm_mode) {
case STRMMODE_RDMA:
chnl_id = find_first_zero_bit(hnode_mgr->dma_chnl_map,
CHNL_MAXCHANNELS);
if (chnl_id < CHNL_MAXCHANNELS) {
set_bit(chnl_id, hnode_mgr->dma_chnl_map);
/* dma chans are the 2nd transport chnl set
* (ids e.g. 16-31) */
chnl_id = chnl_id + hnode_mgr->num_chnls;
}
break;
case STRMMODE_ZEROCOPY:
chnl_id = find_first_zero_bit(hnode_mgr->zc_chnl_map,
CHNL_MAXCHANNELS);
if (chnl_id < CHNL_MAXCHANNELS) {
set_bit(chnl_id, hnode_mgr->zc_chnl_map);
/* zero-copy chans are the 3rd transport set
* (ids e.g. 32-47) */
chnl_id = chnl_id +
(2 * hnode_mgr->num_chnls);
}
break;
case STRMMODE_PROCCOPY:
chnl_id = find_first_zero_bit(hnode_mgr->chnl_map,
CHNL_MAXCHANNELS);
if (chnl_id < CHNL_MAXCHANNELS)
set_bit(chnl_id, hnode_mgr->chnl_map);
break;
default:
status = -EINVAL;
goto out_unlock;
}
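/*
* find_first_zero_bit() returns CHNL_MAXCHANNELS when no channel of
* the requested transport type is free.
*/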
if (chnl_id == CHNL_MAXCHANNELS) {
status = -ECONNREFUSED;
goto out_unlock;
}
if (node1 == (struct node_object *)DSP_HGPPNODE) {
node2->inputs[stream2].type = HOSTCONNECT;
node2->inputs[stream2].dev_id = chnl_id;
input->sz_device = pstr_dev_name;
} else {
node1->outputs[stream1].type = HOSTCONNECT;
node1->outputs[stream1].dev_id = chnl_id;
output->sz_device = pstr_dev_name;
}
sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
}
/* Connecting task node to device node? */
if ((node1_type == NODE_DEVICE) || (node2_type == NODE_DEVICE)) {
if (node2_type == NODE_DEVICE) {
/* node1 == > device */
dev_node_obj = node2;
hnode = node1;
pstream = &(node1->outputs[stream1]);
pstrm_def = output;
} else {
/* device == > node2 */
dev_node_obj = node1;
hnode = node2;
pstream = &(node2->inputs[stream2]);
pstrm_def = input;
}
/* Set up create args */
pstream->type = DEVICECONNECT;
dw_length = strlen(dev_node_obj->str_dev_name);
if (conn_param)
pstrm_def->sz_device = kzalloc(dw_length + 1 +
conn_param->cb_data,
GFP_KERNEL);
else
pstrm_def->sz_device = kzalloc(dw_length + 1,
GFP_KERNEL);
if (!pstrm_def->sz_device) {
status = -ENOMEM;
goto out_unlock;
}
/* Copy device name */
strncpy(pstrm_def->sz_device,
dev_node_obj->str_dev_name, dw_length);
if (conn_param)
strncat(pstrm_def->sz_device,
(char *)conn_param->node_data,
(u32) conn_param->cb_data);
dev_node_obj->device_owner = hnode;
}
/* Fill in create args */
if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
node1->create_args.asa.task_arg_obj.num_outputs++;
fill_stream_def(node1, output, pattrs);
}
if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
node2->create_args.asa.task_arg_obj.num_inputs++;
fill_stream_def(node2, input, pattrs);
}
/* Update node1 and node2 stream_connect */
if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
node1->num_outputs++;
if (stream1 > node1->max_output_index)
node1->max_output_index = stream1;
}
if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
node2->num_inputs++;
if (stream2 > node2->max_input_index)
node2->max_input_index = stream2;
}
fill_stream_connect(node1, node2, stream1, stream2);
/* end of sync_enter_cs */
/* Exit critical section */
out_unlock:
if (status && pstr_dev_name)
kfree(pstr_dev_name);
mutex_unlock(&hnode_mgr->node_mgr_lock);
dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d "
"pattrs: %p status: 0x%x\n", __func__, node1,
stream1, node2, stream2, pattrs, status);
return status;
}
/*
* ======== node_create ========
* Purpose:
* Create a node on the DSP by remotely calling the node's create function.
*/
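/*
* The node must be in the NODE_ALLOCATED state, and its connected
* stream indices must be contiguous (see the max_input_index /
* max_output_index checks below); on success the node moves to
* NODE_CREATED.
*/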
int node_create(struct node_object *hnode)
{
struct node_object *pnode = (struct node_object *)hnode;
struct node_mgr *hnode_mgr;
struct bridge_drv_interface *intf_fxns;
u32 ul_create_fxn;
enum node_type node_type;
int status = 0;
int status1 = 0;
struct dsp_cbdata cb_data;
u32 proc_id = 255;
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
struct dspbridge_platform_data *pdata =
omap_dspbridge_dev->dev.platform_data;
#endif
if (!pnode) {
status = -EFAULT;
goto func_end;
}
hprocessor = hnode->processor;
status = proc_get_state(hprocessor, &proc_state,
sizeof(struct dsp_processorstate));
if (status)
goto func_end;
/* If processor is in error state then don't attempt to create
new node */
if (proc_state.proc_state == PROC_ERROR) {
status = -EPERM;
goto func_end;
}
/* create struct dsp_cbdata struct for PWR calls */
cb_data.cb_data = PWR_TIMEOUT;
node_type = node_get_type(hnode);
hnode_mgr = hnode->node_mgr;
intf_fxns = hnode_mgr->intf_fxns;
/* Get access to node dispatcher */
mutex_lock(&hnode_mgr->node_mgr_lock);
/* Check node state */
if (node_get_state(hnode) != NODE_ALLOCATED)
status = -EBADR;
if (!status)
status = proc_get_processor_id(pnode->processor, &proc_id);
if (status)
goto func_cont2;
if (proc_id != DSP_UNIT)
goto func_cont2;
/* Make sure streams are properly connected */
if ((hnode->num_inputs && hnode->max_input_index >
hnode->num_inputs - 1) ||
(hnode->num_outputs && hnode->max_output_index >
hnode->num_outputs - 1))
status = -ENOTCONN;
if (!status) {
/* If node's create function is not loaded, load it */
/* Boost the OPP level to max level that DSP can be requested */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
if (pdata->cpu_set_freq)
(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
#endif
status = hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
NLDR_CREATE);
/* Get address of node's create function */
if (!status) {
hnode->loaded = true;
if (node_type != NODE_DEVICE) {
status = get_fxn_address(hnode, &ul_create_fxn,
CREATEPHASE);
}
} else {
pr_err("%s: failed to load create code: 0x%x\n",
__func__, status);
}
/* Request the lowest OPP level */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
if (pdata->cpu_set_freq)
(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif
/* Get address of iAlg functions, if socket node */
if (!status) {
if (node_type == NODE_DAISSOCKET) {
status = hnode_mgr->nldr_fxns.get_fxn_addr
(hnode->nldr_node_obj,
hnode->dcd_props.obj_data.node_obj.
str_i_alg_name,
&hnode->create_args.asa.
task_arg_obj.dais_arg);
}
}
}
if (!status) {
if (node_type != NODE_DEVICE) {
status = disp_node_create(hnode_mgr->disp_obj, hnode,
hnode_mgr->fxn_addrs
[RMSCREATENODE],
ul_create_fxn,
&(hnode->create_args),
&(hnode->node_env));
if (status >= 0) {
/* Set the message queue id to the node env
* pointer */
intf_fxns = hnode_mgr->intf_fxns;
(*intf_fxns->msg_set_queue_id) (hnode->
msg_queue_obj,
hnode->node_env);
}
}
}
/* Phase II/Overlays: Create, execute, delete phases possibly in
* different files/sections. */
if (hnode->loaded && hnode->phase_split) {
/* If create code was dynamically loaded, we can now unload
* it. */
status1 = hnode_mgr->nldr_fxns.unload(hnode->nldr_node_obj,
NLDR_CREATE);
hnode->loaded = false;
}
if (status1)
pr_err("%s: Failed to unload create code: 0x%x\n",
__func__, status1);
func_cont2:
/* Update node state and node manager state */
if (status >= 0) {
NODE_SET_STATE(hnode, NODE_CREATED);
hnode_mgr->num_created++;
goto func_cont;
}
if (status != -EBADR) {
/* Put back in NODE_ALLOCATED state if error occurred */
NODE_SET_STATE(hnode, NODE_ALLOCATED);
}
func_cont:
/* Free access to node dispatcher */
mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
if (status >= 0) {
proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
}
dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
hnode, status);
return status;
}
/*
* ======== node_create_mgr ========
* Purpose:
* Create a NODE Manager object.
*/
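/*
* On any failure the partially constructed manager is torn down via
* delete_node_mgr().
*/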
int node_create_mgr(struct node_mgr **node_man,
struct dev_object *hdev_obj)
{
u32 i;
struct node_mgr *node_mgr_obj = NULL;
struct disp_attr disp_attr_obj;
char *sz_zl_file = "";
struct nldr_attrs nldr_attrs_obj;
int status = 0;
u8 dev_type;
*node_man = NULL;
/* Allocate Node manager object */
node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
if (!node_mgr_obj)
return -ENOMEM;
node_mgr_obj->dev_obj = hdev_obj;
/* Initialize the node list before the first error path so that
* delete_node_mgr() can safely walk it */
INIT_LIST_HEAD(&node_mgr_obj->node_list);
node_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
GFP_KERNEL);
if (!node_mgr_obj->ntfy_obj) {
status = -ENOMEM;
goto out_err;
}
ntfy_init(node_mgr_obj->ntfy_obj);
dev_get_dev_type(hdev_obj, &dev_type);
status = dcd_create_manager(sz_zl_file, &node_mgr_obj->dcd_mgr);
if (status)
goto out_err;
status = get_proc_props(node_mgr_obj, hdev_obj);
if (status)
goto out_err;
/* Create NODE Dispatcher */
disp_attr_obj.chnl_offset = node_mgr_obj->chnl_offset;
disp_attr_obj.chnl_buf_size = node_mgr_obj->chnl_buf_size;
disp_attr_obj.proc_family = node_mgr_obj->proc_family;
disp_attr_obj.proc_type = node_mgr_obj->proc_type;
status = disp_create(&node_mgr_obj->disp_obj, hdev_obj, &disp_attr_obj);
if (status)
goto out_err;
/* Create a STRM Manager */
status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
if (status)
goto out_err;
dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
/* Get msg_ctrl queue manager */
dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
mutex_init(&node_mgr_obj->node_mgr_lock);
/* Block out reserved channels */
for (i = 0; i < node_mgr_obj->chnl_offset; i++)
set_bit(i, node_mgr_obj->chnl_map);
/* Block out channels reserved for RMS */
set_bit(node_mgr_obj->chnl_offset, node_mgr_obj->chnl_map);
set_bit(node_mgr_obj->chnl_offset + 1, node_mgr_obj->chnl_map);
/* NO RM Server on the IVA */
if (dev_type != IVA_UNIT) {
/* Get addresses of any RMS functions loaded */
status = get_rms_fxns(node_mgr_obj);
if (status)
goto out_err;
}
/* Get loader functions and create loader */
node_mgr_obj->nldr_fxns = nldr_fxns; /* Dyn loader funcs */
nldr_attrs_obj.ovly = ovly;
nldr_attrs_obj.write = mem_write;
nldr_attrs_obj.dsp_word_size = node_mgr_obj->dsp_word_size;
nldr_attrs_obj.dsp_mau_size = node_mgr_obj->dsp_mau_size;
status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj,
hdev_obj,
&nldr_attrs_obj);
if (status)
goto out_err;
*node_man = node_mgr_obj;
return status;
out_err:
delete_node_mgr(node_mgr_obj);
return status;
}
/*
* ======== node_delete ========
* Purpose:
* Delete a node on the DSP by remotely calling the node's delete function.
* Loads the node's delete function if necessary. Free GPP side resources
* after node's delete function returns.
*/
int node_delete(struct node_res_object *noderes,
struct process_context *pr_ctxt)
{
struct node_object *pnode = noderes->node;
struct node_mgr *hnode_mgr;
struct proc_object *hprocessor;
struct disp_object *disp_obj;
u32 ul_delete_fxn;
enum node_type node_type;
enum node_state state;
int status = 0;
int status1 = 0;
struct dsp_cbdata cb_data;
u32 proc_id;
struct bridge_drv_interface *intf_fxns;
void *node_res = noderes;
struct dsp_processorstate proc_state;
if (!pnode) {
status = -EFAULT;
goto func_end;
}
/* create struct dsp_cbdata struct for PWR call */
cb_data.cb_data = PWR_TIMEOUT;
hnode_mgr = pnode->node_mgr;
hprocessor = pnode->processor;
disp_obj = hnode_mgr->disp_obj;
node_type = node_get_type(pnode);
intf_fxns = hnode_mgr->intf_fxns;
/* Enter critical section */
mutex_lock(&hnode_mgr->node_mgr_lock);
state = node_get_state(pnode);
/* Execute delete phase code for non-device node in all cases
* except when the node was only allocated. Delete phase must be
* executed even if create phase was executed, but failed.
* If the node environment pointer is non-NULL, the delete phase
* code must be executed. */
if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
node_type != NODE_DEVICE) {
status = proc_get_processor_id(pnode->processor, &proc_id);
if (status)
goto func_cont1;
if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
/* If node has terminated, execute phase code will
* have already been unloaded in node_on_exit(). If the
* node is PAUSED, the execute phase is loaded, and it
* is now ok to unload it. If the node is running, we
* will unload the execute phase only after deleting
* the node. */
if (state == NODE_PAUSED && pnode->loaded &&
pnode->phase_split) {
/* Ok to unload execute code as long as node
* is not running */
status1 =
hnode_mgr->nldr_fxns.
unload(pnode->nldr_node_obj,
NLDR_EXECUTE);
pnode->loaded = false;
NODE_SET_STATE(pnode, NODE_DONE);
}
/* Load delete phase code if not loaded or if we
* haven't unloaded the EXECUTE phase */
if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
pnode->phase_split) {
status =
hnode_mgr->nldr_fxns.
load(pnode->nldr_node_obj, NLDR_DELETE);
if (!status)
pnode->loaded = true;
else
pr_err("%s: fail - load delete code:"
" 0x%x\n", __func__, status);
}
}
func_cont1:
if (!status) {
/* Unblock a thread trying to terminate the node */
(void)sync_set_event(pnode->sync_done);
if (proc_id == DSP_UNIT) {
/* ul_delete_fxn = address of node's delete
* function */
status = get_fxn_address(pnode, &ul_delete_fxn,
DELETEPHASE);
} else if (proc_id == IVA_UNIT)
ul_delete_fxn = (u32) pnode->node_env;
if (!status) {
status = proc_get_state(hprocessor,
&proc_state,
sizeof(struct
dsp_processorstate));
if (proc_state.proc_state != PROC_ERROR) {
status =
disp_node_delete(disp_obj, pnode,
hnode_mgr->
fxn_addrs
[RMSDELETENODE],
ul_delete_fxn,
pnode->node_env);
} else
NODE_SET_STATE(pnode, NODE_DONE);
/* Unload execute, if not unloaded, and delete
* function */
if (state == NODE_RUNNING &&
pnode->phase_split) {
status1 =
hnode_mgr->nldr_fxns.
unload(pnode->nldr_node_obj,
NLDR_EXECUTE);
}
if (status1)
pr_err("%s: fail - unload execute code:"
" 0x%x\n", __func__, status1);
status1 =
hnode_mgr->nldr_fxns.unload(pnode->
nldr_node_obj,
NLDR_DELETE);
pnode->loaded = false;
if (status1)
pr_err("%s: fail - unload delete code: "
"0x%x\n", __func__, status1);
}
}
}
/* Free host side resources even if a failure occurred */
/* Remove node from hnode_mgr->node_list */
list_del(&pnode->list_elem);
hnode_mgr->num_nodes--;
/* Decrement count of nodes created on DSP */
if (state != NODE_ALLOCATED || pnode->node_env != (u32) NULL)
hnode_mgr->num_created--;
/* Free host-side resources allocated by node_create()
* delete_node() fails if SM buffers not freed by client! */
drv_proc_node_update_status(node_res, false);
delete_node(pnode, pr_ctxt);
/*
* Release all Node resources and its context
*/
idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
kfree(node_res);
/* Exit critical section */
mutex_unlock(&hnode_mgr->node_mgr_lock);
proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
func_end:
dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
return status;
}
/*
* ======== node_delete_mgr ========
* Purpose:
* Delete the NODE Manager.
*/
int node_delete_mgr(struct node_mgr *hnode_mgr)
{
if (!hnode_mgr)
return -EFAULT;
delete_node_mgr(hnode_mgr);
return 0;
}
/*
* ======== node_enum_nodes ========
* Purpose:
* Enumerate currently allocated nodes.
*/
int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
u32 node_tab_size, u32 *pu_num_nodes,
u32 *pu_allocated)
{
struct node_object *hnode;
u32 i = 0;
int status = 0;
if (!hnode_mgr) {
status = -EFAULT;
goto func_end;
}
/* Enter critical section */
mutex_lock(&hnode_mgr->node_mgr_lock);
if (hnode_mgr->num_nodes > node_tab_size) {
*pu_allocated = hnode_mgr->num_nodes;
*pu_num_nodes = 0;
status = -EINVAL;
} else {
list_for_each_entry(hnode, &hnode_mgr->node_list, list_elem)
node_tab[i++] = hnode;
*pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
}
/* end of sync_enter_cs */
/* Exit critical section */
mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
return status;
}
/*
* ======== node_free_msg_buf ========
* Purpose:
* Frees the message buffer.
*/
int node_free_msg_buf(struct node_object *hnode, u8 *pbuffer,
struct dsp_bufferattr *pattr)
{
struct node_object *pnode = (struct node_object *)hnode;
int status = 0;
u32 proc_id;
if (!hnode) {
status = -EFAULT;
goto func_end;
}
status = proc_get_processor_id(pnode->processor, &proc_id);
if (!status && proc_id == DSP_UNIT) {
if (pattr == NULL) {
/* set defaults */
pattr = &node_dfltbufattrs;
}
/* Node supports single SM segment only */
if (pattr->segment_id != 1)
status = -EBADR;
/* pbuffer is the client's virtual address */
status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
}
func_end:
return status;
}
/*
* ======== node_get_attr ========
* Purpose:
* Copy the current attributes of the specified node into a dsp_nodeattr
* structure.
*/
int node_get_attr(struct node_object *hnode,
struct dsp_nodeattr *pattr, u32 attr_size)
{
struct node_mgr *hnode_mgr;
if (!hnode)
return -EFAULT;
hnode_mgr = hnode->node_mgr;
/* Enter hnode_mgr critical section (since we're accessing
* data that could be changed by node_change_priority() and
* node_connect(). */
mutex_lock(&hnode_mgr->node_mgr_lock);
pattr->cb_struct = sizeof(struct dsp_nodeattr);
/* dsp_nodeattrin */
pattr->in_node_attr_in.cb_struct =
sizeof(struct dsp_nodeattrin);
pattr->in_node_attr_in.prio = hnode->prio;
pattr->in_node_attr_in.timeout = hnode->timeout;
pattr->in_node_attr_in.heap_size =
hnode->create_args.asa.task_arg_obj.heap_size;
pattr->in_node_attr_in.pgpp_virt_addr = (void *)
hnode->create_args.asa.task_arg_obj.gpp_heap_addr;
pattr->node_attr_inputs = hnode->num_gpp_inputs;
pattr->node_attr_outputs = hnode->num_gpp_outputs;
/* dsp_nodeinfo */
get_node_info(hnode, &(pattr->node_info));
/* end of sync_enter_cs */
/* Exit critical section */
mutex_unlock(&hnode_mgr->node_mgr_lock);
return 0;
}
/*
* ======== node_get_channel_id ========
* Purpose:
* Get the channel index reserved for a stream connection between the
* host and a node.
*/
int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
u32 *chan_id)
{
enum node_type node_type;
int status = -EINVAL;
if (!hnode) {
status = -EFAULT;
return status;
}
node_type = node_get_type(hnode);
if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) {
status = -EPERM;
return status;
}
if (dir == DSP_TONODE) {
if (index < MAX_INPUTS(hnode)) {
if (hnode->inputs[index].type == HOSTCONNECT) {
*chan_id = hnode->inputs[index].dev_id;
status = 0;
}
}
} else {
if (index < MAX_OUTPUTS(hnode)) {
if (hnode->outputs[index].type == HOSTCONNECT) {
*chan_id = hnode->outputs[index].dev_id;
status = 0;
}
}
}
return status;
}
/*
* ======== node_get_message ========
* Purpose:
* Retrieve a message from a node on the DSP.
*/
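/*
* If the message carries a shared-memory descriptor (DSP_RMSBUFDESC),
* arg1/arg2 are translated in place from a DSP word address/size to a
* GPP virtual address and byte count before returning to the caller.
*/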
int node_get_message(struct node_object *hnode,
struct dsp_msg *message, u32 utimeout)
{
struct node_mgr *hnode_mgr;
enum node_type node_type;
struct bridge_drv_interface *intf_fxns;
int status = 0;
void *tmp_buf;
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
if (!hnode) {
status = -EFAULT;
goto func_end;
}
hprocessor = hnode->processor;
status = proc_get_state(hprocessor, &proc_state,
sizeof(struct dsp_processorstate));
if (status)
goto func_end;
/* If processor is in error state then don't attempt to get the
message */
if (proc_state.proc_state == PROC_ERROR) {
status = -EPERM;
goto func_end;
}
hnode_mgr = hnode->node_mgr;
node_type = node_get_type(hnode);
if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
node_type != NODE_DAISSOCKET) {
status = -EPERM;
goto func_end;
}
/* This function will block unless a message is available. Since
* DSPNode_RegisterNotify() allows notification when a message
* is available, the system can be designed so that
* DSPNode_GetMessage() is only called when a message is
* available. */
intf_fxns = hnode_mgr->intf_fxns;
status =
(*intf_fxns->msg_get) (hnode->msg_queue_obj, message, utimeout);
/* Check if message contains SM descriptor */
if (status || !(message->cmd & DSP_RMSBUFDESC))
goto func_end;
/* Translate DSP byte addr to GPP Va. */
tmp_buf = cmm_xlator_translate(hnode->xlator,
(void *)(message->arg1 *
hnode->node_mgr->
dsp_word_size), CMM_DSPPA2PA);
if (tmp_buf != NULL) {
/* now convert this GPP Pa to Va */
tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
CMM_PA2VA);
if (tmp_buf != NULL) {
/* Adjust SM size in msg */
message->arg1 = (u32) tmp_buf;
message->arg2 *= hnode->node_mgr->dsp_word_size;
} else {
status = -ESRCH;
}
} else {
status = -ESRCH;
}
func_end:
dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
hnode, message, utimeout);
return status;
}
/*
* ======== node_get_nldr_obj ========
*/
int node_get_nldr_obj(struct node_mgr *hnode_mgr,
struct nldr_object **nldr_ovlyobj)
{
int status = 0;
struct node_mgr *node_mgr_obj = hnode_mgr;
if (!hnode_mgr)
status = -EFAULT;
else
*nldr_ovlyobj = node_mgr_obj->nldr_obj;
return status;
}
/*
* ======== node_get_strm_mgr ========
* Purpose:
* Returns the Stream manager.
*/
int node_get_strm_mgr(struct node_object *hnode,
struct strm_mgr **strm_man)
{
int status = 0;
if (!hnode)
status = -EFAULT;
else
*strm_man = hnode->node_mgr->strm_mgr_obj;
return status;
}
/*
* ======== node_get_load_type ========
*/
enum nldr_loadtype node_get_load_type(struct node_object *hnode)
{
if (!hnode) {
dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
return -1;
} else {
return hnode->dcd_props.obj_data.node_obj.load_type;
}
}
/*
* ======== node_get_timeout ========
* Purpose:
* Returns the timeout value for this node.
*/
u32 node_get_timeout(struct node_object *hnode)
{
if (!hnode) {
dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
return 0;
} else {
return hnode->timeout;
}
}
/*
* ======== node_get_type ========
* Purpose:
* Returns the node type.
*/
enum node_type node_get_type(struct node_object *hnode)
{
enum node_type node_type;
if (hnode == (struct node_object *)DSP_HGPPNODE)
node_type = NODE_GPP;
else {
if (!hnode)
node_type = -1;
else
node_type = hnode->ntype;
}
return node_type;
}
/*
* ======== node_on_exit ========
* Purpose:
* Gets called when RMS_EXIT is received for a node.
*/
void node_on_exit(struct node_object *hnode, s32 node_status)
{
if (!hnode)
return;
/* Set node state to done */
NODE_SET_STATE(hnode, NODE_DONE);
hnode->exit_status = node_status;
if (hnode->loaded && hnode->phase_split) {
(void)hnode->node_mgr->nldr_fxns.unload(hnode->
nldr_node_obj,
NLDR_EXECUTE);
hnode->loaded = false;
}
/* Unblock call to node_terminate */
(void)sync_set_event(hnode->sync_done);
/* Notify clients */
proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
}
/*
* ======== node_pause ========
* Purpose:
* Suspend execution of a node currently running on the DSP.
*/
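/*
* Pausing is implemented as an RMS priority change to
* NODE_SUSPENDEDPRI; node_run() later restores the node's own
* priority to resume it.
*/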
int node_pause(struct node_object *hnode)
{
struct node_object *pnode = (struct node_object *)hnode;
enum node_type node_type;
enum node_state state;
struct node_mgr *hnode_mgr;
int status = 0;
u32 proc_id;
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
if (!hnode) {
status = -EFAULT;
} else {
node_type = node_get_type(hnode);
if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
status = -EPERM;
}
if (status)
goto func_end;
status = proc_get_processor_id(pnode->processor, &proc_id);
if (proc_id == IVA_UNIT)
status = -ENOSYS;
if (!status) {
hnode_mgr = hnode->node_mgr;
/* Enter critical section */
mutex_lock(&hnode_mgr->node_mgr_lock);
state = node_get_state(hnode);
/* Check node state */
if (state != NODE_RUNNING)
status = -EBADR;
if (status)
goto func_cont;
hprocessor = hnode->processor;
status = proc_get_state(hprocessor, &proc_state,
sizeof(struct dsp_processorstate));
if (status)
goto func_cont;
/* If processor is in error state then don't attempt
to send the message */
if (proc_state.proc_state == PROC_ERROR) {
status = -EPERM;
goto func_cont;
}
status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY],
hnode->node_env, NODE_SUSPENDEDPRI);
/* Update state */
if (status >= 0)
NODE_SET_STATE(hnode, NODE_PAUSED);
func_cont:
/* End of sync_enter_cs */
/* Leave critical section */
mutex_unlock(&hnode_mgr->node_mgr_lock);
if (status >= 0) {
proc_notify_clients(hnode->processor,
DSP_NODESTATECHANGE);
ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
}
}
func_end:
dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
return status;
}
/*
* ======== node_put_message ========
* Purpose:
* Send a message to a message node, task node, or XDAIS socket node. This
* function will block until the message stream can accommodate the
* message, or a timeout occurs.
*/
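/*
* This is the inverse of the translation in node_get_message(): for
* DSP_RMSBUFDESC messages, arg1/arg2 are converted from a GPP virtual
* address and byte count to a DSP address and size in MAUs.
*/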
int node_put_message(struct node_object *hnode,
const struct dsp_msg *pmsg, u32 utimeout)
{
struct node_mgr *hnode_mgr = NULL;
enum node_type node_type;
struct bridge_drv_interface *intf_fxns;
enum node_state state;
int status = 0;
void *tmp_buf;
struct dsp_msg new_msg;
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
if (!hnode) {
status = -EFAULT;
goto func_end;
}
hprocessor = hnode->processor;
status = proc_get_state(hprocessor, &proc_state,
sizeof(struct dsp_processorstate));
if (status)
goto func_end;
/* If processor is in bad state then don't attempt sending the
message */
if (proc_state.proc_state == PROC_ERROR) {
status = -EPERM;
goto func_end;
}
hnode_mgr = hnode->node_mgr;
node_type = node_get_type(hnode);
if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
node_type != NODE_DAISSOCKET)
status = -EPERM;
if (!status) {
/* Check node state. Can't send messages to a node after
* we've sent the RMS_EXIT command. There is still the
* possibility that node_terminate can be called after we've
* checked the state. Could add another SYNC object to
* prevent this (can't use node_mgr_lock, since we don't
* want to block other NODE functions). However, the node may
* still exit on its own, before this message is sent. */
mutex_lock(&hnode_mgr->node_mgr_lock);
state = node_get_state(hnode);
if (state == NODE_TERMINATING || state == NODE_DONE)
status = -EBADR;
/* end of sync_enter_cs */
mutex_unlock(&hnode_mgr->node_mgr_lock);
}
if (status)
goto func_end;
/* assign pmsg values to new msg */
new_msg = *pmsg;
/* Now, check if message contains a SM buffer descriptor */
if (pmsg->cmd & DSP_RMSBUFDESC) {
/* Translate GPP Va to DSP physical buf Ptr. */
tmp_buf = cmm_xlator_translate(hnode->xlator,
(void *)new_msg.arg1,
CMM_VA2DSPPA);
if (tmp_buf != NULL) {
/* got translation, convert to MAUs in msg */
if (hnode->node_mgr->dsp_word_size != 0) {
new_msg.arg1 =
(u32) tmp_buf /
hnode->node_mgr->dsp_word_size;
/* MAUs */
new_msg.arg2 /= hnode->node_mgr->
dsp_word_size;
} else {
pr_err("%s: dsp_word_size is zero!\n",
__func__);
status = -EPERM; /* bad DSPWordSize */
}
} else { /* failed to translate buffer address */
status = -ESRCH;
}
}
if (!status) {
intf_fxns = hnode_mgr->intf_fxns;
status = (*intf_fxns->msg_put) (hnode->msg_queue_obj,
&new_msg, utimeout);
}
func_end:
dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
"status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
return status;
}
/*
* ======== node_register_notify ========
* Purpose:
* Register to be notified on specific events for this node.
*/
int node_register_notify(struct node_object *hnode, u32 event_mask,
u32 notify_type,
struct dsp_notification *hnotification)
{
struct bridge_drv_interface *intf_fxns;
int status = 0;
if (!hnode) {
status = -EFAULT;
} else {
/* Check if event mask is a valid node related event */
if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
status = -EINVAL;
/* Check if notify type is valid */
if (notify_type != DSP_SIGNALEVENT)
status = -EINVAL;
/* Only one Notification can be registered at a
* time - Limitation */
if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
status = -EINVAL;
}
if (!status) {
if (event_mask == DSP_NODESTATECHANGE) {
status = ntfy_register(hnode->ntfy_obj, hnotification,
event_mask & DSP_NODESTATECHANGE,
notify_type);
} else {
/* Send Message part of event mask to msg_ctrl */
intf_fxns = hnode->node_mgr->intf_fxns;
status = (*intf_fxns->msg_register_notify)
(hnode->msg_queue_obj,
event_mask & DSP_NODEMESSAGEREADY, notify_type,
hnotification);
}
}
dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
"hnotification: %p status 0x%x\n", __func__, hnode,
event_mask, notify_type, hnotification, status);
return status;
}
/*
* ======== node_run ========
* Purpose:
* Start execution of a node's execute phase, or resume execution of a node
* that has been suspended (via NODE_NodePause()) on the DSP. Load the
* node's execute function if necessary.
*/
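/*
* node_run() serves both NODE_CREATED and NODE_PAUSED nodes: a
* created node has its execute phase dispatched, while a paused one
* is resumed by restoring its priority via RMS_changeNodePriority.
*/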
int node_run(struct node_object *hnode)
{
struct node_object *pnode = (struct node_object *)hnode;
struct node_mgr *hnode_mgr;
enum node_type node_type;
enum node_state state;
u32 ul_execute_fxn;
u32 ul_fxn_addr;
int status = 0;
u32 proc_id;
struct bridge_drv_interface *intf_fxns;
struct dsp_processorstate proc_state;
struct proc_object *hprocessor;
if (!hnode) {
status = -EFAULT;
goto func_end;
}
hprocessor = hnode->processor;
status = proc_get_state(hprocessor, &proc_state,
sizeof(struct dsp_processorstate));
if (status)
goto func_end;
/* If processor is in error state then don't attempt to run the node */
if (proc_state.proc_state == PROC_ERROR) {
status = -EPERM;
goto func_end;
}
node_type = node_get_type(hnode);
if (node_type == NODE_DEVICE)
status = -EPERM;
if (status)
goto func_end;
hnode_mgr = hnode->node_mgr;
if (!hnode_mgr) {
status = -EFAULT;
goto func_end;
}
intf_fxns = hnode_mgr->intf_fxns;
/* Enter critical section */
mutex_lock(&hnode_mgr->node_mgr_lock);
state = node_get_state(hnode);
if (state != NODE_CREATED && state != NODE_PAUSED)
status = -EBADR;
if (!status)
status = proc_get_processor_id(pnode->processor, &proc_id);
if (status)
goto func_cont1;
if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
goto func_cont1;
if (state == NODE_CREATED) {
/* If node's execute function is not loaded, load it */
if (!(hnode->loaded) && hnode->phase_split) {
status =
hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
NLDR_EXECUTE);
if (!status) {
hnode->loaded = true;
} else {
pr_err("%s: fail - load execute code: 0x%x\n",
__func__, status);
}
}
if (!status) {
/* Get address of node's execute function */
if (proc_id == IVA_UNIT)
ul_execute_fxn = (u32) hnode->node_env;
else {
status = get_fxn_address(hnode, &ul_execute_fxn,
EXECUTEPHASE);
}
}
if (!status) {
ul_fxn_addr = hnode_mgr->fxn_addrs[RMSEXECUTENODE];
status =
disp_node_run(hnode_mgr->disp_obj, hnode,
ul_fxn_addr, ul_execute_fxn,
hnode->node_env);
}
} else if (state == NODE_PAUSED) {
ul_fxn_addr = hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY];
status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
ul_fxn_addr, hnode->node_env,
NODE_GET_PRIORITY(hnode));
} else {
/* We should never get here */
}
func_cont1:
/* Update node state. */
if (status >= 0)
NODE_SET_STATE(hnode, NODE_RUNNING);
else /* Set state back to previous value */
NODE_SET_STATE(hnode, state);
/*End of sync_enter_cs */
/* Exit critical section */
mutex_unlock(&hnode_mgr->node_mgr_lock);
if (status >= 0) {
proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
}
func_end:
dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
return status;
}
/*
* ======== node_terminate ========
* Purpose:
* Signal a node running on the DSP that it should exit its execute phase
* function.
*/
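/*
* On return *pstatus holds the node's exit status; if the node had
* already terminated on its own, -EBADR is returned but *pstatus is
* still valid.
*/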
int node_terminate(struct node_object *hnode, int *pstatus)
{
struct node_object *pnode = (struct node_object *)hnode;
struct node_mgr *hnode_mgr = NULL;
enum node_type node_type;
struct bridge_drv_interface *intf_fxns;
enum node_state state;
struct dsp_msg msg, killmsg;
int status = 0;
u32 proc_id, kill_time_out;
struct deh_mgr *hdeh_mgr;
struct dsp_processorstate proc_state;
if (!hnode || !hnode->node_mgr) {
status = -EFAULT;
goto func_end;
}
if (pnode->processor == NULL) {
status = -EFAULT;
goto func_end;
}
status = proc_get_processor_id(pnode->processor, &proc_id);
if (!status) {
hnode_mgr = hnode->node_mgr;
node_type = node_get_type(hnode);
if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
status = -EPERM;
}
if (!status) {
/* Check node state */
mutex_lock(&hnode_mgr->node_mgr_lock);
state = node_get_state(hnode);
if (state != NODE_RUNNING) {
status = -EBADR;
/* Set the exit status if node terminated on
* its own. */
if (state == NODE_DONE)
*pstatus = hnode->exit_status;
} else {
NODE_SET_STATE(hnode, NODE_TERMINATING);
}
/* end of sync_enter_cs */
mutex_unlock(&hnode_mgr->node_mgr_lock);
}
if (!status) {
/*
* Send exit message. Do not change state to NODE_DONE
* here. That will be done in callback.
*/
status = proc_get_state(pnode->processor, &proc_state,
sizeof(struct dsp_processorstate));
if (status)
goto func_cont;
/* If processor is in error state then don't attempt to send
* a kill task command */
if (proc_state.proc_state == PROC_ERROR) {
status = -EPERM;
goto func_cont;
}
msg.cmd = RMS_EXIT;
msg.arg1 = hnode->node_env;
killmsg.cmd = RMS_KILLTASK;
killmsg.arg1 = hnode->node_env;
intf_fxns = hnode_mgr->intf_fxns;
if (hnode->timeout > MAXTIMEOUT)
kill_time_out = MAXTIMEOUT;
else
kill_time_out = (hnode->timeout) * 2;
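/*
* kill_time_out is split in half: up to half is spent waiting for
* the node to honour RMS_EXIT, and the other half waiting after the
* RMS_KILLTASK fallback, before a DSP exception is simulated.
*/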
status = (*intf_fxns->msg_put) (hnode->msg_queue_obj, &msg,
hnode->timeout);
if (status)
goto func_cont;
/*
* Wait on synchronization object that will be
* posted in the callback on receiving RMS_EXIT
* message, or by node_delete. Check for valid hnode,
* in case posted by node_delete().
*/
status = sync_wait_on_event(hnode->sync_done,
kill_time_out / 2);
if (status != ETIME)
goto func_cont;
status = (*intf_fxns->msg_put)(hnode->msg_queue_obj,
&killmsg, hnode->timeout);
if (status)
goto func_cont;
status = sync_wait_on_event(hnode->sync_done,
kill_time_out / 2);
if (status) {
/*
* The node did not respond to the kill message;
* simulate a DSP exception so the error handler
* can clean up.
*/
dev_get_deh_mgr(hnode_mgr->dev_obj, &hdeh_mgr);
if (!hdeh_mgr)
goto func_cont;
bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
}
}
func_cont:
if (!status) {
/* Enter CS before getting exit status, in case node was
* deleted. */
mutex_lock(&hnode_mgr->node_mgr_lock);
/* Make sure node wasn't deleted while we blocked */
if (!hnode) {
status = -EPERM;
} else {
*pstatus = hnode->exit_status;
dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
__func__, hnode, hnode->node_env, status);
}
mutex_unlock(&hnode_mgr->node_mgr_lock);
} /*End of sync_enter_cs */
func_end:
return status;
}
/*
* ======== delete_node ========
* Purpose:
* Free GPP resources allocated in node_allocate() or node_connect().
*/
static void delete_node(struct node_object *hnode,
struct process_context *pr_ctxt)
{
struct node_mgr *hnode_mgr;
struct bridge_drv_interface *intf_fxns;
u32 i;
enum node_type node_type;
struct stream_chnl stream;
struct node_msgargs node_msg_args;
struct node_taskargs task_arg_obj;
#ifdef DSP_DMM_DEBUG
struct dmm_object *dmm_mgr;
struct proc_object *p_proc_object =
(struct proc_object *)hnode->processor;
#endif
int status;
if (!hnode)
goto func_end;
hnode_mgr = hnode->node_mgr;
if (!hnode_mgr)
goto func_end;
node_type = node_get_type(hnode);
if (node_type != NODE_DEVICE) {
node_msg_args = hnode->create_args.asa.node_msg_args;
kfree(node_msg_args.pdata);
/* Free msg_ctrl queue */
if (hnode->msg_queue_obj) {
intf_fxns = hnode_mgr->intf_fxns;
(*intf_fxns->msg_delete_queue) (hnode->
msg_queue_obj);
hnode->msg_queue_obj = NULL;
}
kfree(hnode->sync_done);
/* Free all stream info */
if (hnode->inputs) {
for (i = 0; i < MAX_INPUTS(hnode); i++) {
stream = hnode->inputs[i];
free_stream(hnode_mgr, stream);
}
kfree(hnode->inputs);
hnode->inputs = NULL;
}
if (hnode->outputs) {
for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
stream = hnode->outputs[i];
free_stream(hnode_mgr, stream);
}
kfree(hnode->outputs);
hnode->outputs = NULL;
}
task_arg_obj = hnode->create_args.asa.task_arg_obj;
if (task_arg_obj.strm_in_def) {
for (i = 0; i < MAX_INPUTS(hnode); i++) {
kfree(task_arg_obj.strm_in_def[i].sz_device);
task_arg_obj.strm_in_def[i].sz_device = NULL;
}
kfree(task_arg_obj.strm_in_def);
task_arg_obj.strm_in_def = NULL;
}
if (task_arg_obj.strm_out_def) {
for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
kfree(task_arg_obj.strm_out_def[i].sz_device);
task_arg_obj.strm_out_def[i].sz_device = NULL;
}
kfree(task_arg_obj.strm_out_def);
task_arg_obj.strm_out_def = NULL;
}
if (task_arg_obj.dsp_heap_res_addr) {
status = proc_un_map(hnode->processor, (void *)
task_arg_obj.dsp_heap_addr,
pr_ctxt);
status = proc_un_reserve_memory(hnode->processor,
(void *)
task_arg_obj.
dsp_heap_res_addr,
pr_ctxt);
#ifdef DSP_DMM_DEBUG
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (dmm_mgr)
dmm_mem_map_dump(dmm_mgr);
else
status = DSP_EHANDLE;
#endif
}
}
if (node_type != NODE_MESSAGE) {
kfree(hnode->stream_connect);
hnode->stream_connect = NULL;
}
kfree(hnode->str_dev_name);
hnode->str_dev_name = NULL;
if (hnode->ntfy_obj) {
ntfy_delete(hnode->ntfy_obj);
kfree(hnode->ntfy_obj);
hnode->ntfy_obj = NULL;
}
/* These were allocated in dcd_get_object_def (via node_allocate) */
kfree(hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn);
hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn = NULL;
kfree(hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn);
hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn = NULL;
kfree(hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn);
hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn = NULL;
kfree(hnode->dcd_props.obj_data.node_obj.str_i_alg_name);
hnode->dcd_props.obj_data.node_obj.str_i_alg_name = NULL;
/* Free all SM address translator resources */
kfree(hnode->xlator);
kfree(hnode->nldr_node_obj);
hnode->nldr_node_obj = NULL;
hnode->node_mgr = NULL;
kfree(hnode);
hnode = NULL;
func_end:
return;
}
/*
* ======== delete_node_mgr ========
* Purpose:
* Frees the node manager.
*/
static void delete_node_mgr(struct node_mgr *hnode_mgr)
{
struct node_object *hnode, *tmp;
if (hnode_mgr) {
/* Free resources */
if (hnode_mgr->dcd_mgr)
dcd_destroy_manager(hnode_mgr->dcd_mgr);
/* Remove any elements remaining in lists */
list_for_each_entry_safe(hnode, tmp, &hnode_mgr->node_list,
list_elem) {
list_del(&hnode->list_elem);
delete_node(hnode, NULL);
}
mutex_destroy(&hnode_mgr->node_mgr_lock);
if (hnode_mgr->ntfy_obj) {
ntfy_delete(hnode_mgr->ntfy_obj);
kfree(hnode_mgr->ntfy_obj);
}
if (hnode_mgr->disp_obj)
disp_delete(hnode_mgr->disp_obj);
if (hnode_mgr->strm_mgr_obj)
strm_delete(hnode_mgr->strm_mgr_obj);
/* Delete the loader */
if (hnode_mgr->nldr_obj)
hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj);
kfree(hnode_mgr);
}
}
/*
* ======== fill_stream_connect ========
* Purpose:
* Fills stream information.
*/
static void fill_stream_connect(struct node_object *node1,
struct node_object *node2,
u32 stream1, u32 stream2)
{
u32 strm_index;
struct dsp_streamconnect *strm1 = NULL;
struct dsp_streamconnect *strm2 = NULL;
enum node_type node1_type = NODE_TASK;
enum node_type node2_type = NODE_TASK;
node1_type = node_get_type(node1);
node2_type = node_get_type(node2);
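/*
* Called from node_connect() after num_inputs/num_outputs have been
* incremented, so the new connection's record is the last entry,
* at index (num_inputs + num_outputs - 1), of stream_connect[].
*/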
if (node1 != (struct node_object *)DSP_HGPPNODE) {
if (node1_type != NODE_DEVICE) {
strm_index = node1->num_inputs +
node1->num_outputs - 1;
strm1 = &(node1->stream_connect[strm_index]);
strm1->cb_struct = sizeof(struct dsp_streamconnect);
strm1->this_node_stream_index = stream1;
}
if (node2 != (struct node_object *)DSP_HGPPNODE) {
/* NODE == > NODE */
if (node1_type != NODE_DEVICE) {
strm1->connected_node = node2;
strm1->ui_connected_node_id = node2->node_uuid;
strm1->connected_node_stream_index = stream2;
strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
}
if (node2_type != NODE_DEVICE) {
strm_index = node2->num_inputs +
node2->num_outputs - 1;
strm2 = &(node2->stream_connect[strm_index]);
strm2->cb_struct =
sizeof(struct dsp_streamconnect);
strm2->this_node_stream_index = stream2;
strm2->connected_node = node1;
strm2->ui_connected_node_id = node1->node_uuid;
strm2->connected_node_stream_index = stream1;
strm2->connect_type = CONNECTTYPE_NODEINPUT;
}
} else if (node1_type != NODE_DEVICE)
strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
} else {
/* GPP == > NODE */
strm_index = node2->num_inputs + node2->num_outputs - 1;
strm2 = &(node2->stream_connect[strm_index]);
strm2->cb_struct = sizeof(struct dsp_streamconnect);
strm2->this_node_stream_index = stream2;
strm2->connect_type = CONNECTTYPE_GPPINPUT;
}
}
/*
* ======== fill_stream_def ========
* Purpose:
* Fills Stream attributes.
*/
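/*
* Note that buf_size is converted from bytes into DSP data MAUs,
* since the stream definition is consumed on the DSP side.
*/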
static void fill_stream_def(struct node_object *hnode,
struct node_strmdef *pstrm_def,
struct dsp_strmattr *pattrs)
{
struct node_mgr *hnode_mgr = hnode->node_mgr;
if (pattrs != NULL) {
pstrm_def->num_bufs = pattrs->num_bufs;
pstrm_def->buf_size =
pattrs->buf_size / hnode_mgr->dsp_data_mau_size;
pstrm_def->seg_id = pattrs->seg_id;
pstrm_def->buf_alignment = pattrs->buf_alignment;
pstrm_def->timeout = pattrs->timeout;
} else {
pstrm_def->num_bufs = DEFAULTNBUFS;
pstrm_def->buf_size =
DEFAULTBUFSIZE / hnode_mgr->dsp_data_mau_size;
pstrm_def->seg_id = DEFAULTSEGID;
pstrm_def->buf_alignment = DEFAULTALIGNMENT;
pstrm_def->timeout = DEFAULTTIMEOUT;
}
}
/*
* ======== free_stream ========
* Purpose:
* Updates the channel mask and frees the pipe id.
*/
static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
{
/* Free up the pipe id only if the other node has already been deleted. */
if (stream.type == NODECONNECT) {
if (test_bit(stream.dev_id, hnode_mgr->pipe_done_map)) {
/* The other node has already been deleted */
clear_bit(stream.dev_id, hnode_mgr->pipe_done_map);
clear_bit(stream.dev_id, hnode_mgr->pipe_map);
} else {
/* The other node has not been deleted yet */
set_bit(stream.dev_id, hnode_mgr->pipe_done_map);
}
} else if (stream.type == HOSTCONNECT) {
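/*
* HOSTCONNECT dev_ids are partitioned into three consecutive ranges
* of num_chnls each (proc-copy, DSP-DMA, zero-copy), mirroring the
* allocation in node_connect().
*/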
if (stream.dev_id < hnode_mgr->num_chnls) {
clear_bit(stream.dev_id, hnode_mgr->chnl_map);
} else if (stream.dev_id < (2 * hnode_mgr->num_chnls)) {
/* dsp-dma */
clear_bit(stream.dev_id - (1 * hnode_mgr->num_chnls),
hnode_mgr->dma_chnl_map);
} else if (stream.dev_id < (3 * hnode_mgr->num_chnls)) {
/* zero-copy */
clear_bit(stream.dev_id - (2 * hnode_mgr->num_chnls),
hnode_mgr->zc_chnl_map);
}
}
}
/*
* ======== get_fxn_address ========
* Purpose:
* Retrieves the address for create, execute or delete phase for a node.
*/
static int get_fxn_address(struct node_object *hnode, u32 *fxn_addr,
u32 phase)
{
char *pstr_fxn_name = NULL;
struct node_mgr *hnode_mgr = hnode->node_mgr;
int status = 0;
switch (phase) {
case CREATEPHASE:
pstr_fxn_name =
hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn;
break;
case EXECUTEPHASE:
pstr_fxn_name =
hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn;
break;
case DELETEPHASE:
pstr_fxn_name =
hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn;
break;
default:
/* Should never get here */
break;
}
status =
hnode_mgr->nldr_fxns.get_fxn_addr(hnode->nldr_node_obj,
pstr_fxn_name, fxn_addr);
return status;
}
/*
* ======== get_node_info ========
* Purpose:
* Retrieves the node information.
*/
void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
{
u32 i;
node_info->cb_struct = sizeof(struct dsp_nodeinfo);
node_info->nb_node_database_props =
hnode->dcd_props.obj_data.node_obj.ndb_props;
node_info->execution_priority = hnode->prio;
node_info->device_owner = hnode->device_owner;
node_info->number_streams = hnode->num_inputs + hnode->num_outputs;
node_info->node_env = hnode->node_env;
node_info->ns_execution_state = node_get_state(hnode);
/* Copy stream connect data */
for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++)
node_info->sc_stream_connection[i] = hnode->stream_connect[i];
}
/*
* ======== get_node_props ========
* Purpose:
* Retrieve node properties.
*/
static int get_node_props(struct dcd_manager *hdcd_mgr,
struct node_object *hnode,
const struct dsp_uuid *node_uuid,
struct dcd_genericobj *dcd_prop)
{
u32 len;
struct node_msgargs *pmsg_args;
struct node_taskargs *task_arg_obj;
enum node_type node_type = NODE_TASK;
struct dsp_ndbprops *pndb_props =
&(dcd_prop->obj_data.node_obj.ndb_props);
int status = 0;
char sz_uuid[MAXUUIDLEN];
status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
DSP_DCDNODETYPE, dcd_prop);
if (!status) {
hnode->ntype = node_type = pndb_props->ntype;
/* Create UUID value to set in registry. */
uuid_uuid_to_string((struct dsp_uuid *)node_uuid, sz_uuid,
MAXUUIDLEN);
dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);
/* Fill in message args that come from NDB */
if (node_type != NODE_DEVICE) {
pmsg_args = &(hnode->create_args.asa.node_msg_args);
pmsg_args->seg_id =
dcd_prop->obj_data.node_obj.msg_segid;
pmsg_args->notify_type =
dcd_prop->obj_data.node_obj.msg_notify_type;
pmsg_args->max_msgs = pndb_props->message_depth;
dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
pmsg_args->max_msgs);
} else {
/* Copy device name */
len = strlen(pndb_props->ac_name);
hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL);
if (hnode->str_dev_name == NULL) {
status = -ENOMEM;
} else {
strncpy(hnode->str_dev_name,
pndb_props->ac_name, len);
}
}
}
if (!status) {
/* Fill in create args that come from NDB */
if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
task_arg_obj->prio = pndb_props->prio;
task_arg_obj->stack_size = pndb_props->stack_size;
task_arg_obj->sys_stack_size =
pndb_props->sys_stack_size;
task_arg_obj->stack_seg = pndb_props->stack_seg;
dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
"0x%x words System Stack Size: 0x%x words "
"Stack Segment: 0x%x profile count : 0x%x\n",
task_arg_obj->prio, task_arg_obj->stack_size,
task_arg_obj->sys_stack_size,
task_arg_obj->stack_seg,
pndb_props->count_profiles);
}
}
return status;
}
/*
* ======== get_proc_props ========
* Purpose:
* Retrieve the processor properties.
*/
static int get_proc_props(struct node_mgr *hnode_mgr,
struct dev_object *hdev_obj)
{
struct cfg_hostres *host_res;
struct bridge_dev_context *pbridge_context;
int status = 0;
status = dev_get_bridge_context(hdev_obj, &pbridge_context);
if (!pbridge_context)
status = -EFAULT;
if (!status) {
host_res = pbridge_context->resources;
if (!host_res)
return -EPERM;
hnode_mgr->chnl_offset = host_res->chnl_offset;
hnode_mgr->chnl_buf_size = host_res->chnl_buf_size;
hnode_mgr->num_chnls = host_res->num_chnls;
/*
* PROC will add an API to get dsp_processorinfo.
* Fill in default values for now.
*/
/* TODO -- Instead of hard coding, take from registry */
hnode_mgr->proc_family = 6000;
hnode_mgr->proc_type = 6410;
hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
hnode_mgr->dsp_word_size = DSPWORDSIZE;
hnode_mgr->dsp_data_mau_size = DSPWORDSIZE;
hnode_mgr->dsp_mau_size = 1;
}
return status;
}
/*
* ======== node_get_uuid_props ========
* Purpose:
* Fetch Node UUID properties from DCD/DOF file.
*/
int node_get_uuid_props(void *hprocessor,
const struct dsp_uuid *node_uuid,
struct dsp_ndbprops *node_props)
{
struct node_mgr *hnode_mgr = NULL;
struct dev_object *hdev_obj;
int status = 0;
struct dcd_nodeprops dcd_node_props;
struct dsp_processorstate proc_state;
if (hprocessor == NULL || node_uuid == NULL) {
status = -EFAULT;
goto func_end;
}
status = proc_get_state(hprocessor, &proc_state,
sizeof(struct dsp_processorstate));
if (status)
goto func_end;
/* If processor is in error state then don't attempt
to send the message */
if (proc_state.proc_state == PROC_ERROR) {
status = -EPERM;
goto func_end;
}
status = proc_get_dev_object(hprocessor, &hdev_obj);
if (hdev_obj) {
status = dev_get_node_manager(hdev_obj, &hnode_mgr);
if (hnode_mgr == NULL) {
status = -EFAULT;
goto func_end;
}
}
/*
* Enter the critical section. This is needed because
* dcd_get_object_def will ultimately end up calling dbll_open/close,
* which needs to be protected in order to not corrupt the zlib manager
* (COD).
*/
mutex_lock(&hnode_mgr->node_mgr_lock);
dcd_node_props.str_create_phase_fxn = NULL;
dcd_node_props.str_execute_phase_fxn = NULL;
dcd_node_props.str_delete_phase_fxn = NULL;
dcd_node_props.str_i_alg_name = NULL;
status = dcd_get_object_def(hnode_mgr->dcd_mgr,
(struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
(struct dcd_genericobj *)&dcd_node_props);
if (!status) {
*node_props = dcd_node_props.ndb_props;
kfree(dcd_node_props.str_create_phase_fxn);
kfree(dcd_node_props.str_execute_phase_fxn);
kfree(dcd_node_props.str_delete_phase_fxn);
kfree(dcd_node_props.str_i_alg_name);
}
/* Leave the critical section, we're done. */
mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
return status;
}
/*
* ======== get_rms_fxns ========
* Purpose:
* Retrieve the RMS functions.
*/
static int get_rms_fxns(struct node_mgr *hnode_mgr)
{
s32 i;
struct dev_object *dev_obj = hnode_mgr->dev_obj;
int status = 0;
static char *psz_fxns[NUMRMSFXNS] = {
"RMS_queryServer", /* RMSQUERYSERVER */
"RMS_configureServer", /* RMSCONFIGURESERVER */
"RMS_createNode", /* RMSCREATENODE */
"RMS_executeNode", /* RMSEXECUTENODE */
"RMS_deleteNode", /* RMSDELETENODE */
"RMS_changeNodePriority", /* RMSCHANGENODEPRIORITY */
"RMS_readMemory", /* RMSREADMEMORY */
"RMS_writeMemory", /* RMSWRITEMEMORY */
"RMS_copy", /* RMSCOPY */
};
for (i = 0; i < NUMRMSFXNS; i++) {
status = dev_get_symbol(dev_obj, psz_fxns[i],
&(hnode_mgr->fxn_addrs[i]));
if (status) {
if (status == -ESPIPE) {
/*
* May be loaded dynamically (in the future),
* but return an error for now.
*/
dev_dbg(bridge, "%s: RMS function: %s currently"
" not loaded\n", __func__, psz_fxns[i]);
} else {
dev_dbg(bridge, "%s: Symbol not found: %s "
"status = 0x%x\n", __func__,
psz_fxns[i], status);
break;
}
}
}
return status;
}
/*
* ======== ovly ========
* Purpose:
* Called during overlay. Sends a command to RMS to copy a block of data.
*/
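/*
* Returns the number of bytes copied, or 0 on failure, as expected
* by the dynamic loader's overlay callback.
*/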
static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
u32 ul_num_bytes, u32 mem_space)
{
struct node_object *hnode = (struct node_object *)priv_ref;
struct node_mgr *hnode_mgr;
u32 ul_bytes = 0;
u32 ul_size;
u32 ul_timeout;
int status = 0;
struct bridge_dev_context *hbridge_context;
/* Function interface to Bridge driver*/
struct bridge_drv_interface *intf_fxns;
hnode_mgr = hnode->node_mgr;
ul_size = ul_num_bytes / hnode_mgr->dsp_word_size;
ul_timeout = hnode->timeout;
/* Call new MemCopy function */
intf_fxns = hnode_mgr->intf_fxns;
status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
if (!status) {
status =
(*intf_fxns->brd_mem_copy) (hbridge_context,
dsp_run_addr, dsp_load_addr,
ul_num_bytes, (u32) mem_space);
if (!status)
ul_bytes = ul_num_bytes;
else
pr_debug("%s: failed to copy brd memory, status 0x%x\n",
__func__, status);
} else {
pr_debug("%s: failed to get Bridge context, status 0x%x\n",
__func__, status);
}
return ul_bytes;
}
/*
* ======== mem_write ========
*/
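/*
* Dynamic loader write callback: writes pbuf to DSP address dsp_add
* through the bridge driver and returns the byte count.
*/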
static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
u32 ul_num_bytes, u32 mem_space)
{
struct node_object *hnode = (struct node_object *)priv_ref;
struct node_mgr *hnode_mgr;
u16 mem_sect_type;
u32 ul_timeout;
int status = 0;
struct bridge_dev_context *hbridge_context;
/* Function interface to Bridge driver */
struct bridge_drv_interface *intf_fxns;
hnode_mgr = hnode->node_mgr;
ul_timeout = hnode->timeout;
mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
/* Call new MemWrite function */
intf_fxns = hnode_mgr->intf_fxns;
status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
status = (*intf_fxns->brd_mem_write) (hbridge_context, pbuf,
dsp_add, ul_num_bytes, mem_sect_type);
return ul_num_bytes;
}
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
/*
* ======== node_find_addr ========
*/
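/*
* Search every node's dynamically loaded segments for the one
* containing sym_addr (within offset_range); used by the backtrace
* support to symbolize DSP addresses.
*/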
int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
u32 offset_range, void *sym_addr_output, char *sym_name)
{
struct node_object *node_obj;
int status = -ENOENT;
pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
(unsigned int) node_mgr,
sym_addr, offset_range,
(unsigned int) sym_addr_output, sym_name);
list_for_each_entry(node_obj, &node_mgr->node_list, list_elem) {
status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
offset_range, sym_addr_output, sym_name);
if (!status)
break;
}
return status;
}
#endif
| gpl-2.0 |
spegelius/android_kernel_samsung_jf | arch/arm/mach-omap2/clockdomain44xx.c | 4942 | 3688 | /*
* OMAP4 clockdomain control
*
* Copyright (C) 2008-2010 Texas Instruments, Inc.
* Copyright (C) 2008-2010 Nokia Corporation
*
* Derived from mach-omap2/clockdomain.c written by Paul Walmsley
* Rajendra Nayak <rnayak@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include "clockdomain.h"
#include "cminst44xx.h"
#include "cm44xx.h"
static int omap4_clkdm_add_wkup_sleep_dep(struct clockdomain *clkdm1,
struct clockdomain *clkdm2)
{
omap4_cminst_set_inst_reg_bits((1 << clkdm2->dep_bit),
clkdm1->prcm_partition,
clkdm1->cm_inst, clkdm1->clkdm_offs +
OMAP4_CM_STATICDEP);
return 0;
}
static int omap4_clkdm_del_wkup_sleep_dep(struct clockdomain *clkdm1,
struct clockdomain *clkdm2)
{
omap4_cminst_clear_inst_reg_bits((1 << clkdm2->dep_bit),
clkdm1->prcm_partition,
clkdm1->cm_inst, clkdm1->clkdm_offs +
OMAP4_CM_STATICDEP);
return 0;
}
static int omap4_clkdm_read_wkup_sleep_dep(struct clockdomain *clkdm1,
struct clockdomain *clkdm2)
{
return omap4_cminst_read_inst_reg_bits(clkdm1->prcm_partition,
clkdm1->cm_inst, clkdm1->clkdm_offs +
OMAP4_CM_STATICDEP,
(1 << clkdm2->dep_bit));
}
static int omap4_clkdm_clear_all_wkup_sleep_deps(struct clockdomain *clkdm)
{
struct clkdm_dep *cd;
u32 mask = 0;
for (cd = clkdm->wkdep_srcs; cd && cd->clkdm_name; cd++) {
if (!cd->clkdm)
continue; /* only happens if data is erroneous */
mask |= 1 << cd->clkdm->dep_bit;
atomic_set(&cd->wkdep_usecount, 0);
}
omap4_cminst_clear_inst_reg_bits(mask, clkdm->prcm_partition,
clkdm->cm_inst, clkdm->clkdm_offs +
OMAP4_CM_STATICDEP);
return 0;
}
static int omap4_clkdm_sleep(struct clockdomain *clkdm)
{
omap4_cminst_clkdm_force_sleep(clkdm->prcm_partition,
clkdm->cm_inst, clkdm->clkdm_offs);
return 0;
}
static int omap4_clkdm_wakeup(struct clockdomain *clkdm)
{
omap4_cminst_clkdm_force_wakeup(clkdm->prcm_partition,
clkdm->cm_inst, clkdm->clkdm_offs);
return 0;
}
static void omap4_clkdm_allow_idle(struct clockdomain *clkdm)
{
omap4_cminst_clkdm_enable_hwsup(clkdm->prcm_partition,
clkdm->cm_inst, clkdm->clkdm_offs);
}
static void omap4_clkdm_deny_idle(struct clockdomain *clkdm)
{
omap4_cminst_clkdm_disable_hwsup(clkdm->prcm_partition,
clkdm->cm_inst, clkdm->clkdm_offs);
}
static int omap4_clkdm_clk_enable(struct clockdomain *clkdm)
{
if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
return omap4_clkdm_wakeup(clkdm);
return 0;
}
static int omap4_clkdm_clk_disable(struct clockdomain *clkdm)
{
bool hwsup = false;
hwsup = omap4_cminst_is_clkdm_in_hwsup(clkdm->prcm_partition,
clkdm->cm_inst, clkdm->clkdm_offs);
if (!hwsup && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP))
omap4_clkdm_sleep(clkdm);
return 0;
}
struct clkdm_ops omap4_clkdm_operations = {
.clkdm_add_wkdep = omap4_clkdm_add_wkup_sleep_dep,
.clkdm_del_wkdep = omap4_clkdm_del_wkup_sleep_dep,
.clkdm_read_wkdep = omap4_clkdm_read_wkup_sleep_dep,
.clkdm_clear_all_wkdeps = omap4_clkdm_clear_all_wkup_sleep_deps,
.clkdm_add_sleepdep = omap4_clkdm_add_wkup_sleep_dep,
.clkdm_del_sleepdep = omap4_clkdm_del_wkup_sleep_dep,
.clkdm_read_sleepdep = omap4_clkdm_read_wkup_sleep_dep,
.clkdm_clear_all_sleepdeps = omap4_clkdm_clear_all_wkup_sleep_deps,
.clkdm_sleep = omap4_clkdm_sleep,
.clkdm_wakeup = omap4_clkdm_wakeup,
.clkdm_allow_idle = omap4_clkdm_allow_idle,
.clkdm_deny_idle = omap4_clkdm_deny_idle,
.clkdm_clk_enable = omap4_clkdm_clk_enable,
.clkdm_clk_disable = omap4_clkdm_clk_disable,
};
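/*
* This ops table is registered with the generic clockdomain framework
* at init time; drivers use the generic clkdm_* API, which dispatches
* through these function pointers.
*/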
| gpl-2.0 |
emxys1/gpio_imx6rex-linux-3.10.17 | arch/openrisc/kernel/module.c | 7502 | 2068 | /*
* OpenRISC module.c
*
* Linux architectural port borrowing liberally from similar works of
* others. All original copyrights apply as per the original source
* declaration.
*
* Modifications for the OpenRISC architecture:
* Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
uint32_t value;
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
value = sym->st_value + rel[i].r_addend;
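/*
 * Illustrative example (not from the original source): for a
 * R_OR32_JUMPTARG relocation, the case below computes the signed word
 * offset from the instruction to the target,
 *	(sym->st_value + addend - location) >> 2,
 * truncates it to the 26-bit immediate field, and merges it with the
 * top 6 opcode bits preserved from *location.
 */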
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_OR32_32:
*location = value;
break;
case R_OR32_CONST:
/* patch the low 16 bits of the value into the immediate field */
*((uint16_t *)location + 1) = (uint16_t)value;
break;
case R_OR32_CONSTH:
/* patch the high 16 bits of the value into the immediate field */
*((uint16_t *)location + 1) = (uint16_t)(value >> 16);
break;
case R_OR32_JUMPTARG:
value -= (uint32_t)location;
value >>= 2;
value &= 0x03ffffff;
value |= *location & 0xfc000000;
*location = value;
break;
default:
pr_err("module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
break;
}
}
return 0;
}
| gpl-2.0 |
PoonKang/Kernel_GT-N8013_ICS | drivers/media/video/cx231xx/cx231xx-pcb-cfg.c | 10062 | 20991 | /*
cx231xx-pcb-config.c - driver for Conexant
Cx23100/101/102 USB video capture devices
Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "cx231xx.h"
#include "cx231xx-conf-reg.h"
static int pcb_debug;
module_param(pcb_debug, int, 0644);
MODULE_PARM_DESC(pcb_debug, "enable pcb config debug messages [video]");
/******************************************************************************/
struct pcb_config cx231xx_Scenario[] = {
{
INDEX_SELFPOWER_DIGITAL_ONLY, /* index */
USB_SELF_POWER, /* power_type */
0, /* speed, not decided yet */
MOD_DIGITAL, /* mode */
SOURCE_TS_BDA, /* ts1_source, digital tv only */
NOT_SUPPORTED, /* ts2_source */
NOT_SUPPORTED, /* analog source */
0, /* digital_index */
0, /* analog index */
0, /* dif_index */
0, /* external_index */
1, /* only one configuration */
{
{
0, /* config index */
{
0, /* interrupt ep index */
1, /* ts1 index */
NOT_SUPPORTED, /* TS2 index */
NOT_SUPPORTED, /* AUDIO */
NOT_SUPPORTED, /* VIDEO */
NOT_SUPPORTED, /* VANC */
NOT_SUPPORTED, /* HANC */
NOT_SUPPORTED /* ir_index */
}
,
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
,
/* full-speed config */
{
{
0, /* config index */
{
0, /* interrupt ep index */
1, /* ts1 index */
NOT_SUPPORTED, /* TS2 index */
NOT_SUPPORTED, /* AUDIO */
NOT_SUPPORTED, /* VIDEO */
NOT_SUPPORTED, /* VANC */
NOT_SUPPORTED, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
}
,
{
INDEX_SELFPOWER_DUAL_DIGITAL, /* index */
USB_SELF_POWER, /* power_type */
0, /* speed, not decided yet */
MOD_DIGITAL, /* mode */
SOURCE_TS_BDA, /* ts1_source, digital tv only */
0, /* ts2_source, needs update from register */
NOT_SUPPORTED, /* analog source */
0, /* digital_index */
0, /* analog index */
0, /* dif_index */
0, /* external_index */
1, /* only one configuration */
{
{
0, /* config index */
{
0, /* interrupt ep index */
1, /* ts1 index */
2, /* TS2 index */
NOT_SUPPORTED, /* AUDIO */
NOT_SUPPORTED, /* VIDEO */
NOT_SUPPORTED, /* VANC */
NOT_SUPPORTED, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
,
/* full-speed */
{
{
0, /* config index */
{
0, /* interrupt ep index */
1, /* ts1 index */
2, /* TS2 index */
NOT_SUPPORTED, /* AUDIO */
NOT_SUPPORTED, /* VIDEO */
NOT_SUPPORTED, /* VANC */
NOT_SUPPORTED, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
}
,
{
INDEX_SELFPOWER_ANALOG_ONLY, /* index */
USB_SELF_POWER, /* power_type */
0, /* speed, not decided yet */
MOD_ANALOG | MOD_DIF | MOD_EXTERNAL, /* mode, analog tv only */
NOT_SUPPORTED, /* ts1_source, NOT SUPPORTED */
NOT_SUPPORTED, /* ts2_source, NOT SUPPORTED */
0, /* analog source, needs update */
0, /* digital_index */
0, /* analog index */
0, /* dif_index */
0, /* external_index */
1, /* only one configuration */
{
{
0, /* config index */
{
0, /* interrupt ep index */
NOT_SUPPORTED, /* ts1 index */
NOT_SUPPORTED, /* TS2 index */
1, /* AUDIO */
2, /* VIDEO */
3, /* VANC */
4, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
,
/* full-speed */
{
{
0, /* config index */
{
0, /* interrupt ep index */
NOT_SUPPORTED, /* ts1 index */
NOT_SUPPORTED, /* TS2 index */
1, /* AUDIO */
2, /* VIDEO */
NOT_SUPPORTED, /* VANC */
NOT_SUPPORTED, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
}
,
{
INDEX_SELFPOWER_DUAL, /* index */
USB_SELF_POWER, /* power_type */
0, /* speed, not decided yet */
/* mode, analog tv and digital path */
MOD_ANALOG | MOD_DIF | MOD_DIGITAL | MOD_EXTERNAL,
0, /* ts1_source, will be updated from register */
NOT_SUPPORTED, /* ts2_source, NOT SUPPORTED */
0, /* analog source, needs update */
0, /* digital_index */
0, /* analog index */
0, /* dif_index */
0, /* external_index */
1, /* only one configuration */
{
{
0, /* config index */
{
0, /* interrupt ep index */
1, /* ts1 index */
NOT_SUPPORTED, /* TS2 index */
2, /* AUDIO */
3, /* VIDEO */
4, /* VANC */
5, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
,
/* full-speed */
{
{
0, /* config index */
{
0, /* interrupt ep index */
1, /* ts1 index */
NOT_SUPPORTED, /* TS2 index */
2, /* AUDIO */
3, /* VIDEO */
NOT_SUPPORTED, /* VANC */
NOT_SUPPORTED, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
}
,
{
INDEX_SELFPOWER_TRIPLE, /* index */
USB_SELF_POWER, /* power_type */
0, /* speed, not decided yet */
/* mode, analog tv and digital path */
MOD_ANALOG | MOD_DIF | MOD_DIGITAL | MOD_EXTERNAL,
0, /* ts1_source, updated from register */
0, /* ts2_source, updated from register */
0, /* analog source, needs update */
0, /* digital_index */
0, /* analog index */
0, /* dif_index */
0, /* external_index */
1, /* only one configuration */
{
{
0, /* config index */
{
0, /* interrupt ep index */
1, /* ts1 index */
2, /* TS2 index */
3, /* AUDIO */
4, /* VIDEO */
5, /* VANC */
6, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
,
/* full-speed */
{
{
0, /* config index */
{
0, /* interrupt ep index */
1, /* ts1 index */
2, /* TS2 index */
3, /* AUDIO */
4, /* VIDEO */
NOT_SUPPORTED, /* VANC */
NOT_SUPPORTED, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
}
,
{
INDEX_SELFPOWER_COMPRESSOR, /* index */
USB_SELF_POWER, /* power_type */
0, /* speed, not decided yet */
/* mode, analog tv and digital path */
MOD_ANALOG | MOD_DIF | MOD_DIGITAL | MOD_EXTERNAL,
NOT_SUPPORTED, /* ts1_source, disabled */
SOURCE_TS_BDA, /* ts2_source */
0, /* analog source, needs update */
0, /* digital_index */
0, /* analog index */
0, /* dif_index */
0, /* external_index */
1, /* only one configuration */
{
{
0, /* config index */
{
0, /* interrupt ep index */
NOT_SUPPORTED, /* ts1 index */
1, /* TS2 index */
2, /* AUDIO */
3, /* VIDEO */
4, /* VANC */
5, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
,
/* full-speed */
{
{
0, /* config index */
{
0, /* interrupt ep index */
NOT_SUPPORTED, /* ts1 index */
1, /* TS2 index */
2, /* AUDIO */
3, /* VIDEO */
NOT_SUPPORTED, /* VANC */
NOT_SUPPORTED, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
}
,
{
INDEX_BUSPOWER_DIGITAL_ONLY, /* index */
USB_BUS_POWER, /* power_type */
0, /* speed, not decided yet */
MOD_DIGITAL, /* mode, digital path only */
SOURCE_TS_BDA, /* ts1_source */
NOT_SUPPORTED, /* ts2_source */
NOT_SUPPORTED, /* analog source */
0, /* digital_index */
0, /* analog index */
0, /* dif_index */
0, /* external_index */
1, /* only one configuration */
{
{
0, /* config index */
{
0, /* interrupt ep index = 2 */
1, /* ts1 index */
NOT_SUPPORTED, /* TS2 index */
NOT_SUPPORTED, /* AUDIO */
NOT_SUPPORTED, /* VIDEO */
NOT_SUPPORTED, /* VANC */
NOT_SUPPORTED, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
,
/* full-speed */
{
{
0, /* config index */
{
0, /* interrupt ep index = 2 */
1, /* ts1 index */
NOT_SUPPORTED, /* TS2 index */
NOT_SUPPORTED, /* AUDIO */
NOT_SUPPORTED, /* VIDEO */
NOT_SUPPORTED, /* VANC */
NOT_SUPPORTED, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
}
,
{
INDEX_BUSPOWER_ANALOG_ONLY, /* index */
USB_BUS_POWER, /* power_type */
0, /* speed, not decided yet */
MOD_ANALOG, /* mode, analog tv only */
NOT_SUPPORTED, /* ts1_source, disabled */
NOT_SUPPORTED, /* ts2_source */
SOURCE_ANALOG, /* analog source--analog */
0, /* digital_index */
0, /* analog index */
0, /* dif_index */
0, /* external_index */
1, /* only one configuration */
{
{
0, /* config index */
{
0, /* interrupt ep index */
NOT_SUPPORTED, /* ts1 index */
NOT_SUPPORTED, /* TS2 index */
1, /* AUDIO */
2, /* VIDEO */
3, /* VANC */
4, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
,
{ /* full-speed */
{
0, /* config index */
{
0, /* interrupt ep index */
NOT_SUPPORTED, /* ts1 index */
NOT_SUPPORTED, /* TS2 index */
1, /* AUDIO */
2, /* VIDEO */
NOT_SUPPORTED, /* VANC */
NOT_SUPPORTED, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
}
,
{
INDEX_BUSPOWER_DIF_ONLY, /* index */
USB_BUS_POWER, /* power_type */
0, /* speed, not decided yet */
/* mode, analog tv and digital path */
MOD_DIF | MOD_ANALOG | MOD_DIGITAL | MOD_EXTERNAL,
SOURCE_TS_BDA, /* ts1_source */
NOT_SUPPORTED, /* ts2_source */
SOURCE_DIF | SOURCE_ANALOG | SOURCE_EXTERNAL, /* analog source, dif */
0, /* digital_index */
0, /* analog index */
0, /* dif_index */
0, /* external_index */
1, /* only one configuration */
{
{
0, /* config index */
{
0, /* interrupt ep index */
1, /* ts1 index */
NOT_SUPPORTED, /* TS2 index */
2, /* AUDIO */
3, /* VIDEO */
4, /* VANC */
5, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
,
{ /* full speed */
{
0, /* config index */
{
0, /* interrupt ep index */
1, /* ts1 index */
NOT_SUPPORTED, /* TS2 index */
2, /* AUDIO */
3, /* VIDEO */
NOT_SUPPORTED, /* VANC */
NOT_SUPPORTED, /* HANC */
NOT_SUPPORTED /* ir_index */
}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
,
{NOT_SUPPORTED, {NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED, NOT_SUPPORTED, NOT_SUPPORTED,
NOT_SUPPORTED}
}
}
}
,
};
/*****************************************************************/
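/*
 * initialize_cx231xx() reads the BOARD_CFG_STAT register and picks
 * the matching entry from cx231xx_Scenario[] above: bit 0 of the
 * register gives the USB speed (1 = high speed, 0 = full speed), the
 * BUS_POWER bit selects the bus-power vs. self-power branch, and the
 * remaining masked bits select the scenario index.
 */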
u32 initialize_cx231xx(struct cx231xx *dev)
{
u32 config_info = 0;
struct pcb_config *p_pcb_info;
u8 usb_speed = 1; /* from register: 1 = HS, 0 = FS */
u8 data[4] = { 0, 0, 0, 0 };
u32 ts1_source = 0;
u32 ts2_source = 0;
u32 analog_source = 0;
u8 _current_scenario_idx = 0xff;
ts1_source = SOURCE_TS_BDA;
ts2_source = SOURCE_TS_BDA;
/* read board config register to find out which
pcb config it is related to */
cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, data, 4);
config_info = *((u32 *) data);
usb_speed = (u8) (config_info & 0x1);
/* Verify this device belongs to Bus power or Self power device */
if (config_info & BUS_POWER) { /* bus-power */
switch (config_info & BUSPOWER_MASK) {
case TS1_PORT | BUS_POWER:
cx231xx_Scenario[INDEX_BUSPOWER_DIGITAL_ONLY].speed =
usb_speed;
p_pcb_info =
&cx231xx_Scenario[INDEX_BUSPOWER_DIGITAL_ONLY];
_current_scenario_idx = INDEX_BUSPOWER_DIGITAL_ONLY;
break;
case AVDEC_ENABLE | BUS_POWER:
cx231xx_Scenario[INDEX_BUSPOWER_ANALOG_ONLY].speed =
usb_speed;
p_pcb_info =
&cx231xx_Scenario[INDEX_BUSPOWER_ANALOG_ONLY];
_current_scenario_idx = INDEX_BUSPOWER_ANALOG_ONLY;
break;
case AVDEC_ENABLE | BUS_POWER | TS1_PORT:
cx231xx_Scenario[INDEX_BUSPOWER_DIF_ONLY].speed =
usb_speed;
p_pcb_info = &cx231xx_Scenario[INDEX_BUSPOWER_DIF_ONLY];
_current_scenario_idx = INDEX_BUSPOWER_DIF_ONLY;
break;
default:
cx231xx_info("bad config in buspower\n");
cx231xx_info("config_info=%x\n",
(config_info & BUSPOWER_MASK));
return 1;
}
} else { /* self-power */
switch (config_info & SELFPOWER_MASK) {
case TS1_PORT | SELF_POWER:
cx231xx_Scenario[INDEX_SELFPOWER_DIGITAL_ONLY].speed =
usb_speed;
p_pcb_info =
&cx231xx_Scenario[INDEX_SELFPOWER_DIGITAL_ONLY];
_current_scenario_idx = INDEX_SELFPOWER_DIGITAL_ONLY;
break;
case TS1_TS2_PORT | SELF_POWER:
cx231xx_Scenario[INDEX_SELFPOWER_DUAL_DIGITAL].speed =
usb_speed;
cx231xx_Scenario[INDEX_SELFPOWER_DUAL_DIGITAL].
ts2_source = ts2_source;
p_pcb_info =
&cx231xx_Scenario[INDEX_SELFPOWER_DUAL_DIGITAL];
_current_scenario_idx = INDEX_SELFPOWER_DUAL_DIGITAL;
break;
case AVDEC_ENABLE | SELF_POWER:
cx231xx_Scenario[INDEX_SELFPOWER_ANALOG_ONLY].speed =
usb_speed;
cx231xx_Scenario[INDEX_SELFPOWER_ANALOG_ONLY].
analog_source = analog_source;
p_pcb_info =
&cx231xx_Scenario[INDEX_SELFPOWER_ANALOG_ONLY];
_current_scenario_idx = INDEX_SELFPOWER_ANALOG_ONLY;
break;
case AVDEC_ENABLE | TS1_PORT | SELF_POWER:
cx231xx_Scenario[INDEX_SELFPOWER_DUAL].speed =
usb_speed;
cx231xx_Scenario[INDEX_SELFPOWER_DUAL].ts1_source =
ts1_source;
cx231xx_Scenario[INDEX_SELFPOWER_DUAL].analog_source =
analog_source;
p_pcb_info = &cx231xx_Scenario[INDEX_SELFPOWER_DUAL];
_current_scenario_idx = INDEX_SELFPOWER_DUAL;
break;
case AVDEC_ENABLE | TS1_TS2_PORT | SELF_POWER:
cx231xx_Scenario[INDEX_SELFPOWER_TRIPLE].speed =
usb_speed;
cx231xx_Scenario[INDEX_SELFPOWER_TRIPLE].ts1_source =
ts1_source;
cx231xx_Scenario[INDEX_SELFPOWER_TRIPLE].ts2_source =
ts2_source;
cx231xx_Scenario[INDEX_SELFPOWER_TRIPLE].analog_source =
analog_source;
p_pcb_info = &cx231xx_Scenario[INDEX_SELFPOWER_TRIPLE];
_current_scenario_idx = INDEX_SELFPOWER_TRIPLE;
break;
case AVDEC_ENABLE | TS1VIP_TS2_PORT | SELF_POWER:
cx231xx_Scenario[INDEX_SELFPOWER_COMPRESSOR].speed =
usb_speed;
cx231xx_Scenario[INDEX_SELFPOWER_COMPRESSOR].
analog_source = analog_source;
p_pcb_info =
&cx231xx_Scenario[INDEX_SELFPOWER_COMPRESSOR];
_current_scenario_idx = INDEX_SELFPOWER_COMPRESSOR;
break;
default:
cx231xx_info("bad scenario\n");
cx231xx_info("config_info=%x\n",
(config_info & SELFPOWER_MASK));
return 1;
}
}
dev->current_scenario_idx = _current_scenario_idx;
memcpy(&dev->current_pcb_config, p_pcb_info,
sizeof(struct pcb_config));
if (pcb_debug) {
cx231xx_info("SC(0x00) register = 0x%x\n", config_info);
cx231xx_info("scenario %d\n",
(dev->current_pcb_config.index) + 1);
cx231xx_info("type=%x\n", dev->current_pcb_config.type);
cx231xx_info("mode=%x\n", dev->current_pcb_config.mode);
cx231xx_info("speed=%x\n", dev->current_pcb_config.speed);
cx231xx_info("ts1_source=%x\n",
dev->current_pcb_config.ts1_source);
cx231xx_info("ts2_source=%x\n",
dev->current_pcb_config.ts2_source);
cx231xx_info("analog_source=%x\n",
dev->current_pcb_config.analog_source);
}
return 0;
}
| gpl-2.0 |
rombaby/android_kernel_xiaomi_redmi2 | drivers/mfd/wcd9xxx-irq.c | 79 | 20027 | /* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/wcd9xxx/core-resource.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include <linux/mfd/wcd9xxx/wcd9310_registers.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <soc/qcom/pm.h>
#define BYTE_BIT_MASK(nr) (1UL << ((nr) % BITS_PER_BYTE))
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS 100
#ifdef CONFIG_OF
struct wcd9xxx_irq_drv_data {
struct irq_domain *domain;
int irq;
};
#endif
static int virq_to_phyirq(
struct wcd9xxx_core_resource *wcd9xxx_res, int virq);
static int phyirq_to_virq(
struct wcd9xxx_core_resource *wcd9xxx_res, int irq);
static unsigned int wcd9xxx_irq_get_upstream_irq(
struct wcd9xxx_core_resource *wcd9xxx_res);
static void wcd9xxx_irq_put_upstream_irq(
struct wcd9xxx_core_resource *wcd9xxx_res);
static int wcd9xxx_map_irq(
struct wcd9xxx_core_resource *wcd9xxx_res, int irq);
static void wcd9xxx_irq_lock(struct irq_data *data)
{
struct wcd9xxx_core_resource *wcd9xxx_res =
irq_data_get_irq_chip_data(data);
mutex_lock(&wcd9xxx_res->irq_lock);
}
static void wcd9xxx_irq_sync_unlock(struct irq_data *data)
{
struct wcd9xxx_core_resource *wcd9xxx_res =
irq_data_get_irq_chip_data(data);
int i;
if ((ARRAY_SIZE(wcd9xxx_res->irq_masks_cur) >
WCD9XXX_MAX_IRQ_REGS) ||
(ARRAY_SIZE(wcd9xxx_res->irq_masks_cache) >
WCD9XXX_MAX_IRQ_REGS)) {
pr_err("%s: Array Size out of bound\n", __func__);
return;
}
if (!wcd9xxx_res->codec_reg_write) {
pr_err("%s: Codec reg write callback function not defined\n",
__func__);
return;
}
for (i = 0; i < ARRAY_SIZE(wcd9xxx_res->irq_masks_cur); i++) {
/* If there's been a change in the mask, write it back
* to the hardware.
*/
if (wcd9xxx_res->irq_masks_cur[i] !=
wcd9xxx_res->irq_masks_cache[i]) {
wcd9xxx_res->irq_masks_cache[i] =
wcd9xxx_res->irq_masks_cur[i];
wcd9xxx_res->codec_reg_write(wcd9xxx_res,
WCD9XXX_A_INTR_MASK0 + i,
wcd9xxx_res->irq_masks_cur[i]);
}
}
mutex_unlock(&wcd9xxx_res->irq_lock);
}
static void wcd9xxx_irq_enable(struct irq_data *data)
{
struct wcd9xxx_core_resource *wcd9xxx_res =
irq_data_get_irq_chip_data(data);
int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
wcd9xxx_res->irq_masks_cur[BIT_BYTE(wcd9xxx_irq)] &=
~(BYTE_BIT_MASK(wcd9xxx_irq));
}
static void wcd9xxx_irq_disable(struct irq_data *data)
{
struct wcd9xxx_core_resource *wcd9xxx_res =
irq_data_get_irq_chip_data(data);
int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
wcd9xxx_res->irq_masks_cur[BIT_BYTE(wcd9xxx_irq)]
|= BYTE_BIT_MASK(wcd9xxx_irq);
}
static void wcd9xxx_irq_mask(struct irq_data *d)
{
/* does nothing, but required since the irq core calls irq_mask without a NULL check */
}
static struct irq_chip wcd9xxx_irq_chip = {
.name = "wcd9xxx",
.irq_bus_lock = wcd9xxx_irq_lock,
.irq_bus_sync_unlock = wcd9xxx_irq_sync_unlock,
.irq_disable = wcd9xxx_irq_disable,
.irq_enable = wcd9xxx_irq_enable,
.irq_mask = wcd9xxx_irq_mask,
};
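/*
 * The irq_chip above implements the usual lazy-update pattern for
 * interrupt controllers behind a slow bus: irq_enable/irq_disable only
 * flip bits in the cached irq_masks_cur[] while the bus lock is held,
 * and wcd9xxx_irq_sync_unlock() writes back only the mask registers
 * whose cached value actually changed.
 */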
bool wcd9xxx_lock_sleep(
struct wcd9xxx_core_resource *wcd9xxx_res)
{
enum wcd9xxx_pm_state os;
/*
* wcd9xxx_{lock/unlock}_sleep will mostly be called by wcd9xxx_irq_thread
* and its subroutines. However, btn0_lpress_fn is not a subroutine of
* wcd9xxx_irq_thread and can race with it, so accesses to wlock_holders
* must be guarded by the mutex.
*
* If the system hasn't resumed, simply return false so the codec driver's
* IRQ handler can return without handling the IRQ. As the interrupt line
* is still active, the codec will raise another IRQ to retry shortly.
*/
mutex_lock(&wcd9xxx_res->pm_lock);
if (wcd9xxx_res->wlock_holders++ == 0) {
pr_debug("%s: holding wake lock\n", __func__);
pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
msm_cpuidle_get_deep_idle_latency());
}
mutex_unlock(&wcd9xxx_res->pm_lock);
if (!wait_event_timeout(wcd9xxx_res->pm_wq,
((os = wcd9xxx_pm_cmpxchg(wcd9xxx_res,
WCD9XXX_PM_SLEEPABLE,
WCD9XXX_PM_AWAKE)) ==
WCD9XXX_PM_SLEEPABLE ||
(os == WCD9XXX_PM_AWAKE)),
msecs_to_jiffies(
WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
__func__,
WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, wcd9xxx_res->pm_state,
wcd9xxx_res->wlock_holders);
wcd9xxx_unlock_sleep(wcd9xxx_res);
return false;
}
wake_up_all(&wcd9xxx_res->pm_wq);
return true;
}
EXPORT_SYMBOL(wcd9xxx_lock_sleep);
void wcd9xxx_unlock_sleep(
struct wcd9xxx_core_resource *wcd9xxx_res)
{
mutex_lock(&wcd9xxx_res->pm_lock);
if (--wcd9xxx_res->wlock_holders == 0) {
pr_debug("%s: releasing wake lock pm_state %d -> %d\n",
__func__, wcd9xxx_res->pm_state, WCD9XXX_PM_SLEEPABLE);
/*
* if wcd9xxx_lock_sleep failed, pm_state would be still
* WCD9XXX_PM_ASLEEP, don't overwrite
*/
if (likely(wcd9xxx_res->pm_state == WCD9XXX_PM_AWAKE))
wcd9xxx_res->pm_state = WCD9XXX_PM_SLEEPABLE;
pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
PM_QOS_DEFAULT_VALUE);
}
mutex_unlock(&wcd9xxx_res->pm_lock);
wake_up_all(&wcd9xxx_res->pm_wq);
}
EXPORT_SYMBOL(wcd9xxx_unlock_sleep);
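/*
 * Typical caller pattern (a sketch, mirroring wcd9xxx_irq_thread
 * below): bracket any work that must not race with system suspend
 * between the lock/unlock pair, e.g.
 *
 *	if (!wcd9xxx_lock_sleep(wcd9xxx_res))
 *		return IRQ_NONE;
 *	... access the codec over the slow bus ...
 *	wcd9xxx_unlock_sleep(wcd9xxx_res);
 */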
void wcd9xxx_nested_irq_lock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
mutex_lock(&wcd9xxx_res->nested_irq_lock);
}
void wcd9xxx_nested_irq_unlock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
mutex_unlock(&wcd9xxx_res->nested_irq_lock);
}
static void wcd9xxx_irq_dispatch(struct wcd9xxx_core_resource *wcd9xxx_res,
struct intr_data *irqdata)
{
int irqbit = irqdata->intr_num;
if (!wcd9xxx_res->codec_reg_write) {
pr_err("%s: codec read/write callback not defined\n",
__func__);
return;
}
if (irqdata->clear_first) {
wcd9xxx_nested_irq_lock(wcd9xxx_res);
wcd9xxx_res->codec_reg_write(wcd9xxx_res,
WCD9XXX_A_INTR_CLEAR0 + BIT_BYTE(irqbit),
BYTE_BIT_MASK(irqbit));
if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
wcd9xxx_res->codec_reg_write(wcd9xxx_res,
WCD9XXX_A_INTR_MODE, 0x02);
handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
wcd9xxx_nested_irq_unlock(wcd9xxx_res);
} else {
wcd9xxx_nested_irq_lock(wcd9xxx_res);
handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
wcd9xxx_res->codec_reg_write(wcd9xxx_res,
WCD9XXX_A_INTR_CLEAR0 + BIT_BYTE(irqbit),
BYTE_BIT_MASK(irqbit));
if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
wcd9xxx_res->codec_reg_write(wcd9xxx_res,
WCD9XXX_A_INTR_MODE, 0x02);
wcd9xxx_nested_irq_unlock(wcd9xxx_res);
}
}
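/*
 * Whether an interrupt is acked before or after its nested handler
 * runs is controlled per-source by irqdata->clear_first above; the
 * intent (an assumption, not stated in the source) is that sources
 * which stay asserted while being serviced are cleared first, while
 * the rest are cleared only once handled.
 */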
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
int ret;
int i;
struct intr_data irqdata;
char linebuf[128];
static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
struct wcd9xxx_core_resource *wcd9xxx_res = data;
int num_irq_regs = wcd9xxx_res->num_irq_regs;
u8 status[num_irq_regs], status1[num_irq_regs];
if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
return IRQ_NONE;
}
if (!wcd9xxx_res->codec_bulk_read) {
dev_err(wcd9xxx_res->dev,
"%s: Codec Bulk Register read callback not supplied\n",
__func__);
goto err_disable_irq;
}
ret = wcd9xxx_res->codec_bulk_read(wcd9xxx_res,
WCD9XXX_A_INTR_STATUS0,
num_irq_regs, status);
if (ret < 0) {
dev_err(wcd9xxx_res->dev,
"Failed to read interrupt status: %d\n", ret);
goto err_disable_irq;
}
/* Apply masking */
for (i = 0; i < num_irq_regs; i++)
status[i] &= ~wcd9xxx_res->irq_masks_cur[i];
memcpy(status1, status, sizeof(status1));
/* Find out which interrupts were triggered and call each one's
* handler function.
*
* The codec has only one hardware irq line, shared by all of its
* internal interrupts, so the master irq handler may dispatch
* multiple nested irq handlers in a single pass, potentially out of
* order. Dispatch interrupts in the order maintained by the
* interrupt table.
*/
for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
irqdata = wcd9xxx_res->intr_table[i];
if (status[BIT_BYTE(irqdata.intr_num)] &
BYTE_BIT_MASK(irqdata.intr_num)) {
wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
status1[BIT_BYTE(irqdata.intr_num)] &=
~BYTE_BIT_MASK(irqdata.intr_num);
}
}
/*
* As a failsafe, if an unhandled irq is found, clear it to prevent an
* interrupt storm.
* Note that we can only declare an irq unhandled when no irq at all was
* handled by a nested handler, since Taiko can route some irqs to the
* qdsp instead of the host. The driver therefore must not clear pending
* irqs when some were handled and others were not.
*/
if (unlikely(!memcmp(status, status1, sizeof(status)))) {
if (__ratelimit(&ratelimit)) {
pr_warn("%s: Unhandled irq found\n", __func__);
hex_dump_to_buffer(status, sizeof(status), 16, 1,
linebuf, sizeof(linebuf), false);
pr_warn("%s: status0 : %s\n", __func__, linebuf);
hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
linebuf, sizeof(linebuf), false);
pr_warn("%s: status1 : %s\n", __func__, linebuf);
}
memset(status, 0xff, num_irq_regs);
ret = wcd9xxx_res->codec_bulk_write(wcd9xxx_res,
WCD9XXX_A_INTR_CLEAR0,
num_irq_regs, status);
if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
wcd9xxx_res->codec_reg_write(wcd9xxx_res,
WCD9XXX_A_INTR_MODE, 0x02);
}
wcd9xxx_unlock_sleep(wcd9xxx_res);
return IRQ_HANDLED;
err_disable_irq:
dev_err(wcd9xxx_res->dev,
"Disable irq %d\n", wcd9xxx_res->irq);
disable_irq_wake(wcd9xxx_res->irq);
disable_irq_nosync(wcd9xxx_res->irq);
wcd9xxx_unlock_sleep(wcd9xxx_res);
return IRQ_NONE;
}
void wcd9xxx_free_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
int irq, void *data)
{
free_irq(phyirq_to_virq(wcd9xxx_res, irq), data);
}
void wcd9xxx_enable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
enable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
void wcd9xxx_disable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
disable_irq_nosync(phyirq_to_virq(wcd9xxx_res, irq));
}
void wcd9xxx_disable_irq_sync(
struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
disable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
static int wcd9xxx_irq_setup_downstream_irq(
struct wcd9xxx_core_resource *wcd9xxx_res)
{
int irq, virq, ret;
pr_debug("%s: enter\n", __func__);
for (irq = 0; irq < wcd9xxx_res->num_irqs; irq++) {
/* Map OF irq */
virq = wcd9xxx_map_irq(wcd9xxx_res, irq);
pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
if (virq == NO_IRQ) {
pr_err("%s, No interrupt specifier for irq %d\n",
__func__, irq);
return NO_IRQ;
}
ret = irq_set_chip_data(virq, wcd9xxx_res);
if (ret) {
pr_err("%s: Failed to configure irq %d (%d)\n",
__func__, irq, ret);
return ret;
}
if (wcd9xxx_res->irq_level_high[irq])
irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
handle_level_irq);
else
irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
handle_edge_irq);
irq_set_nested_thread(virq, 1);
}
pr_debug("%s: leave\n", __func__);
return 0;
}
int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res)
{
int i, ret;
u8 irq_level[wcd9xxx_res->num_irq_regs];
mutex_init(&wcd9xxx_res->irq_lock);
mutex_init(&wcd9xxx_res->nested_irq_lock);
wcd9xxx_res->irq = wcd9xxx_irq_get_upstream_irq(wcd9xxx_res);
if (!wcd9xxx_res->irq) {
pr_warn("%s: irq driver is not yet initialized\n", __func__);
mutex_destroy(&wcd9xxx_res->irq_lock);
mutex_destroy(&wcd9xxx_res->nested_irq_lock);
return -EPROBE_DEFER;
}
pr_debug("%s: probed irq %d\n", __func__, wcd9xxx_res->irq);
/* Setup downstream IRQs */
ret = wcd9xxx_irq_setup_downstream_irq(wcd9xxx_res);
if (ret) {
pr_err("%s: Failed to setup downstream IRQ\n", __func__);
wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
mutex_destroy(&wcd9xxx_res->irq_lock);
mutex_destroy(&wcd9xxx_res->nested_irq_lock);
return ret;
}
/* irq 0 is level triggered; all other wcd9xxx interrupts are
* edge triggered */
wcd9xxx_res->irq_level_high[0] = true;
/* mask all the interrupts */
memset(irq_level, 0, wcd9xxx_res->num_irq_regs);
for (i = 0; i < wcd9xxx_res->num_irqs; i++) {
wcd9xxx_res->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
wcd9xxx_res->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
irq_level[BIT_BYTE(i)] |=
wcd9xxx_res->irq_level_high[i] << (i % BITS_PER_BYTE);
}
if (!wcd9xxx_res->codec_reg_write) {
dev_err(wcd9xxx_res->dev,
"%s: Codec Register write callback not defined\n",
__func__);
ret = -EINVAL;
goto fail_irq_init;
}
for (i = 0; i < wcd9xxx_res->num_irq_regs; i++) {
/* Initialize interrupt mask and level registers */
wcd9xxx_res->codec_reg_write(wcd9xxx_res,
WCD9XXX_A_INTR_LEVEL0 + i,
irq_level[i]);
wcd9xxx_res->codec_reg_write(wcd9xxx_res,
WCD9XXX_A_INTR_MASK0 + i,
wcd9xxx_res->irq_masks_cur[i]);
}
ret = request_threaded_irq(wcd9xxx_res->irq, NULL, wcd9xxx_irq_thread,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"wcd9xxx", wcd9xxx_res);
if (ret != 0)
dev_err(wcd9xxx_res->dev, "Failed to request IRQ %d: %d\n",
wcd9xxx_res->irq, ret);
else {
ret = enable_irq_wake(wcd9xxx_res->irq);
if (ret)
dev_err(wcd9xxx_res->dev,
"Failed to set wake interrupt on IRQ %d: %d\n",
wcd9xxx_res->irq, ret);
if (ret)
free_irq(wcd9xxx_res->irq, wcd9xxx_res);
}
if (ret)
goto fail_irq_init;
return ret;
fail_irq_init:
dev_err(wcd9xxx_res->dev,
"%s: Failed to init wcd9xxx irq\n", __func__);
wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
mutex_destroy(&wcd9xxx_res->irq_lock);
mutex_destroy(&wcd9xxx_res->nested_irq_lock);
return ret;
}
int wcd9xxx_request_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
int irq, irq_handler_t handler,
const char *name, void *data)
{
int virq;
virq = phyirq_to_virq(wcd9xxx_res, irq);
/*
* ARM needs us to explicitly flag the IRQ as valid
* and will mark it noprobe when we do so.
*/
#ifdef CONFIG_ARM
set_irq_flags(virq, IRQF_VALID);
#else
set_irq_noprobe(virq);
#endif
return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING,
name, data);
}
void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res)
{
dev_dbg(wcd9xxx_res->dev, "%s: Cleaning up irq %d\n", __func__,
wcd9xxx_res->irq);
if (wcd9xxx_res->irq) {
disable_irq_wake(wcd9xxx_res->irq);
free_irq(wcd9xxx_res->irq, wcd9xxx_res);
/* Release parent's of node */
wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
}
mutex_destroy(&wcd9xxx_res->irq_lock);
mutex_destroy(&wcd9xxx_res->nested_irq_lock);
}
#ifndef CONFIG_OF
static int phyirq_to_virq(
struct wcd9xxx_core_resource *wcd9xxx_res,
int offset)
{
return wcd9xxx_res->irq_base + offset;
}
static int virq_to_phyirq(
struct wcd9xxx_core_resource *wcd9xxx_res,
int virq)
{
return virq - wcd9xxx_res->irq_base;
}
static unsigned int wcd9xxx_irq_get_upstream_irq(
struct wcd9xxx_core_resource *wcd9xxx_res)
{
return wcd9xxx_res->irq;
}
static void wcd9xxx_irq_put_upstream_irq(
struct wcd9xxx_core_resource *wcd9xxx_res)
{
/* Do nothing */
}
static int wcd9xxx_map_irq(
struct wcd9xxx_core_resource *wcd9xxx_core_res, int irq)
{
return phyirq_to_virq(wcd9xxx_core_res, irq);
}
#else
int __init wcd9xxx_irq_of_init(struct device_node *node,
struct device_node *parent)
{
struct wcd9xxx_irq_drv_data *data;
pr_debug("%s: node %s, node parent %s\n", __func__,
node->name, node->parent->name);
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
/*
* The wcd9xxx_intc interrupt controller supports N-to-N irq mapping
* with a single-cell binding that carries irq numbers (offsets) only.
* Use irq_domain_simple_ops, which provides irq_domain_simple_map and
* irq_domain_xlate_onetwocell.
*/
data->domain = irq_domain_add_linear(node, WCD9XXX_MAX_NUM_IRQS,
&irq_domain_simple_ops, data);
if (!data->domain) {
kfree(data);
return -ENOMEM;
}
return 0;
}
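/*
 * Note: data->irq is left at zero here; it is filled in later by
 * wcd9xxx_irq_probe() (with a wmb()) once the "cdc-int" interrupt is
 * known, and readers such as wcd9xxx_irq_get_upstream_irq() pair that
 * with an rmb() before using it.
 */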
static struct wcd9xxx_irq_drv_data *
wcd9xxx_get_irq_drv_d(const struct wcd9xxx_core_resource *wcd9xxx_res)
{
struct device_node *pnode;
struct irq_domain *domain;
pnode = of_irq_find_parent(wcd9xxx_res->dev->of_node);
/* Shouldn't happen */
if (unlikely(!pnode))
return NULL;
domain = irq_find_host(pnode);
if (unlikely(!domain))
return NULL;
return (struct wcd9xxx_irq_drv_data *)domain->host_data;
}
static int phyirq_to_virq(struct wcd9xxx_core_resource *wcd9xxx_res, int offset)
{
struct wcd9xxx_irq_drv_data *data;
data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
if (!data) {
pr_warn("%s: not registered to interrupt controller\n",
__func__);
return -EINVAL;
}
return irq_linear_revmap(data->domain, offset);
}
static int virq_to_phyirq(struct wcd9xxx_core_resource *wcd9xxx_res, int virq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
if (unlikely(!irq_data)) {
pr_err("%s: irq_data is NULL", __func__);
return -EINVAL;
}
return irq_data->hwirq;
}
static unsigned int wcd9xxx_irq_get_upstream_irq(
struct wcd9xxx_core_resource *wcd9xxx_res)
{
struct wcd9xxx_irq_drv_data *data;
/* Hold parent's of node */
if (!of_node_get(of_irq_find_parent(wcd9xxx_res->dev->of_node)))
return -EINVAL;
data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
if (!data) {
pr_err("%s: interrupt controller is not registered\n", __func__);
return 0;
}
rmb();
return data->irq;
}
static void wcd9xxx_irq_put_upstream_irq(
struct wcd9xxx_core_resource *wcd9xxx_res)
{
/* Release parent's of node */
of_node_put(of_irq_find_parent(wcd9xxx_res->dev->of_node));
}
static int wcd9xxx_map_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
return of_irq_to_resource(wcd9xxx_res->dev->of_node, irq, NULL);
}
static int wcd9xxx_irq_probe(struct platform_device *pdev)
{
int irq;
struct irq_domain *domain;
struct wcd9xxx_irq_drv_data *data;
int ret = -EINVAL;
irq = platform_get_irq_byname(pdev, "cdc-int");
if (irq < 0) {
dev_err(&pdev->dev, "%s: Couldn't find cdc-int node(%d)\n",
__func__, irq);
return -EINVAL;
} else {
dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq);
domain = irq_find_host(pdev->dev.of_node);
if (unlikely(!domain)) {
pr_err("%s: domain is NULL", __func__);
return -EINVAL;
}
data = (struct wcd9xxx_irq_drv_data *)domain->host_data;
data->irq = irq;
wmb();
ret = 0;
}
return ret;
}
static int wcd9xxx_irq_remove(struct platform_device *pdev)
{
struct irq_domain *domain;
struct wcd9xxx_irq_drv_data *data;
domain = irq_find_host(pdev->dev.of_node);
if (unlikely(!domain)) {
pr_err("%s: domain is NULL", __func__);
return -EINVAL;
}
data = (struct wcd9xxx_irq_drv_data *)domain->host_data;
data->irq = 0;
wmb();
return 0;
}
static const struct of_device_id of_match[] = {
{ .compatible = "qcom,wcd9xxx-irq" },
{ }
};
static struct platform_driver wcd9xxx_irq_driver = {
.probe = wcd9xxx_irq_probe,
.remove = wcd9xxx_irq_remove,
.driver = {
.name = "wcd9xxx_intc",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(of_match),
},
};
static int wcd9xxx_irq_drv_init(void)
{
return platform_driver_register(&wcd9xxx_irq_driver);
}
subsys_initcall(wcd9xxx_irq_drv_init);
static void wcd9xxx_irq_drv_exit(void)
{
platform_driver_unregister(&wcd9xxx_irq_driver);
}
module_exit(wcd9xxx_irq_drv_exit);
#endif /* CONFIG_OF */
| gpl-2.0 |
cwhuang/linux | fs/gfs2/ops_fstype.c | 79 | 36152 | /*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
#include <linux/quotaops.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "sys.h"
#include "util.h"
#include "log.h"
#include "quota.h"
#include "dir.h"
#include "meta_io.h"
#include "trace_gfs2.h"
#define DO 0
#define UNDO 1
/**
* gfs2_tune_init - Fill a gfs2_tune structure with default values
* @gt: tune
*
*/
static void gfs2_tune_init(struct gfs2_tune *gt)
{
spin_lock_init(>->gt_spin);
gt->gt_quota_warn_period = 10;
gt->gt_quota_scale_num = 1;
gt->gt_quota_scale_den = 1;
gt->gt_new_files_jdata = 0;
gt->gt_max_readahead = BIT(18);
gt->gt_complain_secs = 10;
}
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
struct gfs2_sbd *sdp;
struct address_space *mapping;
sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
if (!sdp)
return NULL;
sb->s_fs_info = sdp;
sdp->sd_vfs = sb;
sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
if (!sdp->sd_lkstats) {
kfree(sdp);
return NULL;
}
set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
gfs2_tune_init(&sdp->sd_tune);
init_waitqueue_head(&sdp->sd_glock_wait);
atomic_set(&sdp->sd_glock_disposal, 0);
init_completion(&sdp->sd_locking_init);
init_completion(&sdp->sd_wdack);
spin_lock_init(&sdp->sd_statfs_spin);
spin_lock_init(&sdp->sd_rindex_spin);
sdp->sd_rindex_tree.rb_node = NULL;
INIT_LIST_HEAD(&sdp->sd_jindex_list);
spin_lock_init(&sdp->sd_jindex_spin);
mutex_init(&sdp->sd_jindex_mutex);
init_completion(&sdp->sd_journal_ready);
INIT_LIST_HEAD(&sdp->sd_quota_list);
mutex_init(&sdp->sd_quota_mutex);
mutex_init(&sdp->sd_quota_sync_mutex);
init_waitqueue_head(&sdp->sd_quota_wait);
INIT_LIST_HEAD(&sdp->sd_trunc_list);
spin_lock_init(&sdp->sd_trunc_lock);
spin_lock_init(&sdp->sd_bitmap_lock);
mapping = &sdp->sd_aspace;
address_space_init_once(mapping);
mapping->a_ops = &gfs2_rgrp_aops;
mapping->host = sb->s_bdev->bd_inode;
mapping->flags = 0;
mapping_set_gfp_mask(mapping, GFP_NOFS);
mapping->private_data = NULL;
mapping->writeback_index = 0;
spin_lock_init(&sdp->sd_log_lock);
atomic_set(&sdp->sd_log_pinned, 0);
INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
INIT_LIST_HEAD(&sdp->sd_log_le_ordered);
spin_lock_init(&sdp->sd_ordered_lock);
init_waitqueue_head(&sdp->sd_log_waitq);
init_waitqueue_head(&sdp->sd_logd_waitq);
spin_lock_init(&sdp->sd_ail_lock);
INIT_LIST_HEAD(&sdp->sd_ail1_list);
INIT_LIST_HEAD(&sdp->sd_ail2_list);
init_rwsem(&sdp->sd_log_flush_lock);
atomic_set(&sdp->sd_log_in_flight, 0);
atomic_set(&sdp->sd_reserving_log, 0);
init_waitqueue_head(&sdp->sd_reserving_log_wait);
init_waitqueue_head(&sdp->sd_log_flush_wait);
atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
mutex_init(&sdp->sd_freeze_mutex);
return sdp;
}
/**
* gfs2_check_sb - Check superblock
* @sdp: the filesystem
* @sb: The superblock
* @silent: Don't print a message if the check fails
*
* Checks the version code of the FS is one that we understand how to
* read and that the sizes of the various on-disk structures have not
* changed.
*/
static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
{
struct gfs2_sb_host *sb = &sdp->sd_sb;
if (sb->sb_magic != GFS2_MAGIC ||
sb->sb_type != GFS2_METATYPE_SB) {
if (!silent)
pr_warn("not a GFS2 filesystem\n");
return -EINVAL;
}
/* If format numbers match exactly, we're done. */
if (sb->sb_fs_format == GFS2_FORMAT_FS &&
sb->sb_multihost_format == GFS2_FORMAT_MULTI)
return 0;
fs_warn(sdp, "Unknown on-disk format, unable to mount\n");
return -EINVAL;
}
static void end_bio_io_page(struct bio *bio)
{
struct page *page = bio->bi_private;
if (!bio->bi_error)
SetPageUptodate(page);
else
pr_warn("error %d reading superblock\n", bio->bi_error);
unlock_page(page);
}
static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
{
struct gfs2_sb_host *sb = &sdp->sd_sb;
struct super_block *s = sdp->sd_vfs;
const struct gfs2_sb *str = buf;
sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
sb->sb_format = be32_to_cpu(str->sb_header.mh_format);
sb->sb_fs_format = be32_to_cpu(str->sb_fs_format);
sb->sb_multihost_format = be32_to_cpu(str->sb_multihost_format);
sb->sb_bsize = be32_to_cpu(str->sb_bsize);
sb->sb_bsize_shift = be32_to_cpu(str->sb_bsize_shift);
sb->sb_master_dir.no_addr = be64_to_cpu(str->sb_master_dir.no_addr);
sb->sb_master_dir.no_formal_ino = be64_to_cpu(str->sb_master_dir.no_formal_ino);
sb->sb_root_dir.no_addr = be64_to_cpu(str->sb_root_dir.no_addr);
sb->sb_root_dir.no_formal_ino = be64_to_cpu(str->sb_root_dir.no_formal_ino);
memcpy(sb->sb_lockproto, str->sb_lockproto, GFS2_LOCKNAME_LEN);
memcpy(sb->sb_locktable, str->sb_locktable, GFS2_LOCKNAME_LEN);
memcpy(s->s_uuid, str->sb_uuid, 16);
}
/**
* gfs2_read_super - Read the gfs2 super block from disk
* @sdp: The GFS2 super block
* @sector: The location of the super block
* @error: The error code to return
*
* This uses the bio functions to read the super block from disk
* because we want to be 100% sure that we never read cached data.
* A super block is read only twice during each GFS2 mount and is
* never written to by the filesystem. The first time it is read, no
* locks are held, and the only details which are looked at are those
* relating to the locking protocol. Once locking is up and working,
* the sb is read again under the lock to establish the location of
* the master directory (contains pointers to journals etc) and the
* root directory.
*
* Returns: 0 on success or error
*/
static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
{
struct super_block *sb = sdp->sd_vfs;
struct gfs2_sb *p;
struct page *page;
struct bio *bio;
page = alloc_page(GFP_NOFS);
if (unlikely(!page))
return -ENOMEM;
ClearPageUptodate(page);
ClearPageDirty(page);
lock_page(page);
bio = bio_alloc(GFP_NOFS, 1);
bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
bio->bi_bdev = sb->s_bdev;
bio_add_page(bio, page, PAGE_SIZE, 0);
bio->bi_end_io = end_bio_io_page;
bio->bi_private = page;
bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC | REQ_META);
submit_bio(bio);
wait_on_page_locked(page);
bio_put(bio);
if (!PageUptodate(page)) {
__free_page(page);
return -EIO;
}
p = kmap(page);
gfs2_sb_in(sdp, p);
kunmap(page);
__free_page(page);
return gfs2_check_sb(sdp, silent);
}
/**
* gfs2_read_sb - Read super block
* @sdp: The GFS2 superblock
* @silent: Don't print message if mount fails
*
*/
static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
{
u32 hash_blocks, ind_blocks, leaf_blocks;
u32 tmp_blocks;
unsigned int x;
int error;
error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
if (error) {
if (!silent)
fs_err(sdp, "can't read superblock\n");
return error;
}
sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
GFS2_BASIC_BLOCK_SHIFT;
sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_dinode)) / sizeof(u64);
sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_meta_header)) / sizeof(u64);
sdp->sd_jbsize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header);
sdp->sd_hash_bsize = sdp->sd_sb.sb_bsize / 2;
sdp->sd_hash_bsize_shift = sdp->sd_sb.sb_bsize_shift - 1;
sdp->sd_hash_ptrs = sdp->sd_hash_bsize / sizeof(u64);
sdp->sd_qc_per_block = (sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_meta_header)) /
sizeof(struct gfs2_quota_change);
sdp->sd_blocks_per_bitmap = (sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_meta_header))
* GFS2_NBBY; /* not the rgrp bitmap, subsequent bitmaps only */
/* Compute the maximum reservation required to add an entry to a directory */
hash_blocks = DIV_ROUND_UP(sizeof(u64) * BIT(GFS2_DIR_MAX_DEPTH),
sdp->sd_jbsize);
ind_blocks = 0;
for (tmp_blocks = hash_blocks; tmp_blocks > sdp->sd_diptrs;) {
tmp_blocks = DIV_ROUND_UP(tmp_blocks, sdp->sd_inptrs);
ind_blocks += tmp_blocks;
}
leaf_blocks = 2 + GFS2_DIR_MAX_DEPTH;
sdp->sd_max_dirres = hash_blocks + ind_blocks + leaf_blocks;
sdp->sd_heightsize[0] = sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_dinode);
sdp->sd_heightsize[1] = sdp->sd_sb.sb_bsize * sdp->sd_diptrs;
for (x = 2;; x++) {
u64 space, d;
u32 m;
space = sdp->sd_heightsize[x - 1] * sdp->sd_inptrs;
d = space;
m = do_div(d, sdp->sd_inptrs);
if (d != sdp->sd_heightsize[x - 1] || m)
break;
sdp->sd_heightsize[x] = space;
}
sdp->sd_max_height = x;
sdp->sd_heightsize[x] = ~0;
gfs2_assert(sdp, sdp->sd_max_height <= GFS2_MAX_META_HEIGHT);
sdp->sd_jheightsize[0] = sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_dinode);
sdp->sd_jheightsize[1] = sdp->sd_jbsize * sdp->sd_diptrs;
for (x = 2;; x++) {
u64 space, d;
u32 m;
space = sdp->sd_jheightsize[x - 1] * sdp->sd_inptrs;
d = space;
m = do_div(d, sdp->sd_inptrs);
if (d != sdp->sd_jheightsize[x - 1] || m)
break;
sdp->sd_jheightsize[x] = space;
}
sdp->sd_max_jheight = x;
sdp->sd_jheightsize[x] = ~0;
gfs2_assert(sdp, sdp->sd_max_jheight <= GFS2_MAX_META_HEIGHT);
sdp->sd_max_dents_per_leaf = (sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_leaf)) /
GFS2_MIN_DIRENT_SIZE;
return 0;
}
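/*
 * Worked example (illustrative; assumes the common 4KiB block size):
 * sd_diptrs = (4096 - sizeof(struct gfs2_dinode)) / sizeof(u64) = 483
 * and sd_inptrs = (4096 - sizeof(struct gfs2_meta_header)) / sizeof(u64)
 * = 509, so sd_heightsize[] runs 3864, 483 * 4096, and then each level
 * is the previous one times 509 until the 64-bit product would
 * overflow; the loop above detects that with do_div and caps
 * sd_max_height there.
 */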
static int init_names(struct gfs2_sbd *sdp, int silent)
{
char *proto, *table;
int error = 0;
proto = sdp->sd_args.ar_lockproto;
table = sdp->sd_args.ar_locktable;
/* Try to autodetect */
if (!proto[0] || !table[0]) {
error = gfs2_read_super(sdp, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift, silent);
if (error)
return error;
if (!proto[0])
proto = sdp->sd_sb.sb_lockproto;
if (!table[0])
table = sdp->sd_sb.sb_locktable;
}
if (!table[0])
table = sdp->sd_vfs->s_id;
strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);
table = sdp->sd_table_name;
while ((table = strchr(table, '/')))
*table = '_';
return error;
}
static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
int undo)
{
int error = 0;
if (undo)
goto fail_trans;
error = gfs2_glock_nq_num(sdp,
GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,
mount_gh);
if (error) {
fs_err(sdp, "can't acquire mount glock: %d\n", error);
goto fail;
}
error = gfs2_glock_nq_num(sdp,
GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
LM_ST_SHARED,
LM_FLAG_NOEXP | GL_EXACT,
&sdp->sd_live_gh);
if (error) {
fs_err(sdp, "can't acquire live glock: %d\n", error);
goto fail_mount;
}
error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
CREATE, &sdp->sd_rename_gl);
if (error) {
fs_err(sdp, "can't create rename glock: %d\n", error);
goto fail_live;
}
error = gfs2_glock_get(sdp, GFS2_FREEZE_LOCK, &gfs2_freeze_glops,
CREATE, &sdp->sd_freeze_gl);
if (error) {
fs_err(sdp, "can't create transaction glock: %d\n", error);
goto fail_rename;
}
return 0;
fail_trans:
gfs2_glock_put(sdp->sd_freeze_gl);
fail_rename:
gfs2_glock_put(sdp->sd_rename_gl);
fail_live:
gfs2_glock_dq_uninit(&sdp->sd_live_gh);
fail_mount:
gfs2_glock_dq_uninit(mount_gh);
fail:
return error;
}
static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
u64 no_addr, const char *name)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct dentry *dentry;
struct inode *inode;
inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0,
GFS2_BLKST_FREE /* ignore */);
if (IS_ERR(inode)) {
fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
return PTR_ERR(inode);
}
dentry = d_make_root(inode);
if (!dentry) {
fs_err(sdp, "can't alloc %s dentry\n", name);
return -ENOMEM;
}
*dptr = dentry;
return 0;
}
static int init_sb(struct gfs2_sbd *sdp, int silent)
{
struct super_block *sb = sdp->sd_vfs;
struct gfs2_holder sb_gh;
u64 no_addr;
int ret;
ret = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
LM_ST_SHARED, 0, &sb_gh);
if (ret) {
fs_err(sdp, "can't acquire superblock glock: %d\n", ret);
return ret;
}
ret = gfs2_read_sb(sdp, silent);
if (ret) {
fs_err(sdp, "can't read superblock: %d\n", ret);
goto out;
}
/* Set up the buffer cache and SB for real */
if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) {
ret = -EINVAL;
fs_err(sdp, "FS block size (%u) is too small for device "
"block size (%u)\n",
sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev));
goto out;
}
if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
ret = -EINVAL;
fs_err(sdp, "FS block size (%u) is too big for machine "
"page size (%u)\n",
sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
goto out;
}
sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);
/* Get the root inode */
no_addr = sdp->sd_sb.sb_root_dir.no_addr;
ret = gfs2_lookup_root(sb, &sdp->sd_root_dir, no_addr, "root");
if (ret)
goto out;
/* Get the master inode */
no_addr = sdp->sd_sb.sb_master_dir.no_addr;
ret = gfs2_lookup_root(sb, &sdp->sd_master_dir, no_addr, "master");
if (ret) {
dput(sdp->sd_root_dir);
goto out;
}
sb->s_root = dget(sdp->sd_args.ar_meta ? sdp->sd_master_dir : sdp->sd_root_dir);
out:
gfs2_glock_dq_uninit(&sb_gh);
return ret;
}
static void gfs2_others_may_mount(struct gfs2_sbd *sdp)
{
char *message = "FIRSTMOUNT=Done";
char *envp[] = { message, NULL };
fs_info(sdp, "first mount done, others may mount\n");
if (sdp->sd_lockstruct.ls_ops->lm_first_done)
sdp->sd_lockstruct.ls_ops->lm_first_done(sdp);
kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
}
/**
* gfs2_jindex_hold - Grab a lock on the jindex
* @sdp: The GFS2 superblock
* @ji_gh: the holder for the jindex glock
*
* Returns: errno
*/
static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
{
struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex);
struct qstr name;
char buf[20];
struct gfs2_jdesc *jd;
int error;
name.name = buf;
mutex_lock(&sdp->sd_jindex_mutex);
for (;;) {
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
if (error)
break;
name.len = sprintf(buf, "journal%u", sdp->sd_journals);
name.hash = gfs2_disk_hash(name.name, name.len);
error = gfs2_dir_check(sdp->sd_jindex, &name, NULL);
if (error == -ENOENT) {
error = 0;
break;
}
gfs2_glock_dq_uninit(ji_gh);
if (error)
break;
error = -ENOMEM;
jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL);
if (!jd)
break;
INIT_LIST_HEAD(&jd->extent_list);
INIT_LIST_HEAD(&jd->jd_revoke_list);
INIT_WORK(&jd->jd_work, gfs2_recover_func);
jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1);
if (!jd->jd_inode || IS_ERR(jd->jd_inode)) {
if (!jd->jd_inode)
error = -ENOENT;
else
error = PTR_ERR(jd->jd_inode);
kfree(jd);
break;
}
spin_lock(&sdp->sd_jindex_spin);
jd->jd_jid = sdp->sd_journals++;
list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
spin_unlock(&sdp->sd_jindex_spin);
}
mutex_unlock(&sdp->sd_jindex_mutex);
return error;
}
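/*
 * gfs2_jindex_hold() above walks "journal0", "journal1", ... in the
 * jindex directory, dropping and re-acquiring the jindex glock on each
 * pass, until gfs2_dir_check() returns -ENOENT; on success the glock
 * is left held via @ji_gh and sd_journals counts the descriptors added
 * to sd_jindex_list.
 */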
/**
* check_journal_clean - Make sure a journal is clean for a spectator mount
* @sdp: The GFS2 superblock
* @jd: The journal descriptor
*
* Returns: 0 if the journal is clean or locked, else an error
*/
static int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
{
int error;
struct gfs2_holder j_gh;
struct gfs2_log_header_host head;
struct gfs2_inode *ip;
ip = GFS2_I(jd->jd_inode);
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP |
GL_EXACT | GL_NOCACHE, &j_gh);
if (error) {
fs_err(sdp, "Error locking journal for spectator mount.\n");
return -EPERM;
}
error = gfs2_jdesc_check(jd);
if (error) {
fs_err(sdp, "Error checking journal for spectator mount.\n");
goto out_unlock;
}
error = gfs2_find_jhead(jd, &head);
if (error) {
fs_err(sdp, "Error parsing journal for spectator mount.\n");
goto out_unlock;
}
if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
error = -EPERM;
fs_err(sdp, "jid=%u: Journal is dirty, so the first mounter "
"must not be a spectator.\n", jd->jd_jid);
}
out_unlock:
gfs2_glock_dq_uninit(&j_gh);
return error;
}
static int init_journal(struct gfs2_sbd *sdp, int undo)
{
struct inode *master = d_inode(sdp->sd_master_dir);
struct gfs2_holder ji_gh;
struct gfs2_inode *ip;
int jindex = 1;
int error = 0;
if (undo) {
jindex = 0;
goto fail_jinode_gh;
}
sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
if (IS_ERR(sdp->sd_jindex)) {
fs_err(sdp, "can't lookup journal index: %d\n",
(int)PTR_ERR(sdp->sd_jindex));
return PTR_ERR(sdp->sd_jindex);
}
/* Load in the journal index special file */
error = gfs2_jindex_hold(sdp, &ji_gh);
if (error) {
fs_err(sdp, "can't read journal index: %d\n", error);
goto fail;
}
error = -EUSERS;
if (!gfs2_jindex_size(sdp)) {
fs_err(sdp, "no journals!\n");
goto fail_jindex;
}
if (sdp->sd_args.ar_spectator) {
sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
} else {
if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
fs_err(sdp, "can't mount journal #%u\n",
sdp->sd_lockstruct.ls_jid);
fs_err(sdp, "there are only %u journals (0 - %u)\n",
gfs2_jindex_size(sdp),
gfs2_jindex_size(sdp) - 1);
goto fail_jindex;
}
sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);
error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
&gfs2_journal_glops,
LM_ST_EXCLUSIVE, LM_FLAG_NOEXP,
&sdp->sd_journal_gh);
if (error) {
fs_err(sdp, "can't acquire journal glock: %d\n", error);
goto fail_jindex;
}
ip = GFS2_I(sdp->sd_jdesc->jd_inode);
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE,
&sdp->sd_jinode_gh);
if (error) {
fs_err(sdp, "can't acquire journal inode glock: %d\n",
error);
goto fail_journal_gh;
}
error = gfs2_jdesc_check(sdp->sd_jdesc);
if (error) {
fs_err(sdp, "my journal (%u) is bad: %d\n",
sdp->sd_jdesc->jd_jid, error);
goto fail_jinode_gh;
}
atomic_set(&sdp->sd_log_blks_free, sdp->sd_jdesc->jd_blocks);
atomic_set(&sdp->sd_log_thresh1, 2*sdp->sd_jdesc->jd_blocks/5);
atomic_set(&sdp->sd_log_thresh2, 4*sdp->sd_jdesc->jd_blocks/5);
/* Map the extents for this journal's blocks */
gfs2_map_journal_extents(sdp, sdp->sd_jdesc);
}
trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free));
if (sdp->sd_lockstruct.ls_first) {
unsigned int x;
for (x = 0; x < sdp->sd_journals; x++) {
struct gfs2_jdesc *jd = gfs2_jdesc_find(sdp, x);
if (sdp->sd_args.ar_spectator) {
error = check_journal_clean(sdp, jd);
if (error)
goto fail_jinode_gh;
continue;
}
error = gfs2_recover_journal(jd, true);
if (error) {
fs_err(sdp, "error recovering journal %u: %d\n",
x, error);
goto fail_jinode_gh;
}
}
gfs2_others_may_mount(sdp);
} else if (!sdp->sd_args.ar_spectator) {
error = gfs2_recover_journal(sdp->sd_jdesc, true);
if (error) {
fs_err(sdp, "error recovering my journal: %d\n", error);
goto fail_jinode_gh;
}
}
sdp->sd_log_idle = 1;
set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
gfs2_glock_dq_uninit(&ji_gh);
jindex = 0;
INIT_WORK(&sdp->sd_freeze_work, gfs2_freeze_func);
return 0;
fail_jinode_gh:
if (!sdp->sd_args.ar_spectator)
gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
fail_journal_gh:
if (!sdp->sd_args.ar_spectator)
gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
fail_jindex:
gfs2_jindex_free(sdp);
if (jindex)
gfs2_glock_dq_uninit(&ji_gh);
fail:
iput(sdp->sd_jindex);
return error;
}
static struct lock_class_key gfs2_quota_imutex_key;
static int init_inodes(struct gfs2_sbd *sdp, int undo)
{
int error = 0;
struct inode *master = d_inode(sdp->sd_master_dir);
if (undo)
goto fail_qinode;
error = init_journal(sdp, undo);
complete_all(&sdp->sd_journal_ready);
if (error)
goto fail;
/* Read in the master statfs inode */
sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
if (IS_ERR(sdp->sd_statfs_inode)) {
error = PTR_ERR(sdp->sd_statfs_inode);
fs_err(sdp, "can't read in statfs inode: %d\n", error);
goto fail_journal;
}
/* Read in the resource index inode */
sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
if (IS_ERR(sdp->sd_rindex)) {
error = PTR_ERR(sdp->sd_rindex);
fs_err(sdp, "can't get resource index inode: %d\n", error);
goto fail_statfs;
}
sdp->sd_rindex_uptodate = 0;
/* Read in the quota inode */
sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
if (IS_ERR(sdp->sd_quota_inode)) {
error = PTR_ERR(sdp->sd_quota_inode);
fs_err(sdp, "can't get quota file inode: %d\n", error);
goto fail_rindex;
}
/*
* i_mutex on quota files is special. Since this inode is a hidden system
* file, we are safe to define locking ourselves.
*/
lockdep_set_class(&sdp->sd_quota_inode->i_rwsem,
&gfs2_quota_imutex_key);
error = gfs2_rindex_update(sdp);
if (error)
goto fail_qinode;
return 0;
fail_qinode:
iput(sdp->sd_quota_inode);
fail_rindex:
gfs2_clear_rgrpd(sdp);
iput(sdp->sd_rindex);
fail_statfs:
iput(sdp->sd_statfs_inode);
fail_journal:
init_journal(sdp, UNDO);
fail:
return error;
}
static int init_per_node(struct gfs2_sbd *sdp, int undo)
{
struct inode *pn = NULL;
char buf[30];
int error = 0;
struct gfs2_inode *ip;
struct inode *master = d_inode(sdp->sd_master_dir);
if (sdp->sd_args.ar_spectator)
return 0;
if (undo)
goto fail_qc_gh;
pn = gfs2_lookup_simple(master, "per_node");
if (IS_ERR(pn)) {
error = PTR_ERR(pn);
fs_err(sdp, "can't find per_node directory: %d\n", error);
return error;
}
sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid);
sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf);
if (IS_ERR(sdp->sd_sc_inode)) {
error = PTR_ERR(sdp->sd_sc_inode);
fs_err(sdp, "can't find local \"sc\" file: %d\n", error);
goto fail;
}
sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
if (IS_ERR(sdp->sd_qc_inode)) {
error = PTR_ERR(sdp->sd_qc_inode);
fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
goto fail_ut_i;
}
iput(pn);
pn = NULL;
ip = GFS2_I(sdp->sd_sc_inode);
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
&sdp->sd_sc_gh);
if (error) {
fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
goto fail_qc_i;
}
ip = GFS2_I(sdp->sd_qc_inode);
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0,
&sdp->sd_qc_gh);
if (error) {
fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
goto fail_ut_gh;
}
return 0;
fail_qc_gh:
gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
fail_ut_gh:
gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
fail_qc_i:
iput(sdp->sd_qc_inode);
fail_ut_i:
iput(sdp->sd_sc_inode);
fail:
iput(pn);
return error;
}
static const match_table_t nolock_tokens = {
{ Opt_jid, "jid=%d", },
{ Opt_err, NULL },
};
static const struct lm_lockops nolock_ops = {
.lm_proto_name = "lock_nolock",
.lm_put_lock = gfs2_glock_free,
.lm_tokens = &nolock_tokens,
};
/**
* gfs2_lm_mount - mount a locking protocol
* @sdp: the filesystem
* @silent: if 1, don't complain if the FS isn't a GFS2 fs
*
* Returns: errno
*/
static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
{
const struct lm_lockops *lm;
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
struct gfs2_args *args = &sdp->sd_args;
const char *proto = sdp->sd_proto_name;
const char *table = sdp->sd_table_name;
char *o, *options;
int ret;
if (!strcmp("lock_nolock", proto)) {
lm = &nolock_ops;
sdp->sd_args.ar_localflocks = 1;
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
} else if (!strcmp("lock_dlm", proto)) {
lm = &gfs2_dlm_ops;
#endif
} else {
pr_info("can't find protocol %s\n", proto);
return -ENOENT;
}
fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
ls->ls_ops = lm;
ls->ls_first = 1;
for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) {
substring_t tmp[MAX_OPT_ARGS];
int token, option;
if (!o || !*o)
continue;
token = match_token(o, *lm->lm_tokens, tmp);
switch (token) {
case Opt_jid:
ret = match_int(&tmp[0], &option);
if (ret || option < 0)
goto hostdata_error;
if (test_and_clear_bit(SDF_NOJOURNALID, &sdp->sd_flags))
ls->ls_jid = option;
break;
case Opt_id:
case Opt_nodir:
/* Obsolete, but left for backward compat purposes */
break;
case Opt_first:
ret = match_int(&tmp[0], &option);
if (ret || (option != 0 && option != 1))
goto hostdata_error;
ls->ls_first = option;
break;
case Opt_err:
default:
hostdata_error:
fs_info(sdp, "unknown hostdata (%s)\n", o);
return -EINVAL;
}
}
if (lm->lm_mount == NULL) {
fs_info(sdp, "Now mounting FS...\n");
complete_all(&sdp->sd_locking_init);
return 0;
}
ret = lm->lm_mount(sdp, table);
if (ret == 0)
fs_info(sdp, "Joined cluster. Now mounting FS...\n");
complete_all(&sdp->sd_locking_init);
return ret;
}
void gfs2_lm_unmount(struct gfs2_sbd *sdp)
{
const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
lm->lm_unmount)
lm->lm_unmount(sdp);
}
static int wait_on_journal(struct gfs2_sbd *sdp)
{
if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
return 0;
return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, TASK_INTERRUPTIBLE)
? -EINTR : 0;
}
void gfs2_online_uevent(struct gfs2_sbd *sdp)
{
struct super_block *sb = sdp->sd_vfs;
char ro[20];
char spectator[20];
char *envp[] = { ro, spectator, NULL };
sprintf(ro, "RDONLY=%d", (sb->s_flags & MS_RDONLY) ? 1 : 0);
sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
}
/**
* fill_super - Read in superblock
* @sb: The VFS superblock
* @args: Mount options
* @silent: Don't complain if it's not a GFS2 filesystem
*
* Returns: errno
*/
static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent)
{
struct gfs2_sbd *sdp;
struct gfs2_holder mount_gh;
int error;
sdp = init_sbd(sb);
if (!sdp) {
pr_warn("can't alloc struct gfs2_sbd\n");
return -ENOMEM;
}
sdp->sd_args = *args;
if (sdp->sd_args.ar_spectator) {
sb->s_flags |= MS_RDONLY;
set_bit(SDF_RORECOVERY, &sdp->sd_flags);
}
if (sdp->sd_args.ar_posix_acl)
sb->s_flags |= MS_POSIXACL;
if (sdp->sd_args.ar_nobarrier)
set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
sb->s_flags |= MS_NOSEC;
sb->s_magic = GFS2_MAGIC;
sb->s_op = &gfs2_super_ops;
sb->s_d_op = &gfs2_dops;
sb->s_export_op = &gfs2_export_ops;
sb->s_xattr = gfs2_xattr_handlers;
sb->s_qcop = &gfs2_quotactl_ops;
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
sb->s_time_gran = 1;
sb->s_maxbytes = MAX_LFS_FILESIZE;
/* Set up the buffer cache and fill in some fake block size values
to allow us to read in the on-disk superblock. */
sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
GFS2_BASIC_BLOCK_SHIFT;
sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
if (sdp->sd_args.ar_statfs_quantum) {
sdp->sd_tune.gt_statfs_slow = 0;
sdp->sd_tune.gt_statfs_quantum = sdp->sd_args.ar_statfs_quantum;
} else {
sdp->sd_tune.gt_statfs_slow = 1;
sdp->sd_tune.gt_statfs_quantum = 30;
}
error = init_names(sdp, silent);
if (error) {
/* In this case, we haven't initialized sysfs, so we have to
manually free the sdp. */
free_percpu(sdp->sd_lkstats);
kfree(sdp);
sb->s_fs_info = NULL;
return error;
}
snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s", sdp->sd_table_name);
error = gfs2_sys_fs_add(sdp);
/*
* If we hit an error here, gfs2_sys_fs_add will have called function
* kobject_put which causes the sysfs usage count to go to zero, which
* causes sysfs to call function gfs2_sbd_release, which frees sdp.
* Subsequent error paths here will call gfs2_sys_fs_del, which also
* calls kobject_put to free sdp.
*/
if (error)
return error;
gfs2_create_debugfs_file(sdp);
error = gfs2_lm_mount(sdp, silent);
if (error)
goto fail_debug;
error = init_locking(sdp, &mount_gh, DO);
if (error)
goto fail_lm;
error = init_sb(sdp, silent);
if (error)
goto fail_locking;
error = wait_on_journal(sdp);
if (error)
goto fail_sb;
/*
* If user space has failed to join the cluster or some similar
* failure has occurred, then the journal id will contain a
* negative (error) number. This will then be returned to the
* caller (of the mount syscall). We do this even for spectator
* mounts (which just write a jid of 0 to indicate "ok" even though
* the jid is unused in the spectator case)
*/
if (sdp->sd_lockstruct.ls_jid < 0) {
error = sdp->sd_lockstruct.ls_jid;
sdp->sd_lockstruct.ls_jid = 0;
goto fail_sb;
}
if (sdp->sd_args.ar_spectator)
snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s",
sdp->sd_table_name);
else
snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u",
sdp->sd_table_name, sdp->sd_lockstruct.ls_jid);
error = init_inodes(sdp, DO);
if (error)
goto fail_sb;
error = init_per_node(sdp, DO);
if (error)
goto fail_inodes;
error = gfs2_statfs_init(sdp);
if (error) {
fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);
goto fail_per_node;
}
if (!(sb->s_flags & MS_RDONLY)) {
error = gfs2_make_fs_rw(sdp);
if (error) {
fs_err(sdp, "can't make FS RW: %d\n", error);
goto fail_per_node;
}
}
gfs2_glock_dq_uninit(&mount_gh);
gfs2_online_uevent(sdp);
return 0;
fail_per_node:
init_per_node(sdp, UNDO);
fail_inodes:
init_inodes(sdp, UNDO);
fail_sb:
if (sdp->sd_root_dir)
dput(sdp->sd_root_dir);
if (sdp->sd_master_dir)
dput(sdp->sd_master_dir);
if (sb->s_root)
dput(sb->s_root);
sb->s_root = NULL;
fail_locking:
init_locking(sdp, &mount_gh, UNDO);
fail_lm:
complete_all(&sdp->sd_journal_ready);
gfs2_gl_hash_clear(sdp);
gfs2_lm_unmount(sdp);
fail_debug:
gfs2_delete_debugfs_file(sdp);
free_percpu(sdp->sd_lkstats);
/* gfs2_sys_fs_del must be the last thing we do, since it causes
* sysfs to call function gfs2_sbd_release, which frees sdp. */
gfs2_sys_fs_del(sdp);
sb->s_fs_info = NULL;
return error;
}
static int set_gfs2_super(struct super_block *s, void *data)
{
s->s_bdev = data;
s->s_dev = s->s_bdev->bd_dev;
/*
* We set the bdi here to the queue backing, file systems can
* overwrite this in ->fill_super()
*/
s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
return 0;
}
static int test_gfs2_super(struct super_block *s, void *ptr)
{
struct block_device *bdev = ptr;
return (bdev == s->s_bdev);
}
/**
* gfs2_mount - Get the GFS2 superblock
* @fs_type: The GFS2 filesystem type
* @flags: Mount flags
* @dev_name: The name of the device
* @data: The mount arguments
*
* Q. Why not use get_sb_bdev() ?
* A. We need to select one of two root directories to mount, independent
* of whether this is the initial, or subsequent, mount of this sb
*
* Returns: the root dentry on success, or an ERR_PTR on failure
*/
static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data)
{
struct block_device *bdev;
struct super_block *s;
fmode_t mode = FMODE_READ | FMODE_EXCL;
int error;
struct gfs2_args args;
struct gfs2_sbd *sdp;
if (!(flags & MS_RDONLY))
mode |= FMODE_WRITE;
bdev = blkdev_get_by_path(dev_name, mode, fs_type);
if (IS_ERR(bdev))
return ERR_CAST(bdev);
/*
* once the super is inserted into the list by sget, s_umount
* will protect the lockfs code from trying to start a snapshot
* while we are mounting
*/
mutex_lock(&bdev->bd_fsfreeze_mutex);
if (bdev->bd_fsfreeze_count > 0) {
mutex_unlock(&bdev->bd_fsfreeze_mutex);
error = -EBUSY;
goto error_bdev;
}
s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
error = PTR_ERR(s);
if (IS_ERR(s))
goto error_bdev;
if (s->s_root) {
/*
* s_umount nests inside bd_mutex during
* __invalidate_device(). blkdev_put() acquires
* bd_mutex and can't be called under s_umount. Drop
* s_umount temporarily. This is safe as we're
* holding an active reference.
*/
up_write(&s->s_umount);
blkdev_put(bdev, mode);
down_write(&s->s_umount);
} else {
/* s_mode must be set before deactivate_locked_super() is called */
s->s_mode = mode;
}
memset(&args, 0, sizeof(args));
args.ar_quota = GFS2_QUOTA_DEFAULT;
args.ar_data = GFS2_DATA_DEFAULT;
args.ar_commit = 30;
args.ar_statfs_quantum = 30;
args.ar_quota_quantum = 60;
args.ar_errors = GFS2_ERRORS_DEFAULT;
error = gfs2_mount_args(&args, data);
if (error) {
pr_warn("can't parse mount arguments\n");
goto error_super;
}
if (s->s_root) {
error = -EBUSY;
if ((flags ^ s->s_flags) & MS_RDONLY)
goto error_super;
} else {
snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
sb_set_blocksize(s, block_size(bdev));
error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
if (error)
goto error_super;
s->s_flags |= MS_ACTIVE;
bdev->bd_super = s;
}
sdp = s->s_fs_info;
if (args.ar_meta)
return dget(sdp->sd_master_dir);
else
return dget(sdp->sd_root_dir);
error_super:
deactivate_locked_super(s);
return ERR_PTR(error);
error_bdev:
blkdev_put(bdev, mode);
return ERR_PTR(error);
}
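/*
* Illustration of the "two root directories" note above: a plain
* mount -t gfs2 /dev/sdb1 /mnt
* ends in dget(sdp->sd_root_dir), while mounting with the "meta" option
* (or via the gfs2meta type below) ends in dget(sdp->sd_master_dir),
* exposing the hidden metadata filesystem instead of the normal root.
* (/dev/sdb1 and /mnt are placeholder names.)
*/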
static int set_meta_super(struct super_block *s, void *ptr)
{
return -EINVAL;
}
static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
struct super_block *s;
struct gfs2_sbd *sdp;
struct path path;
int error;
error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
if (error) {
pr_warn("path_lookup on %s returned error %d\n",
dev_name, error);
return ERR_PTR(error);
}
s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags,
path.dentry->d_sb->s_bdev);
path_put(&path);
if (IS_ERR(s)) {
pr_warn("gfs2 mount does not exist\n");
return ERR_CAST(s);
}
if ((flags ^ s->s_flags) & MS_RDONLY) {
deactivate_locked_super(s);
return ERR_PTR(-EBUSY);
}
sdp = s->s_fs_info;
return dget(sdp->sd_master_dir);
}
static void gfs2_kill_sb(struct super_block *sb)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
if (sdp == NULL) {
kill_block_super(sb);
return;
}
gfs2_log_flush(sdp, NULL, SYNC_FLUSH);
dput(sdp->sd_root_dir);
dput(sdp->sd_master_dir);
sdp->sd_root_dir = NULL;
sdp->sd_master_dir = NULL;
shrink_dcache_sb(sb);
gfs2_delete_debugfs_file(sdp);
free_percpu(sdp->sd_lkstats);
kill_block_super(sb);
}
struct file_system_type gfs2_fs_type = {
.name = "gfs2",
.fs_flags = FS_REQUIRES_DEV,
.mount = gfs2_mount,
.kill_sb = gfs2_kill_sb,
.owner = THIS_MODULE,
};
MODULE_ALIAS_FS("gfs2");
struct file_system_type gfs2meta_fs_type = {
.name = "gfs2meta",
.fs_flags = FS_REQUIRES_DEV,
.mount = gfs2_mount_meta,
.owner = THIS_MODULE,
};
MODULE_ALIAS_FS("gfs2meta");
| gpl-2.0 |
allan888/Linux_kernel_asynchronous | drivers/phy/phy-omap-usb2.c | 79 | 9434 | /*
* omap-usb2.c - USB PHY, talking to musb controller in OMAP.
*
* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Author: Kishon Vijay Abraham I <kishon@ti.com>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/phy/omap_usb.h>
#include <linux/usb/phy_companion.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/phy/omap_control_phy.h>
#include <linux/phy/phy.h>
#include <linux/of_platform.h>
#define USB2PHY_DISCON_BYP_LATCH (1 << 31)
#define USB2PHY_ANA_CONFIG1 0x4c
/**
* omap_usb2_set_comparator - links the comparator present in the system with
* this phy
* @comparator: the companion phy (comparator) for this phy
*
* The phy companion driver should call this API passing the phy_companion
* filled with set_vbus and start_srp to be used by usb phy.
*
* For use by phy companion driver
*/
int omap_usb2_set_comparator(struct phy_companion *comparator)
{
struct omap_usb *phy;
struct usb_phy *x = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR(x))
return -ENODEV;
phy = phy_to_omapusb(x);
phy->comparator = comparator;
return 0;
}
EXPORT_SYMBOL_GPL(omap_usb2_set_comparator);
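/*
* A minimal sketch (illustrative only) of how a companion driver might
* use the API above, typically from its probe(). The twl6030_* names are
* placeholders; only the struct phy_companion shape (set_vbus/start_srp)
* from <linux/usb/phy_companion.h> is assumed.
*/
#if 0 /* example only, not built */
static int twl6030_set_vbus(struct phy_companion *comparator, bool enabled)
{
/* drive VBUS on or off through the companion chip */
return 0;
}
static int twl6030_start_srp(struct phy_companion *comparator)
{
/* kick off the OTG session request protocol */
return 0;
}
static struct phy_companion twl6030_comparator = {
.set_vbus = twl6030_set_vbus,
.start_srp = twl6030_start_srp,
};
/* in the companion driver's probe(): */
/* omap_usb2_set_comparator(&twl6030_comparator); */
#endif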
static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled)
{
struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
if (!phy->comparator)
return -ENODEV;
return phy->comparator->set_vbus(phy->comparator, enabled);
}
static int omap_usb_start_srp(struct usb_otg *otg)
{
struct omap_usb *phy = phy_to_omapusb(otg->usb_phy);
if (!phy->comparator)
return -ENODEV;
return phy->comparator->start_srp(phy->comparator);
}
static int omap_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
{
otg->host = host;
if (!host)
otg->state = OTG_STATE_UNDEFINED;
return 0;
}
static int omap_usb_set_peripheral(struct usb_otg *otg,
struct usb_gadget *gadget)
{
otg->gadget = gadget;
if (!gadget)
otg->state = OTG_STATE_UNDEFINED;
return 0;
}
static int omap_usb_power_off(struct phy *x)
{
struct omap_usb *phy = phy_get_drvdata(x);
omap_control_phy_power(phy->control_dev, 0);
return 0;
}
static int omap_usb_power_on(struct phy *x)
{
struct omap_usb *phy = phy_get_drvdata(x);
omap_control_phy_power(phy->control_dev, 1);
return 0;
}
static int omap_usb_init(struct phy *x)
{
struct omap_usb *phy = phy_get_drvdata(x);
u32 val;
if (phy->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
/*
*
* Reduce the sensitivity of the internal PHY by enabling the
* DISCON_BYP_LATCH of the USB2PHY_ANA_CONFIG1 register. This
* resolves issues with certain devices which can otherwise
* be prone to false disconnects.
*
*/
val = omap_usb_readl(phy->phy_base, USB2PHY_ANA_CONFIG1);
val |= USB2PHY_DISCON_BYP_LATCH;
omap_usb_writel(phy->phy_base, USB2PHY_ANA_CONFIG1, val);
}
return 0;
}
static struct phy_ops ops = {
.init = omap_usb_init,
.power_on = omap_usb_power_on,
.power_off = omap_usb_power_off,
.owner = THIS_MODULE,
};
#ifdef CONFIG_OF
static const struct usb_phy_data omap_usb2_data = {
.label = "omap_usb2",
.flags = OMAP_USB2_HAS_START_SRP | OMAP_USB2_HAS_SET_VBUS,
};
static const struct usb_phy_data omap5_usb2_data = {
.label = "omap5_usb2",
.flags = 0,
};
static const struct usb_phy_data dra7x_usb2_data = {
.label = "dra7x_usb2",
.flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT,
};
static const struct usb_phy_data am437x_usb2_data = {
.label = "am437x_usb2",
.flags = 0,
};
static const struct of_device_id omap_usb2_id_table[] = {
{
.compatible = "ti,omap-usb2",
.data = &omap_usb2_data,
},
{
.compatible = "ti,omap5-usb2",
.data = &omap5_usb2_data,
},
{
.compatible = "ti,dra7x-usb2",
.data = &dra7x_usb2_data,
},
{
.compatible = "ti,am437x-usb2",
.data = &am437x_usb2_data,
},
{},
};
MODULE_DEVICE_TABLE(of, omap_usb2_id_table);
#endif
static int omap_usb2_probe(struct platform_device *pdev)
{
struct omap_usb *phy;
struct phy *generic_phy;
struct resource *res;
struct phy_provider *phy_provider;
struct usb_otg *otg;
struct device_node *node = pdev->dev.of_node;
struct device_node *control_node;
struct platform_device *control_pdev;
const struct of_device_id *of_id;
struct usb_phy_data *phy_data;
of_id = of_match_device(of_match_ptr(omap_usb2_id_table), &pdev->dev);
if (!of_id)
return -EINVAL;
phy_data = (struct usb_phy_data *)of_id->data;
phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL);
if (!otg)
return -ENOMEM;
phy->dev = &pdev->dev;
phy->phy.dev = phy->dev;
phy->phy.label = phy_data->label;
phy->phy.otg = otg;
phy->phy.type = USB_PHY_TYPE_USB2;
if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(phy->phy_base))
return PTR_ERR(phy->phy_base);
phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
}
control_node = of_parse_phandle(node, "ctrl-module", 0);
if (!control_node) {
dev_err(&pdev->dev, "Failed to get control device phandle\n");
return -EINVAL;
}
control_pdev = of_find_device_by_node(control_node);
if (!control_pdev) {
dev_err(&pdev->dev, "Failed to get control device\n");
return -EINVAL;
}
phy->control_dev = &control_pdev->dev;
omap_control_phy_power(phy->control_dev, 0);
otg->set_host = omap_usb_set_host;
otg->set_peripheral = omap_usb_set_peripheral;
if (phy_data->flags & OMAP_USB2_HAS_SET_VBUS)
otg->set_vbus = omap_usb_set_vbus;
if (phy_data->flags & OMAP_USB2_HAS_START_SRP)
otg->start_srp = omap_usb_start_srp;
otg->usb_phy = &phy->phy;
platform_set_drvdata(pdev, phy);
pm_runtime_enable(phy->dev);
generic_phy = devm_phy_create(phy->dev, NULL, &ops);
if (IS_ERR(generic_phy)) {
pm_runtime_disable(phy->dev);
return PTR_ERR(generic_phy);
}
phy_set_drvdata(generic_phy, phy);
phy_provider = devm_of_phy_provider_register(phy->dev,
of_phy_simple_xlate);
if (IS_ERR(phy_provider)) {
pm_runtime_disable(phy->dev);
return PTR_ERR(phy_provider);
}
phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
if (IS_ERR(phy->wkupclk)) {
dev_warn(&pdev->dev, "unable to get wkupclk, trying old name\n");
phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
if (IS_ERR(phy->wkupclk)) {
dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
return PTR_ERR(phy->wkupclk);
} else {
dev_warn(&pdev->dev,
"found usb_phy_cm_clk32k, please fix DTS\n");
}
}
clk_prepare(phy->wkupclk);
phy->optclk = devm_clk_get(phy->dev, "refclk");
if (IS_ERR(phy->optclk)) {
dev_dbg(&pdev->dev, "unable to get refclk, trying old name\n");
phy->optclk = devm_clk_get(phy->dev, "usb_otg_ss_refclk960m");
if (IS_ERR(phy->optclk)) {
dev_dbg(&pdev->dev,
"unable to get usb_otg_ss_refclk960m\n");
} else {
dev_warn(&pdev->dev,
"found usb_otg_ss_refclk960m, please fix DTS\n");
}
}
if (!IS_ERR(phy->optclk))
clk_prepare(phy->optclk);
usb_add_phy_dev(&phy->phy);
return 0;
}
static int omap_usb2_remove(struct platform_device *pdev)
{
struct omap_usb *phy = platform_get_drvdata(pdev);
clk_unprepare(phy->wkupclk);
if (!IS_ERR(phy->optclk))
clk_unprepare(phy->optclk);
usb_remove_phy(&phy->phy);
pm_runtime_disable(phy->dev);
return 0;
}
#ifdef CONFIG_PM
static int omap_usb2_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_usb *phy = platform_get_drvdata(pdev);
clk_disable(phy->wkupclk);
if (!IS_ERR(phy->optclk))
clk_disable(phy->optclk);
return 0;
}
static int omap_usb2_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_usb *phy = platform_get_drvdata(pdev);
int ret;
ret = clk_enable(phy->wkupclk);
if (ret < 0) {
dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
goto err0;
}
if (!IS_ERR(phy->optclk)) {
ret = clk_enable(phy->optclk);
if (ret < 0) {
dev_err(phy->dev, "Failed to enable optclk %d\n", ret);
goto err1;
}
}
return 0;
err1:
clk_disable(phy->wkupclk);
err0:
return ret;
}
static const struct dev_pm_ops omap_usb2_pm_ops = {
SET_RUNTIME_PM_OPS(omap_usb2_runtime_suspend, omap_usb2_runtime_resume,
NULL)
};
#define DEV_PM_OPS (&omap_usb2_pm_ops)
#else
#define DEV_PM_OPS NULL
#endif
static struct platform_driver omap_usb2_driver = {
.probe = omap_usb2_probe,
.remove = omap_usb2_remove,
.driver = {
.name = "omap-usb2",
.pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(omap_usb2_id_table),
},
};
module_platform_driver(omap_usb2_driver);
MODULE_ALIAS("platform:omap_usb2");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("OMAP USB2 phy driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
abhisit/TechNexion-linux | drivers/scsi/sd_dif.c | 847 | 5448 | /*
* sd_dif.c - SCSI Data Integrity Field
*
* Copyright (C) 2007, 2008 Oracle Corporation
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
* USA.
*
*/
#include <linux/blkdev.h>
#include <linux/t10-pi.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>
#include "sd.h"
/*
* Configure exchange of protection information between OS and HBA.
*/
void sd_dif_config_host(struct scsi_disk *sdkp)
{
struct scsi_device *sdp = sdkp->device;
struct gendisk *disk = sdkp->disk;
u8 type = sdkp->protection_type;
int dif, dix;
dif = scsi_host_dif_capable(sdp->host, type);
dix = scsi_host_dix_capable(sdp->host, type);
if (!dix && scsi_host_dix_capable(sdp->host, 0)) {
dif = 0; dix = 1;
}
if (!dix)
return;
/* Enable DMA of protection information */
if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) {
if (type == SD_DIF_TYPE3_PROTECTION)
blk_integrity_register(disk, &t10_pi_type3_ip);
else
blk_integrity_register(disk, &t10_pi_type1_ip);
disk->integrity->flags |= BLK_INTEGRITY_IP_CHECKSUM;
} else
if (type == SD_DIF_TYPE3_PROTECTION)
blk_integrity_register(disk, &t10_pi_type3_crc);
else
blk_integrity_register(disk, &t10_pi_type1_crc);
sd_printk(KERN_NOTICE, sdkp,
"Enabling DIX %s protection\n", disk->integrity->name);
/* Signal to block layer that we support sector tagging */
if (dif && type) {
disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
if (!sdkp->ATO)
return;
if (type == SD_DIF_TYPE3_PROTECTION)
disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
else
disk->integrity->tag_size = sizeof(u16);
sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
disk->integrity->tag_size);
}
}
/*
* The virtual start sector is the one that was originally submitted
* by the block layer. Due to partitioning, MD/DM cloning, etc. the
* actual physical start sector is likely to be different. Remap
* protection information to match the physical LBA.
*
* From a protocol perspective there's a slight difference between
* Type 1 and 2. The latter uses 32-byte CDBs exclusively, and the
* reference tag is seeded in the CDB. This gives us the potential to
* avoid virt->phys remapping during write. However, at read time we
* don't know whether the virt sector is the same as when we wrote it
* (we could be reading from the real disk as opposed to an MD/DM device).
* So we always remap Type 2, making it identical to Type 1.
*
* Type 3 does not have a reference tag so no remapping is required.
*/
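/*
* Worked example (assuming 512-byte protection intervals): a bio aimed at
* virtual sector 0 of a partition that starts at physical sector 2048 is
* seeded with virt = 0, while the CDB carries phys = 2048.
* sd_dif_prepare() below rewrites matching ref tags 0, 1, 2, ... to
* 2048, 2049, 2050, ..., and sd_dif_complete() undoes that mapping on the
* read path.
*/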
void sd_dif_prepare(struct scsi_cmnd *scmd)
{
const int tuple_sz = sizeof(struct t10_pi_tuple);
struct bio *bio;
struct scsi_disk *sdkp;
struct t10_pi_tuple *pi;
u32 phys, virt;
sdkp = scsi_disk(scmd->request->rq_disk);
if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION)
return;
phys = scsi_prot_ref_tag(scmd);
__rq_for_each_bio(bio, scmd->request) {
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_vec iv;
struct bvec_iter iter;
unsigned int j;
/* Already remapped? */
if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
break;
virt = bip_get_seed(bip) & 0xffffffff;
bip_for_each_vec(iv, bip, iter) {
pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
if (be32_to_cpu(pi->ref_tag) == virt)
pi->ref_tag = cpu_to_be32(phys);
virt++;
phys++;
}
kunmap_atomic(pi);
}
bip->bip_flags |= BIP_MAPPED_INTEGRITY;
}
}
/*
* Remap physical sector values in the reference tag to the virtual
* values expected by the block layer.
*/
void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
{
const int tuple_sz = sizeof(struct t10_pi_tuple);
struct scsi_disk *sdkp;
struct bio *bio;
struct t10_pi_tuple *pi;
unsigned int j, intervals;
u32 phys, virt;
sdkp = scsi_disk(scmd->request->rq_disk);
if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0)
return;
intervals = good_bytes / scsi_prot_interval(scmd);
phys = scsi_prot_ref_tag(scmd);
__rq_for_each_bio(bio, scmd->request) {
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_vec iv;
struct bvec_iter iter;
virt = bip_get_seed(bip) & 0xffffffff;
bip_for_each_vec(iv, bip, iter) {
pi = kmap_atomic(iv.bv_page) + iv.bv_offset;
for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) {
if (intervals == 0) {
kunmap_atomic(pi);
return;
}
if (be32_to_cpu(pi->ref_tag) == phys)
pi->ref_tag = cpu_to_be32(virt);
virt++;
phys++;
intervals--;
}
kunmap_atomic(pi);
}
}
}
| gpl-2.0 |
ISTweak/android_kernel_panasonic_p01d-cm9 | fs/dlm/lock.c | 847 | 123630 | /******************************************************************************
*******************************************************************************
**
** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
/* Central locking logic has four stages:
dlm_lock()
dlm_unlock()
request_lock(ls, lkb)
convert_lock(ls, lkb)
unlock_lock(ls, lkb)
cancel_lock(ls, lkb)
_request_lock(r, lkb)
_convert_lock(r, lkb)
_unlock_lock(r, lkb)
_cancel_lock(r, lkb)
do_request(r, lkb)
do_convert(r, lkb)
do_unlock(r, lkb)
do_cancel(r, lkb)
Stage 1 (lock, unlock) is mainly about checking input args and
splitting into one of the four main operations:
dlm_lock = request_lock
dlm_lock+CONVERT = convert_lock
dlm_unlock = unlock_lock
dlm_unlock+CANCEL = cancel_lock
Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
provided to the next stage.
Stage 3, _xxxx_lock(), determines if the operation is local or remote.
When remote, it calls send_xxxx(), when local it calls do_xxxx().
Stage 4, do_xxxx(), is the guts of the operation. It manipulates the
given rsb and lkb and queues callbacks.
For remote operations, send_xxxx() results in the corresponding do_xxxx()
function being executed on the remote node. The connecting send/receive
calls on local (L) and remote (R) nodes:
L: send_xxxx() -> R: receive_xxxx()
R: do_xxxx()
L: receive_xxxx_reply() <- R: send_xxxx_reply()
*/
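/*
* A hedged sketch of the stage 1 fan-out described above. This is not the
* actual dlm_lock()/dlm_unlock() body (those also validate arguments,
* create or look up the lkb, and take the lockspace recovery lock); it
* only shows how the entry points map onto the four stage 2 operations.
*/
#if 0 /* illustrative only, not built */
static int stage1_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_args *args, char *name, int len)
{
if (args->flags & DLM_LKF_CONVERT)
return convert_lock(ls, lkb, args); /* dlm_lock+CONVERT */
return request_lock(ls, lkb, name, len, args); /* dlm_lock */
}
static int stage1_unlock(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_args *args)
{
if (args->flags & DLM_LKF_CANCEL)
return cancel_lock(ls, lkb, args); /* dlm_unlock+CANCEL */
return unlock_lock(ls, lkb, args); /* dlm_unlock */
}
#endif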
#include <linux/types.h>
#include <linux/slab.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "memory.h"
#include "lowcomms.h"
#include "requestqueue.h"
#include "util.h"
#include "dir.h"
#include "member.h"
#include "lockspace.h"
#include "ast.h"
#include "lock.h"
#include "rcom.h"
#include "recover.h"
#include "lvb_table.h"
#include "user.h"
#include "config.h"
static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
struct dlm_message *ms);
static int receive_extralen(struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
static void del_timeout(struct dlm_lkb *lkb);
/*
* Lock compatibility matrix - thanks Steve
* UN = Unlocked state. Not really a state, used as a flag
* PD = Padding. Used to make the matrix a nice power of two in size
* Other states are the same as the VMS DLM.
* Usage: matrix[grmode+1][rqmode+1] (although m[rq+1][gr+1] is the same)
*/
static const int __dlm_compat_matrix[8][8] = {
/* UN NL CR CW PR PW EX PD */
{1, 1, 1, 1, 1, 1, 1, 0}, /* UN */
{1, 1, 1, 1, 1, 1, 1, 0}, /* NL */
{1, 1, 1, 1, 1, 1, 0, 0}, /* CR */
{1, 1, 1, 1, 0, 0, 0, 0}, /* CW */
{1, 1, 1, 0, 1, 0, 0, 0}, /* PR */
{1, 1, 1, 0, 0, 0, 0, 0}, /* PW */
{1, 1, 0, 0, 0, 0, 0, 0}, /* EX */
{0, 0, 0, 0, 0, 0, 0, 0} /* PD */
};
/*
* This defines the direction of transfer of LVB data.
* Granted mode is the row; requested mode is the column.
* Usage: matrix[grmode+1][rqmode+1]
* 1 = LVB is returned to the caller
* 0 = LVB is written to the resource
* -1 = nothing happens to the LVB
*/
const int dlm_lvb_operations[8][8] = {
/* UN NL CR CW PR PW EX PD*/
{ -1, 1, 1, 1, 1, 1, 1, -1 }, /* UN */
{ -1, 1, 1, 1, 1, 1, 1, 0 }, /* NL */
{ -1, -1, 1, 1, 1, 1, 1, 0 }, /* CR */
{ -1, -1, -1, 1, 1, 1, 1, 0 }, /* CW */
{ -1, -1, -1, -1, 1, 1, 1, 0 }, /* PR */
{ -1, 0, 0, 0, 0, 0, 1, 0 }, /* PW */
{ -1, 0, 0, 0, 0, 0, 0, 0 }, /* EX */
{ -1, 0, 0, 0, 0, 0, 0, 0 } /* PD */
};
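/*
* Worked example of the table above: a PW -> NL down-conversion gives
* dlm_lvb_operations[DLM_LOCK_PW + 1][DLM_LOCK_NL + 1] == 0, so the
* caller's LVB is written to the resource (a writer publishes its value
* when it releases). The reverse NL -> PW conversion yields 1, so the
* resource's LVB is returned to the caller taking write access.
*/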
#define modes_compat(gr, rq) \
__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
int dlm_modes_compat(int mode1, int mode2)
{
return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}
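/*
* Example of the +1 indexing (DLM_LOCK_IV is -1, so "unlocked" lands on
* row/column 0):
*
* dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_PR) == 1 (two readers coexist)
* dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_EX) == 0 (reader vs writer)
*/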
/*
* Compatibility matrix for conversions with QUECVT set.
* Granted mode is the row; requested mode is the column.
* Usage: matrix[grmode+1][rqmode+1]
*/
static const int __quecvt_compat_matrix[8][8] = {
/* UN NL CR CW PR PW EX PD */
{0, 0, 0, 0, 0, 0, 0, 0}, /* UN */
{0, 0, 1, 1, 1, 1, 1, 0}, /* NL */
{0, 0, 0, 1, 1, 1, 1, 0}, /* CR */
{0, 0, 0, 0, 1, 1, 1, 0}, /* CW */
{0, 0, 0, 1, 0, 1, 1, 0}, /* PR */
{0, 0, 0, 0, 0, 0, 1, 0}, /* PW */
{0, 0, 0, 0, 0, 0, 0, 0}, /* EX */
{0, 0, 0, 0, 0, 0, 0, 0} /* PD */
};
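/*
* Example: a CR -> PR conversion may be queued under QUECVT, since
* __quecvt_compat_matrix[DLM_LOCK_CR + 1][DLM_LOCK_PR + 1] == 1, while
* the opposite PR -> CR conversion gives 0 and the QUECVT request is
* refused.
*/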
void dlm_print_lkb(struct dlm_lkb *lkb)
{
printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x\n"
" status %d rqmode %d grmode %d wait_type %d ast_type %d\n",
lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_ast_type);
}
static void dlm_print_rsb(struct dlm_rsb *r)
{
printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
r->res_nodeid, r->res_flags, r->res_first_lkid,
r->res_recover_locks_count, r->res_name);
}
void dlm_dump_rsb(struct dlm_rsb *r)
{
struct dlm_lkb *lkb;
dlm_print_rsb(r);
printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
printk(KERN_ERR "rsb lookup list\n");
list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
dlm_print_lkb(lkb);
printk(KERN_ERR "rsb grant queue:\n");
list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
dlm_print_lkb(lkb);
printk(KERN_ERR "rsb convert queue:\n");
list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
dlm_print_lkb(lkb);
printk(KERN_ERR "rsb wait queue:\n");
list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
dlm_print_lkb(lkb);
}
/* Threads cannot use the lockspace while it's being recovered */
static inline void dlm_lock_recovery(struct dlm_ls *ls)
{
down_read(&ls->ls_in_recovery);
}
void dlm_unlock_recovery(struct dlm_ls *ls)
{
up_read(&ls->ls_in_recovery);
}
int dlm_lock_recovery_try(struct dlm_ls *ls)
{
return down_read_trylock(&ls->ls_in_recovery);
}
static inline int can_be_queued(struct dlm_lkb *lkb)
{
return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}
static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}
static inline int is_demoted(struct dlm_lkb *lkb)
{
return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}
static inline int is_altmode(struct dlm_lkb *lkb)
{
return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
}
static inline int is_granted(struct dlm_lkb *lkb)
{
return (lkb->lkb_status == DLM_LKSTS_GRANTED);
}
static inline int is_remote(struct dlm_rsb *r)
{
DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
return !!r->res_nodeid;
}
static inline int is_process_copy(struct dlm_lkb *lkb)
{
return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}
static inline int is_master_copy(struct dlm_lkb *lkb)
{
if (lkb->lkb_flags & DLM_IFL_MSTCPY)
DLM_ASSERT(lkb->lkb_nodeid, dlm_print_lkb(lkb););
return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}
static inline int middle_conversion(struct dlm_lkb *lkb)
{
if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
(lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
return 1;
return 0;
}
static inline int down_conversion(struct dlm_lkb *lkb)
{
return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}
static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
}
static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
}
static inline int is_overlap(struct dlm_lkb *lkb)
{
return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
DLM_IFL_OVERLAP_CANCEL));
}
static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
if (is_master_copy(lkb))
return;
del_timeout(lkb);
DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
/* if the operation was a cancel, then return -DLM_ECANCEL, if a
timeout caused the cancel then return -ETIMEDOUT */
if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
rv = -ETIMEDOUT;
}
if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
rv = -EDEADLK;
}
lkb->lkb_lksb->sb_status = rv;
lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;
dlm_add_ast(lkb, AST_COMP, lkb->lkb_grmode);
}
static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
queue_cast(r, lkb,
is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
}
static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
lkb->lkb_time_bast = ktime_get();
if (is_master_copy(lkb)) {
lkb->lkb_bastmode = rqmode; /* printed by debugfs */
send_bast(r, lkb, rqmode);
} else {
dlm_add_ast(lkb, AST_BAST, rqmode);
}
}
/*
* Basic operations on rsb's and lkb's
*/
static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len)
{
struct dlm_rsb *r;
r = dlm_allocate_rsb(ls, len);
if (!r)
return NULL;
r->res_ls = ls;
r->res_length = len;
memcpy(r->res_name, name, len);
mutex_init(&r->res_mutex);
INIT_LIST_HEAD(&r->res_lookup);
INIT_LIST_HEAD(&r->res_grantqueue);
INIT_LIST_HEAD(&r->res_convertqueue);
INIT_LIST_HEAD(&r->res_waitqueue);
INIT_LIST_HEAD(&r->res_root_list);
INIT_LIST_HEAD(&r->res_recover_list);
return r;
}
static int search_rsb_list(struct list_head *head, char *name, int len,
unsigned int flags, struct dlm_rsb **r_ret)
{
struct dlm_rsb *r;
int error = 0;
list_for_each_entry(r, head, res_hashchain) {
if (len == r->res_length && !memcmp(name, r->res_name, len))
goto found;
}
*r_ret = NULL;
return -EBADR;
found:
if (r->res_nodeid && (flags & R_MASTER))
error = -ENOTBLK;
*r_ret = r;
return error;
}
static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
unsigned int flags, struct dlm_rsb **r_ret)
{
struct dlm_rsb *r;
int error;
error = search_rsb_list(&ls->ls_rsbtbl[b].list, name, len, flags, &r);
if (!error) {
kref_get(&r->res_ref);
goto out;
}
error = search_rsb_list(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
if (error)
goto out;
list_move(&r->res_hashchain, &ls->ls_rsbtbl[b].list);
if (dlm_no_directory(ls))
goto out;
if (r->res_nodeid == -1) {
rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
r->res_first_lkid = 0;
} else if (r->res_nodeid > 0) {
rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
r->res_first_lkid = 0;
} else {
DLM_ASSERT(r->res_nodeid == 0, dlm_print_rsb(r););
DLM_ASSERT(!rsb_flag(r, RSB_MASTER_UNCERTAIN),);
}
out:
*r_ret = r;
return error;
}
static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
unsigned int flags, struct dlm_rsb **r_ret)
{
int error;
spin_lock(&ls->ls_rsbtbl[b].lock);
error = _search_rsb(ls, name, len, b, flags, r_ret);
spin_unlock(&ls->ls_rsbtbl[b].lock);
return error;
}
/*
* Find rsb in rsbtbl and potentially create/add one
*
* Delaying the release of rsb's has a similar benefit to applications keeping
* NL locks on an rsb, but without the guarantee that the cached master value
* will still be valid when the rsb is reused. Apps aren't always smart enough
* to keep NL locks on an rsb that they may lock again shortly; this can lead
* to excessive master lookups and removals if we don't delay the release.
*
* Searching for an rsb means looking through both the normal list and toss
* list. When found on the toss list the rsb is moved to the normal list with
* ref count of 1; when found on normal list the ref count is incremented.
*/
static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
unsigned int flags, struct dlm_rsb **r_ret)
{
struct dlm_rsb *r = NULL, *tmp;
uint32_t hash, bucket;
int error = -EINVAL;
if (namelen > DLM_RESNAME_MAXLEN)
goto out;
if (dlm_no_directory(ls))
flags |= R_CREATE;
error = 0;
hash = jhash(name, namelen, 0);
bucket = hash & (ls->ls_rsbtbl_size - 1);
error = search_rsb(ls, name, namelen, bucket, flags, &r);
if (!error)
goto out;
if (error == -EBADR && !(flags & R_CREATE))
goto out;
/* the rsb was found but wasn't a master copy */
if (error == -ENOTBLK)
goto out;
error = -ENOMEM;
r = create_rsb(ls, name, namelen);
if (!r)
goto out;
r->res_hash = hash;
r->res_bucket = bucket;
r->res_nodeid = -1;
kref_init(&r->res_ref);
/* With no directory, the master can be set immediately */
if (dlm_no_directory(ls)) {
int nodeid = dlm_dir_nodeid(r);
if (nodeid == dlm_our_nodeid())
nodeid = 0;
r->res_nodeid = nodeid;
}
spin_lock(&ls->ls_rsbtbl[bucket].lock);
error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
if (!error) {
spin_unlock(&ls->ls_rsbtbl[bucket].lock);
dlm_free_rsb(r);
r = tmp;
goto out;
}
list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
spin_unlock(&ls->ls_rsbtbl[bucket].lock);
error = 0;
out:
*r_ret = r;
return error;
}
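/*
* Hedged usage sketch of the lifecycle described above find_rsb() (no
* error handling; the names mirror the helpers in this file):
*
* struct dlm_rsb *r;
*
* error = find_rsb(ls, name, len, R_CREATE, &r);
* (ref held: found on the normal list (+1) or moved off toss with ref 1)
* ... attach lkb's, grant/convert locks against r ...
* put_rsb(r);
* (last ref: toss_rsb() parks r on the toss list; shrink_bucket() frees
* it only after ci_toss_secs, so a quick re-lookup can reuse it)
*/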
/* This is only called to add a reference when the code already holds
a valid reference to the rsb, so there's no need for locking. */
static inline void hold_rsb(struct dlm_rsb *r)
{
kref_get(&r->res_ref);
}
void dlm_hold_rsb(struct dlm_rsb *r)
{
hold_rsb(r);
}
static void toss_rsb(struct kref *kref)
{
struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
struct dlm_ls *ls = r->res_ls;
DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
kref_init(&r->res_ref);
list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
r->res_toss_time = jiffies;
if (r->res_lvbptr) {
dlm_free_lvb(r->res_lvbptr);
r->res_lvbptr = NULL;
}
}
/* When all references to the rsb are gone it's transferred to
the tossed list for later disposal. */
static void put_rsb(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
uint32_t bucket = r->res_bucket;
spin_lock(&ls->ls_rsbtbl[bucket].lock);
kref_put(&r->res_ref, toss_rsb);
spin_unlock(&ls->ls_rsbtbl[bucket].lock);
}
void dlm_put_rsb(struct dlm_rsb *r)
{
put_rsb(r);
}
/* See comment for unhold_lkb */
static void unhold_rsb(struct dlm_rsb *r)
{
int rv;
rv = kref_put(&r->res_ref, toss_rsb);
DLM_ASSERT(!rv, dlm_dump_rsb(r););
}
static void kill_rsb(struct kref *kref)
{
struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
/* All work is done after the return from kref_put() so we
can release the write_lock before the remove and free. */
DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}
/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
The rsb must exist as long as any lkb's for it do. */
static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
hold_rsb(r);
lkb->lkb_resource = r;
}
static void detach_lkb(struct dlm_lkb *lkb)
{
if (lkb->lkb_resource) {
put_rsb(lkb->lkb_resource);
lkb->lkb_resource = NULL;
}
}
static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
struct dlm_lkb *lkb, *tmp;
uint32_t lkid = 0;
uint16_t bucket;
lkb = dlm_allocate_lkb(ls);
if (!lkb)
return -ENOMEM;
lkb->lkb_nodeid = -1;
lkb->lkb_grmode = DLM_LOCK_IV;
kref_init(&lkb->lkb_ref);
INIT_LIST_HEAD(&lkb->lkb_ownqueue);
INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
INIT_LIST_HEAD(&lkb->lkb_time_list);
get_random_bytes(&bucket, sizeof(bucket));
bucket &= (ls->ls_lkbtbl_size - 1);
write_lock(&ls->ls_lkbtbl[bucket].lock);
/* counter can roll over so we must verify lkid is not in use */
while (lkid == 0) {
lkid = (bucket << 16) | ls->ls_lkbtbl[bucket].counter++;
list_for_each_entry(tmp, &ls->ls_lkbtbl[bucket].list,
lkb_idtbl_list) {
if (tmp->lkb_id != lkid)
continue;
lkid = 0;
break;
}
}
lkb->lkb_id = lkid;
list_add(&lkb->lkb_idtbl_list, &ls->ls_lkbtbl[bucket].list);
write_unlock(&ls->ls_lkbtbl[bucket].lock);
*lkb_ret = lkb;
return 0;
}
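/*
* Worked example of the lkid encoding used above: with bucket 0x0012 and
* counter 0x0034, lkid = (0x0012 << 16) | 0x0034 = 0x00120034. find_lkb()
* and __put_lkb() recover the bucket as (lkid >> 16), which is also why
* find_lkb() rejects an lkid whose high 16 bits are >= ls_lkbtbl_size
* with -EBADSLT.
*/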
static struct dlm_lkb *__find_lkb(struct dlm_ls *ls, uint32_t lkid)
{
struct dlm_lkb *lkb;
uint16_t bucket = (lkid >> 16);
list_for_each_entry(lkb, &ls->ls_lkbtbl[bucket].list, lkb_idtbl_list) {
if (lkb->lkb_id == lkid)
return lkb;
}
return NULL;
}
static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
struct dlm_lkb *lkb;
uint16_t bucket = (lkid >> 16);
if (bucket >= ls->ls_lkbtbl_size)
return -EBADSLT;
read_lock(&ls->ls_lkbtbl[bucket].lock);
lkb = __find_lkb(ls, lkid);
if (lkb)
kref_get(&lkb->lkb_ref);
read_unlock(&ls->ls_lkbtbl[bucket].lock);
*lkb_ret = lkb;
return lkb ? 0 : -ENOENT;
}
static void kill_lkb(struct kref *kref)
{
struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
/* All work is done after the return from kref_put() so we
can release the write_lock before the detach_lkb */
DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}
/* __put_lkb() is used when an lkb may not have an rsb attached to
it so we need to provide the lockspace explicitly */
static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
uint16_t bucket = (lkb->lkb_id >> 16);
write_lock(&ls->ls_lkbtbl[bucket].lock);
if (kref_put(&lkb->lkb_ref, kill_lkb)) {
list_del(&lkb->lkb_idtbl_list);
write_unlock(&ls->ls_lkbtbl[bucket].lock);
detach_lkb(lkb);
/* for local/process lkbs, lvbptr points to caller's lksb */
if (lkb->lkb_lvbptr && is_master_copy(lkb))
dlm_free_lvb(lkb->lkb_lvbptr);
dlm_free_lkb(lkb);
return 1;
} else {
write_unlock(&ls->ls_lkbtbl[bucket].lock);
return 0;
}
}
int dlm_put_lkb(struct dlm_lkb *lkb)
{
struct dlm_ls *ls;
DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););
ls = lkb->lkb_resource->res_ls;
return __put_lkb(ls, lkb);
}
/* This is only called to add a reference when the code already holds
a valid reference to the lkb, so there's no need for locking. */
static inline void hold_lkb(struct dlm_lkb *lkb)
{
kref_get(&lkb->lkb_ref);
}
/* This is called when we need to remove a reference and are certain
it's not the last ref. e.g. del_lkb is always called between a
find_lkb/put_lkb and is always the inverse of a previous add_lkb.
put_lkb would work fine, but would involve unnecessary locking */
static inline void unhold_lkb(struct dlm_lkb *lkb)
{
int rv;
rv = kref_put(&lkb->lkb_ref, kill_lkb);
DLM_ASSERT(!rv, dlm_print_lkb(lkb););
}
static void lkb_add_ordered(struct list_head *new, struct list_head *head,
int mode)
{
struct dlm_lkb *lkb = NULL;
list_for_each_entry(lkb, head, lkb_statequeue)
if (lkb->lkb_rqmode < mode)
break;
__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
}
/* add/remove lkb to rsb's grant/convert/wait queue */
static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
kref_get(&lkb->lkb_ref);
DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
lkb->lkb_timestamp = ktime_get();
lkb->lkb_status = status;
switch (status) {
case DLM_LKSTS_WAITING:
if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
else
list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
break;
case DLM_LKSTS_GRANTED:
/* convention says granted locks kept in order of grmode */
lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
lkb->lkb_grmode);
break;
case DLM_LKSTS_CONVERT:
if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
else
list_add_tail(&lkb->lkb_statequeue,
&r->res_convertqueue);
break;
default:
DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
}
}
static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
lkb->lkb_status = 0;
list_del(&lkb->lkb_statequeue);
unhold_lkb(lkb);
}
static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
hold_lkb(lkb);
del_lkb(r, lkb);
add_lkb(r, lkb, sts);
unhold_lkb(lkb);
}
static int msg_reply_type(int mstype)
{
switch (mstype) {
case DLM_MSG_REQUEST:
return DLM_MSG_REQUEST_REPLY;
case DLM_MSG_CONVERT:
return DLM_MSG_CONVERT_REPLY;
case DLM_MSG_UNLOCK:
return DLM_MSG_UNLOCK_REPLY;
case DLM_MSG_CANCEL:
return DLM_MSG_CANCEL_REPLY;
case DLM_MSG_LOOKUP:
return DLM_MSG_LOOKUP_REPLY;
}
return -1;
}
/* add/remove lkb from global waiters list of lkb's waiting for
a reply from a remote node */
static int add_to_waiters(struct dlm_lkb *lkb, int mstype)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int error = 0;
mutex_lock(&ls->ls_waiters_mutex);
if (is_overlap_unlock(lkb) ||
(is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
error = -EINVAL;
goto out;
}
if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
switch (mstype) {
case DLM_MSG_UNLOCK:
lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
break;
case DLM_MSG_CANCEL:
lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
break;
default:
error = -EBUSY;
goto out;
}
lkb->lkb_wait_count++;
hold_lkb(lkb);
log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
lkb->lkb_id, lkb->lkb_wait_type, mstype,
lkb->lkb_wait_count, lkb->lkb_flags);
goto out;
}
DLM_ASSERT(!lkb->lkb_wait_count,
dlm_print_lkb(lkb);
printk("wait_count %d\n", lkb->lkb_wait_count););
lkb->lkb_wait_count++;
lkb->lkb_wait_type = mstype;
hold_lkb(lkb);
list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
out:
if (error)
log_error(ls, "addwait error %x %d flags %x %d %d %s",
lkb->lkb_id, error, lkb->lkb_flags, mstype,
lkb->lkb_wait_type, lkb->lkb_resource->res_name);
mutex_unlock(&ls->ls_waiters_mutex);
return error;
}
/* We clear the RESEND flag because we might be taking an lkb off the waiters
list as part of process_requestqueue (e.g. a lookup that has an optimized
request reply on the requestqueue) between dlm_recover_waiters_pre() which
set RESEND and dlm_recover_waiters_post() */
static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
struct dlm_message *ms)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int overlap_done = 0;
if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
overlap_done = 1;
goto out_del;
}
if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
overlap_done = 1;
goto out_del;
}
/* Cancel state was preemptively cleared by a successful convert,
see next comment, nothing to do. */
if ((mstype == DLM_MSG_CANCEL_REPLY) &&
(lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
log_debug(ls, "remwait %x cancel_reply wait_type %d",
lkb->lkb_id, lkb->lkb_wait_type);
return -1;
}
/* Remove for the convert reply, and preemptively remove for the
cancel reply. A convert has been granted while there's still
an outstanding cancel on it (the cancel is moot and the result
in the cancel reply should be 0). We preempt the cancel reply
because the app gets the convert result and then can follow up
with another op, like convert. This subsequent op would see the
lingering state of the cancel and fail with -EBUSY. */
if ((mstype == DLM_MSG_CONVERT_REPLY) &&
(lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
is_overlap_cancel(lkb) && ms && !ms->m_result) {
log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
lkb->lkb_id);
lkb->lkb_wait_type = 0;
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
lkb->lkb_wait_count--;
goto out_del;
}
/* N.B. type of reply may not always correspond to type of original
msg due to lookup->request optimization, verify others? */
if (lkb->lkb_wait_type) {
lkb->lkb_wait_type = 0;
goto out_del;
}
log_error(ls, "remwait error %x reply %d flags %x no wait_type",
lkb->lkb_id, mstype, lkb->lkb_flags);
return -1;
out_del:
/* the force-unlock/cancel has completed and we haven't received a reply
to the op that was in progress prior to the unlock/cancel; we
give up on any reply to the earlier op. FIXME: not sure when/how
this would happen */
if (overlap_done && lkb->lkb_wait_type) {
log_error(ls, "remwait error %x reply %d wait_type %d overlap",
lkb->lkb_id, mstype, lkb->lkb_wait_type);
lkb->lkb_wait_count--;
lkb->lkb_wait_type = 0;
}
DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););
lkb->lkb_flags &= ~DLM_IFL_RESEND;
lkb->lkb_wait_count--;
if (!lkb->lkb_wait_count)
list_del_init(&lkb->lkb_wait_reply);
unhold_lkb(lkb);
return 0;
}
static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int error;
mutex_lock(&ls->ls_waiters_mutex);
error = _remove_from_waiters(lkb, mstype, NULL);
mutex_unlock(&ls->ls_waiters_mutex);
return error;
}
/* Handles situations where we might be processing a "fake" or "stub" reply in
which we can't try to take waiters_mutex again. */
static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int error;
if (ms != &ls->ls_stub_ms)
mutex_lock(&ls->ls_waiters_mutex);
error = _remove_from_waiters(lkb, ms->m_type, ms);
if (ms != &ls->ls_stub_ms)
mutex_unlock(&ls->ls_waiters_mutex);
return error;
}
static void dir_remove(struct dlm_rsb *r)
{
int to_nodeid;
if (dlm_no_directory(r->res_ls))
return;
to_nodeid = dlm_dir_nodeid(r);
if (to_nodeid != dlm_our_nodeid())
send_remove(r);
else
dlm_dir_remove_entry(r->res_ls, to_nodeid,
r->res_name, r->res_length);
}
/* FIXME: shouldn't this be able to exit as soon as one non-due rsb is
found since they are in order of newest to oldest? */
static int shrink_bucket(struct dlm_ls *ls, int b)
{
struct dlm_rsb *r;
int count = 0, found;
for (;;) {
found = 0;
spin_lock(&ls->ls_rsbtbl[b].lock);
list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
res_hashchain) {
if (!time_after_eq(jiffies, r->res_toss_time +
dlm_config.ci_toss_secs * HZ))
continue;
found = 1;
break;
}
if (!found) {
spin_unlock(&ls->ls_rsbtbl[b].lock);
break;
}
if (kref_put(&r->res_ref, kill_rsb)) {
list_del(&r->res_hashchain);
spin_unlock(&ls->ls_rsbtbl[b].lock);
if (is_master(r))
dir_remove(r);
dlm_free_rsb(r);
count++;
} else {
spin_unlock(&ls->ls_rsbtbl[b].lock);
log_error(ls, "tossed rsb in use %s", r->res_name);
}
}
return count;
}
void dlm_scan_rsbs(struct dlm_ls *ls)
{
int i;
for (i = 0; i < ls->ls_rsbtbl_size; i++) {
shrink_bucket(ls, i);
if (dlm_locking_stopped(ls))
break;
cond_resched();
}
}
static void add_timeout(struct dlm_lkb *lkb)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
if (is_master_copy(lkb))
return;
if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
goto add_it;
}
if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
goto add_it;
return;
add_it:
DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
mutex_lock(&ls->ls_timeout_mutex);
hold_lkb(lkb);
list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
mutex_unlock(&ls->ls_timeout_mutex);
}
static void del_timeout(struct dlm_lkb *lkb)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
mutex_lock(&ls->ls_timeout_mutex);
if (!list_empty(&lkb->lkb_time_list)) {
list_del_init(&lkb->lkb_time_list);
unhold_lkb(lkb);
}
mutex_unlock(&ls->ls_timeout_mutex);
}
/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
lkb_lksb_timeout without lock_rsb? Note: we can't lock timeout_mutex
and then lock rsb because of lock ordering in add_timeout. We may need
to specify some special timeout-related bits in the lkb that are just to
be accessed under the timeout_mutex. */
void dlm_scan_timeout(struct dlm_ls *ls)
{
struct dlm_rsb *r;
struct dlm_lkb *lkb;
int do_cancel, do_warn;
s64 wait_us;
for (;;) {
if (dlm_locking_stopped(ls))
break;
do_cancel = 0;
do_warn = 0;
mutex_lock(&ls->ls_timeout_mutex);
list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {
wait_us = ktime_to_us(ktime_sub(ktime_get(),
lkb->lkb_timestamp));
if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
wait_us >= (lkb->lkb_timeout_cs * 10000))
do_cancel = 1;
if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
wait_us >= dlm_config.ci_timewarn_cs * 10000)
do_warn = 1;
if (!do_cancel && !do_warn)
continue;
hold_lkb(lkb);
break;
}
mutex_unlock(&ls->ls_timeout_mutex);
if (!do_cancel && !do_warn)
break;
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
if (do_warn) {
/* clear flag so we only warn once */
lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
del_timeout(lkb);
dlm_timeout_warn(lkb);
}
if (do_cancel) {
log_debug(ls, "timeout cancel %x node %d %s",
lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
del_timeout(lkb);
_cancel_lock(r, lkb);
}
unlock_rsb(r);
unhold_rsb(r);
dlm_put_lkb(lkb);
}
}
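/* Note on the comparisons above: lkb_timeout_cs and ci_timewarn_cs are in
   centiseconds while wait_us is in microseconds, hence the scaling by
   10000 (1 cs = 10,000 us) on the centisecond side. */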
/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
dlm_recoverd before checking/setting ls_recover_begin. */
void dlm_adjust_timeouts(struct dlm_ls *ls)
{
struct dlm_lkb *lkb;
u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);
ls->ls_recover_begin = 0;
mutex_lock(&ls->ls_timeout_mutex);
list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
mutex_unlock(&ls->ls_timeout_mutex);
}
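/* Example: if recovery stalled the lockspace for 3 seconds, each pending
   timestamp is pushed forward by ~3,000,000 us, so the recovery window
   does not count against a lock's timeout or timewarn period. */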
/* lkb is master or local copy */
static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int b, len = r->res_ls->ls_lvblen;
/* b=1 lvb returned to caller
b=0 lvb written to rsb or invalidated
b=-1 do nothing */
b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
if (b == 1) {
if (!lkb->lkb_lvbptr)
return;
if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
return;
if (!r->res_lvbptr)
return;
memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
lkb->lkb_lvbseq = r->res_lvbseq;
} else if (b == 0) {
if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
rsb_set_flag(r, RSB_VALNOTVALID);
return;
}
if (!lkb->lkb_lvbptr)
return;
if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
return;
if (!r->res_lvbptr)
r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
if (!r->res_lvbptr)
return;
memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
r->res_lvbseq++;
lkb->lkb_lvbseq = r->res_lvbseq;
rsb_clear_flag(r, RSB_VALNOTVALID);
}
if (rsb_flag(r, RSB_VALNOTVALID))
lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
}
static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
if (lkb->lkb_grmode < DLM_LOCK_PW)
return;
if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
rsb_set_flag(r, RSB_VALNOTVALID);
return;
}
if (!lkb->lkb_lvbptr)
return;
if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
return;
if (!r->res_lvbptr)
r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
if (!r->res_lvbptr)
return;
memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
r->res_lvbseq++;
rsb_clear_flag(r, RSB_VALNOTVALID);
}
/* lkb is process copy (pc) */
static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
struct dlm_message *ms)
{
int b;
if (!lkb->lkb_lvbptr)
return;
if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
return;
b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
if (b == 1) {
int len = receive_extralen(ms);
if (len > DLM_RESNAME_MAXLEN)
len = DLM_RESNAME_MAXLEN;
memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
lkb->lkb_lvbseq = ms->m_lvbseq;
}
}
/* Manipulate lkb's on rsb's convert/granted/waiting queues
remove_lock -- used for unlock, removes lkb from granted
revert_lock -- used for cancel, moves lkb from convert to granted
grant_lock -- used for request and convert, adds lkb to granted or
moves lkb from convert or waiting to granted
Each of these is used for master or local copy lkb's. There is
also a _pc() variation used to make the corresponding change on
a process copy (pc) lkb. */
static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
del_lkb(r, lkb);
lkb->lkb_grmode = DLM_LOCK_IV;
/* this unhold undoes the original ref from create_lkb()
so this leads to the lkb being freed */
unhold_lkb(lkb);
}
static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
set_lvb_unlock(r, lkb);
_remove_lock(r, lkb);
}
static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
_remove_lock(r, lkb);
}
/* returns: 0 did nothing
1 moved lock to granted
-1 removed lock */
static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int rv = 0;
lkb->lkb_rqmode = DLM_LOCK_IV;
switch (lkb->lkb_status) {
case DLM_LKSTS_GRANTED:
break;
case DLM_LKSTS_CONVERT:
move_lkb(r, lkb, DLM_LKSTS_GRANTED);
rv = 1;
break;
case DLM_LKSTS_WAITING:
del_lkb(r, lkb);
lkb->lkb_grmode = DLM_LOCK_IV;
/* this unhold undoes the original ref from create_lkb()
so this leads to the lkb being freed */
unhold_lkb(lkb);
rv = -1;
break;
default:
log_print("invalid status for revert %d", lkb->lkb_status);
}
return rv;
}
static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
return revert_lock(r, lkb);
}
static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
if (lkb->lkb_grmode != lkb->lkb_rqmode) {
lkb->lkb_grmode = lkb->lkb_rqmode;
if (lkb->lkb_status)
move_lkb(r, lkb, DLM_LKSTS_GRANTED);
else
add_lkb(r, lkb, DLM_LKSTS_GRANTED);
}
lkb->lkb_rqmode = DLM_LOCK_IV;
}
static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
set_lvb_lock(r, lkb);
_grant_lock(r, lkb);
lkb->lkb_highbast = 0;
}
static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
struct dlm_message *ms)
{
set_lvb_lock_pc(r, lkb, ms);
_grant_lock(r, lkb);
}
/* called by grant_pending_locks() which means an async grant message must
be sent to the requesting node in addition to granting the lock if the
lkb belongs to a remote node. */
static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
grant_lock(r, lkb);
if (is_master_copy(lkb))
send_grant(r, lkb);
else
queue_cast(r, lkb, 0);
}
/* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
change the granted/requested modes. We're munging things accordingly in
the process copy.
CONVDEADLK: our grmode may have been forced down to NL to resolve a
conversion deadlock
ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
compatible with other granted locks */
static void munge_demoted(struct dlm_lkb *lkb, struct dlm_message *ms)
{
if (ms->m_type != DLM_MSG_CONVERT_REPLY) {
log_print("munge_demoted %x invalid reply type %d",
lkb->lkb_id, ms->m_type);
return;
}
if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
log_print("munge_demoted %x invalid modes gr %d rq %d",
lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
return;
}
lkb->lkb_grmode = DLM_LOCK_NL;
}
static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
{
if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
ms->m_type != DLM_MSG_GRANT) {
log_print("munge_altmode %x invalid reply type %d",
lkb->lkb_id, ms->m_type);
return;
}
if (lkb->lkb_exflags & DLM_LKF_ALTPR)
lkb->lkb_rqmode = DLM_LOCK_PR;
else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
lkb->lkb_rqmode = DLM_LOCK_CW;
else {
log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
dlm_print_lkb(lkb);
}
}
static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
{
struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
lkb_statequeue);
if (lkb->lkb_id == first->lkb_id)
return 1;
return 0;
}
/* Check if the given lkb conflicts with another lkb on the queue. */
static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
{
struct dlm_lkb *this;
list_for_each_entry(this, head, lkb_statequeue) {
if (this == lkb)
continue;
if (!modes_compat(this, lkb))
return 1;
}
return 0;
}
/*
* "A conversion deadlock arises with a pair of lock requests in the converting
* queue for one resource. The granted mode of each lock blocks the requested
* mode of the other lock."
*
* Part 2: if the granted mode of lkb is preventing an earlier lkb in the
* convert queue from being granted, then deadlk/demote lkb.
*
* Example:
* Granted Queue: empty
* Convert Queue: NL->EX (first lock)
* PR->EX (second lock)
*
* The first lock can't be granted because of the granted mode of the second
* lock and the second lock can't be granted because it's not first in the
* list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
* demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
* flag set and return DEMOTED in the lksb flags.
*
* Originally, this function detected conv-deadlk in a more limited scope:
* - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
* - if lkb1 was the first entry in the queue (not just earlier), and was
* blocked by the granted mode of lkb2, and there was nothing on the
* granted queue preventing lkb1 from being granted immediately, i.e.
* lkb2 was the only thing preventing lkb1 from being granted.
*
* That second condition meant we'd only say there was conv-deadlk if
* resolving it (by demotion) would lead to the first lock on the convert
* queue being granted right away. It allowed conversion deadlocks to exist
* between locks on the convert queue while they couldn't be granted anyway.
*
* Now, we detect and take action on conversion deadlocks immediately when
* they're created, even if they may not be immediately consequential. If
* lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
* mode that would prevent lkb1's conversion from being granted, we do a
* deadlk/demote on lkb2 right away and don't let it onto the convert queue.
* I think this means that the lkb_is_ahead condition below should always
* be zero, i.e. there will never be conv-deadlk between two locks that are
* both already on the convert queue.
*/
static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
{
struct dlm_lkb *lkb1;
int lkb_is_ahead = 0;
list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
if (lkb1 == lkb2) {
lkb_is_ahead = 1;
continue;
}
if (!lkb_is_ahead) {
if (!modes_compat(lkb2, lkb1))
return 1;
} else {
if (!modes_compat(lkb2, lkb1) &&
!modes_compat(lkb1, lkb2))
return 1;
}
}
return 0;
}
/*
* Return 1 if the lock can be granted, 0 otherwise.
* Also detect and resolve conversion deadlocks.
*
* lkb is the lock to be granted
*
* now is 1 if the function is being called in the context of the
* immediate request, it is 0 if called later, after the lock has been
* queued.
*
* References are from chapter 6 of "VAXcluster Principles" by Roy Davis
*/
static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
{
int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
/*
* 6-10: Version 5.4 introduced an option to address the phenomenon of
* a new request for an NL mode lock being blocked.
*
* 6-11: If the optional EXPEDITE flag is used with the new NL mode
* request, then it would be granted. In essence, the use of this flag
* tells the Lock Manager to expedite this request by not considering
* what may be in the CONVERTING or WAITING queues... As of this
* writing, the EXPEDITE flag can be used only with new requests for NL
* mode locks. This flag is not valid for conversion requests.
*
* A shortcut. Earlier checks return an error if EXPEDITE is used in a
* conversion or used with a non-NL requested mode. We also know an
* EXPEDITE request is always granted immediately, so now must always
* be 1. The full condition to grant an expedite request: (now &&
* !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
* therefore be shortened to just checking the flag.
*/
if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
return 1;
/*
* A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
* added to the remaining conditions.
*/
if (queue_conflict(&r->res_grantqueue, lkb))
goto out;
/*
* 6-3: By default, a conversion request is immediately granted if the
* requested mode is compatible with the modes of all other granted
* locks
*/
if (queue_conflict(&r->res_convertqueue, lkb))
goto out;
/*
* 6-5: But the default algorithm for deciding whether to grant or
* queue conversion requests does not by itself guarantee that such
* requests are serviced on a "first come first serve" basis. This, in
* turn, can lead to a phenomenon known as "indefinite postponement".
*
* 6-7: This issue is dealt with by using the optional QUECVT flag with
* the system service employed to request a lock conversion. This flag
* forces certain conversion requests to be queued, even if they are
* compatible with the granted modes of other locks on the same
* resource. Thus, the use of this flag results in conversion requests
* being ordered on a "first come first serve" basis.
*
* DCT: This condition is all about new conversions being able to occur
* "in place" while the lock remains on the granted queue (assuming
* nothing else conflicts.) IOW if QUECVT isn't set, a conversion
* doesn't _have_ to go onto the convert queue where it's processed in
* order. The "now" variable is necessary to distinguish converts
* being received and processed for the first time now, because once a
* convert is moved to the conversion queue the condition below applies
* requiring fifo granting.
*/
if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
return 1;
/*
* The NOORDER flag is set to avoid the standard vms rules on grant
* order.
*/
if (lkb->lkb_exflags & DLM_LKF_NOORDER)
return 1;
/*
* 6-3: Once in that queue [CONVERTING], a conversion request cannot be
* granted until all other conversion requests ahead of it are granted
* and/or canceled.
*/
if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
return 1;
/*
* 6-4: By default, a new request is immediately granted only if all
* three of the following conditions are satisfied when the request is
* issued:
* - The queue of ungranted conversion requests for the resource is
* empty.
* - The queue of ungranted new requests for the resource is empty.
* - The mode of the new request is compatible with the most
* restrictive mode of all granted locks on the resource.
*/
if (now && !conv && list_empty(&r->res_convertqueue) &&
list_empty(&r->res_waitqueue))
return 1;
/*
* 6-4: Once a lock request is in the queue of ungranted new requests,
* it cannot be granted until the queue of ungranted conversion
* requests is empty, all ungranted new requests ahead of it are
* granted and/or canceled, and it is compatible with the granted mode
* of the most restrictive lock granted on the resource.
*/
if (!now && !conv && list_empty(&r->res_convertqueue) &&
first_in_list(lkb, &r->res_waitqueue))
return 1;
out:
return 0;
}
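/* Worked examples of the rules above (illustrative, assuming the standard
   VMS compatibility matrix):
   - a new NL request with DLM_LKF_EXPEDITE is granted at once, regardless
     of the convert/wait queues;
   - a new EX request while PR is granted and both queues are empty fails
     rule 6-4 on compatibility, so it is queued (or gets -EAGAIN if it
     cannot be queued);
   - a new CR request with empty queues and PR granted passes 6-4 (CR and
     PR are compatible) and is granted immediately. */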
static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
int *err)
{
int rv;
int8_t alt = 0, rqmode = lkb->lkb_rqmode;
int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
if (err)
*err = 0;
rv = _can_be_granted(r, lkb, now);
if (rv)
goto out;
/*
* The CONVDEADLK flag is non-standard and tells the dlm to resolve
* conversion deadlocks by demoting grmode to NL, otherwise the dlm
* cancels one of the locks.
*/
if (is_convert && can_be_queued(lkb) &&
conversion_deadlock_detect(r, lkb)) {
if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
lkb->lkb_grmode = DLM_LOCK_NL;
lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
} else if (!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
if (err)
*err = -EDEADLK;
else {
log_print("can_be_granted deadlock %x now %d",
lkb->lkb_id, now);
dlm_dump_rsb(r);
}
}
goto out;
}
/*
* The ALTPR and ALTCW flags are non-standard and tell the dlm to try
* to grant a request in a mode other than the normal rqmode. It's a
* simple way to provide a big optimization to applications that can
* use them.
*/
if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
alt = DLM_LOCK_PR;
else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
alt = DLM_LOCK_CW;
if (alt) {
lkb->lkb_rqmode = alt;
rv = _can_be_granted(r, lkb, now);
if (rv)
lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
else
lkb->lkb_rqmode = rqmode;
}
out:
return rv;
}
/* FIXME: I don't think that can_be_granted() can/will demote or find deadlock
for locks pending on the convert list. Once verified (watch for these
log_prints), we should be able to just call _can_be_granted() and not
bother with the demote/deadlk cases here (and there's no easy way to deal
with a deadlk here, we'd have to generate something like grant_lock with
the deadlk error.) */
/* Returns the highest requested mode of all blocked conversions; sets
cw if there's a blocked conversion to DLM_LOCK_CW. */
static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw)
{
struct dlm_lkb *lkb, *s;
int hi, demoted, quit, grant_restart, demote_restart;
int deadlk;
quit = 0;
restart:
grant_restart = 0;
demote_restart = 0;
hi = DLM_LOCK_IV;
list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
demoted = is_demoted(lkb);
deadlk = 0;
if (can_be_granted(r, lkb, 0, &deadlk)) {
grant_lock_pending(r, lkb);
grant_restart = 1;
continue;
}
if (!demoted && is_demoted(lkb)) {
log_print("WARN: pending demoted %x node %d %s",
lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
demote_restart = 1;
continue;
}
if (deadlk) {
log_print("WARN: pending deadlock %x node %d %s",
lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
dlm_dump_rsb(r);
continue;
}
hi = max_t(int, lkb->lkb_rqmode, hi);
if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
*cw = 1;
}
if (grant_restart)
goto restart;
if (demote_restart && !quit) {
quit = 1;
goto restart;
}
return max_t(int, high, hi);
}
static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw)
{
struct dlm_lkb *lkb, *s;
list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
if (can_be_granted(r, lkb, 0, NULL))
grant_lock_pending(r, lkb);
else {
high = max_t(int, lkb->lkb_rqmode, high);
if (lkb->lkb_rqmode == DLM_LOCK_CW)
*cw = 1;
}
}
return high;
}
/* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
on either the convert or waiting queue.
high is the largest rqmode of all locks blocked on the convert or
waiting queue. */
static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
{
if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
if (gr->lkb_highbast < DLM_LOCK_EX)
return 1;
return 0;
}
if (gr->lkb_highbast < high &&
!__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
return 1;
return 0;
}
static void grant_pending_locks(struct dlm_rsb *r)
{
struct dlm_lkb *lkb, *s;
int high = DLM_LOCK_IV;
int cw = 0;
DLM_ASSERT(is_master(r), dlm_dump_rsb(r););
high = grant_pending_convert(r, high, &cw);
high = grant_pending_wait(r, high, &cw);
if (high == DLM_LOCK_IV)
return;
/*
* If there are locks left on the wait/convert queue then send blocking
* ASTs to granted locks based on the largest requested mode (high)
* found above.
*/
list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
if (cw && high == DLM_LOCK_PR &&
lkb->lkb_grmode == DLM_LOCK_PR)
queue_bast(r, lkb, DLM_LOCK_CW);
else
queue_bast(r, lkb, high);
lkb->lkb_highbast = high;
}
}
}
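/* Example of the CW special case above: PR is compatible with PR, so a
   blocked PR convert alone would not bast a PR-granted lock; but if a CW
   request is also blocked (cw set), the PR holder is basted with CW so the
   blocked CW request can eventually make progress. */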
static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
{
if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
(gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
if (gr->lkb_highbast < DLM_LOCK_EX)
return 1;
return 0;
}
if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
return 1;
return 0;
}
static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
struct dlm_lkb *lkb)
{
struct dlm_lkb *gr;
list_for_each_entry(gr, head, lkb_statequeue) {
if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
queue_bast(r, gr, lkb->lkb_rqmode);
gr->lkb_highbast = lkb->lkb_rqmode;
}
}
}
static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
send_bast_queue(r, &r->res_grantqueue, lkb);
}
static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
send_bast_queue(r, &r->res_grantqueue, lkb);
send_bast_queue(r, &r->res_convertqueue, lkb);
}
/* set_master(r, lkb) -- set the master nodeid of a resource
The purpose of this function is to set the nodeid field in the given
lkb using the nodeid field in the given rsb. If the rsb's nodeid is
known, it can just be copied to the lkb and the function will return
0. If the rsb's nodeid is _not_ known, it needs to be looked up
before it can be copied to the lkb.
When the rsb nodeid is being looked up remotely, the initial lkb
causing the lookup is kept on the ls_waiters list waiting for the
lookup reply. Other lkb's waiting for the same rsb lookup are kept
on the rsb's res_lookup list until the master is verified.
Return values:
0: nodeid is set in rsb/lkb and the caller should go ahead and use it
1: the rsb master is not available and the lkb has been placed on
a wait queue
*/
static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
struct dlm_ls *ls = r->res_ls;
int i, error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
r->res_first_lkid = lkb->lkb_id;
lkb->lkb_nodeid = r->res_nodeid;
return 0;
}
if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
return 1;
}
if (r->res_nodeid == 0) {
lkb->lkb_nodeid = 0;
return 0;
}
if (r->res_nodeid > 0) {
lkb->lkb_nodeid = r->res_nodeid;
return 0;
}
DLM_ASSERT(r->res_nodeid == -1, dlm_dump_rsb(r););
dir_nodeid = dlm_dir_nodeid(r);
if (dir_nodeid != our_nodeid) {
r->res_first_lkid = lkb->lkb_id;
send_lookup(r, lkb);
return 1;
}
for (i = 0; i < 2; i++) {
/* It's possible for dlm_scand to remove an old rsb for
this same resource from the toss list, us to create
a new one, look up the master locally, and find it
already exists just before dlm_scand does the
dir_remove() on the previous rsb. */
error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
r->res_length, &ret_nodeid);
if (!error)
break;
log_debug(ls, "dir_lookup error %d %s", error, r->res_name);
schedule();
}
if (error && error != -EEXIST)
return error;
if (ret_nodeid == our_nodeid) {
r->res_first_lkid = 0;
r->res_nodeid = 0;
lkb->lkb_nodeid = 0;
} else {
r->res_first_lkid = lkb->lkb_id;
r->res_nodeid = ret_nodeid;
lkb->lkb_nodeid = ret_nodeid;
}
return 0;
}
static void process_lookup_list(struct dlm_rsb *r)
{
struct dlm_lkb *lkb, *safe;
list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
list_del_init(&lkb->lkb_rsb_lookup);
_request_lock(r, lkb);
schedule();
}
}
/* confirm_master -- confirm (or deny) an rsb's master nodeid */
static void confirm_master(struct dlm_rsb *r, int error)
{
struct dlm_lkb *lkb;
if (!r->res_first_lkid)
return;
switch (error) {
case 0:
case -EINPROGRESS:
r->res_first_lkid = 0;
process_lookup_list(r);
break;
case -EAGAIN:
case -EBADR:
case -ENOTBLK:
/* the remote request failed and won't be retried (it was
a NOQUEUE, or has been canceled/unlocked); make a waiting
lkb the first_lkid */
r->res_first_lkid = 0;
if (!list_empty(&r->res_lookup)) {
lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
lkb_rsb_lookup);
list_del_init(&lkb->lkb_rsb_lookup);
r->res_first_lkid = lkb->lkb_id;
_request_lock(r, lkb);
}
break;
default:
log_error(r->res_ls, "confirm_master unknown error %d", error);
}
}
static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
int namelen, unsigned long timeout_cs,
void (*ast) (void *astparam),
void *astparam,
void (*bast) (void *astparam, int mode),
struct dlm_args *args)
{
int rv = -EINVAL;
/* check for invalid arg usage */
if (mode < 0 || mode > DLM_LOCK_EX)
goto out;
if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
goto out;
if (flags & DLM_LKF_CANCEL)
goto out;
if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
goto out;
if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
goto out;
if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
goto out;
if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
goto out;
if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
goto out;
if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
goto out;
if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
goto out;
if (!ast || !lksb)
goto out;
if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
goto out;
if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
goto out;
/* these args will be copied to the lkb in validate_lock_args,
it cannot be done now because when converting locks, fields in
an active lkb cannot be modified before locking the rsb */
args->flags = flags;
args->astfn = ast;
args->astparam = astparam;
args->bastfn = bast;
args->timeout = timeout_cs;
args->mode = mode;
args->lksb = lksb;
rv = 0;
out:
return rv;
}
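/* Examples of combinations rejected above with -EINVAL: DLM_LKF_EXPEDITE
   with any mode other than DLM_LOCK_NL (or combined with CONVERT, QUECVT
   or NOQUEUE); DLM_LKF_VALBLK with a NULL lksb->sb_lvbptr; DLM_LKF_CONVERT
   without an existing lksb->sb_lkid. */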
static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
{
if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
DLM_LKF_FORCEUNLOCK))
return -EINVAL;
if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
return -EINVAL;
args->flags = flags;
args->astparam = astarg;
return 0;
}
static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_args *args)
{
int rv = -EINVAL;
if (args->flags & DLM_LKF_CONVERT) {
if (lkb->lkb_flags & DLM_IFL_MSTCPY)
goto out;
if (args->flags & DLM_LKF_QUECVT &&
!__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
goto out;
rv = -EBUSY;
if (lkb->lkb_status != DLM_LKSTS_GRANTED)
goto out;
if (lkb->lkb_wait_type)
goto out;
if (is_overlap(lkb))
goto out;
}
lkb->lkb_exflags = args->flags;
lkb->lkb_sbflags = 0;
lkb->lkb_astfn = args->astfn;
lkb->lkb_astparam = args->astparam;
lkb->lkb_bastfn = args->bastfn;
lkb->lkb_rqmode = args->mode;
lkb->lkb_lksb = args->lksb;
lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
lkb->lkb_ownpid = (int) current->pid;
lkb->lkb_timeout_cs = args->timeout;
rv = 0;
out:
if (rv)
log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
lkb->lkb_status, lkb->lkb_wait_type,
lkb->lkb_resource->res_name);
return rv;
}
/* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0
for success */
/* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
because there may be a lookup in progress and it's valid to do
cancel/unlockf on it */
static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int rv = -EINVAL;
if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
dlm_print_lkb(lkb);
goto out;
}
/* an lkb may still exist even though the lock is EOL'ed due to a
cancel, unlock or failed noqueue request; an app can't use these
locks; return same error as if the lkid had not been found at all */
if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
rv = -ENOENT;
goto out;
}
/* an lkb may be waiting for an rsb lookup to complete where the
lookup was initiated by another lock */
if (!list_empty(&lkb->lkb_rsb_lookup)) {
if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
list_del_init(&lkb->lkb_rsb_lookup);
queue_cast(lkb->lkb_resource, lkb,
args->flags & DLM_LKF_CANCEL ?
-DLM_ECANCEL : -DLM_EUNLOCK);
unhold_lkb(lkb); /* undoes create_lkb() */
}
/* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
rv = -EBUSY;
goto out;
}
/* cancel not allowed with another cancel/unlock in progress */
if (args->flags & DLM_LKF_CANCEL) {
if (lkb->lkb_exflags & DLM_LKF_CANCEL)
goto out;
if (is_overlap(lkb))
goto out;
/* don't let scand try to do a cancel */
del_timeout(lkb);
if (lkb->lkb_flags & DLM_IFL_RESEND) {
lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
rv = -EBUSY;
goto out;
}
/* there's nothing to cancel */
if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
!lkb->lkb_wait_type) {
rv = -EBUSY;
goto out;
}
switch (lkb->lkb_wait_type) {
case DLM_MSG_LOOKUP:
case DLM_MSG_REQUEST:
lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
rv = -EBUSY;
goto out;
case DLM_MSG_UNLOCK:
case DLM_MSG_CANCEL:
goto out;
}
/* add_to_waiters() will set OVERLAP_CANCEL */
goto out_ok;
}
/* do we need to allow a force-unlock if there's a normal unlock
already in progress? in what conditions could the normal unlock
fail such that we'd want to send a force-unlock to be sure? */
if (args->flags & DLM_LKF_FORCEUNLOCK) {
if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
goto out;
if (is_overlap_unlock(lkb))
goto out;
/* don't let scand try to do a cancel */
del_timeout(lkb);
if (lkb->lkb_flags & DLM_IFL_RESEND) {
lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
rv = -EBUSY;
goto out;
}
switch (lkb->lkb_wait_type) {
case DLM_MSG_LOOKUP:
case DLM_MSG_REQUEST:
lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
rv = -EBUSY;
goto out;
case DLM_MSG_UNLOCK:
goto out;
}
/* add_to_waiters() will set OVERLAP_UNLOCK */
goto out_ok;
}
/* normal unlock not allowed if there's any op in progress */
rv = -EBUSY;
if (lkb->lkb_wait_type || lkb->lkb_wait_count)
goto out;
out_ok:
/* an overlapping op shouldn't blow away exflags from other op */
lkb->lkb_exflags |= args->flags;
lkb->lkb_sbflags = 0;
lkb->lkb_astparam = args->astparam;
rv = 0;
out:
if (rv)
log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
args->flags, lkb->lkb_wait_type,
lkb->lkb_resource->res_name);
return rv;
}
/*
* Four stage 4 varieties:
* do_request(), do_convert(), do_unlock(), do_cancel()
* These are called on the master node for the given lock and
* from the central locking logic.
*/
static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error = 0;
if (can_be_granted(r, lkb, 1, NULL)) {
grant_lock(r, lkb);
queue_cast(r, lkb, 0);
goto out;
}
if (can_be_queued(lkb)) {
error = -EINPROGRESS;
add_lkb(r, lkb, DLM_LKSTS_WAITING);
add_timeout(lkb);
goto out;
}
error = -EAGAIN;
queue_cast(r, lkb, -EAGAIN);
out:
return error;
}
static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
int error)
{
switch (error) {
case -EAGAIN:
if (force_blocking_asts(lkb))
send_blocking_asts_all(r, lkb);
break;
case -EINPROGRESS:
send_blocking_asts(r, lkb);
break;
}
}
static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error = 0;
int deadlk = 0;
/* changing an existing lock may allow others to be granted */
if (can_be_granted(r, lkb, 1, &deadlk)) {
grant_lock(r, lkb);
queue_cast(r, lkb, 0);
goto out;
}
/* can_be_granted() detected that this lock would block in a conversion
deadlock, so we leave it on the granted queue and return EDEADLK in
the ast for the convert. */
if (deadlk) {
/* it's left on the granted queue */
log_debug(r->res_ls, "deadlock %x node %d sts%d g%d r%d %s",
lkb->lkb_id, lkb->lkb_nodeid, lkb->lkb_status,
lkb->lkb_grmode, lkb->lkb_rqmode, r->res_name);
revert_lock(r, lkb);
queue_cast(r, lkb, -EDEADLK);
error = -EDEADLK;
goto out;
}
/* is_demoted() means the can_be_granted() above set the grmode
to NL, and left us on the granted queue. This auto-demotion
(due to CONVDEADLK) might mean other locks, and/or this lock, are
now grantable. We have to try to grant other converting locks
before we try again to grant this one. */
if (is_demoted(lkb)) {
grant_pending_convert(r, DLM_LOCK_IV, NULL);
if (_can_be_granted(r, lkb, 1)) {
grant_lock(r, lkb);
queue_cast(r, lkb, 0);
goto out;
}
/* else fall through and move to convert queue */
}
if (can_be_queued(lkb)) {
error = -EINPROGRESS;
del_lkb(r, lkb);
add_lkb(r, lkb, DLM_LKSTS_CONVERT);
add_timeout(lkb);
goto out;
}
error = -EAGAIN;
queue_cast(r, lkb, -EAGAIN);
out:
return error;
}
static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
int error)
{
switch (error) {
case 0:
grant_pending_locks(r);
/* grant_pending_locks also sends basts */
break;
case -EAGAIN:
if (force_blocking_asts(lkb))
send_blocking_asts_all(r, lkb);
break;
case -EINPROGRESS:
send_blocking_asts(r, lkb);
break;
}
}
static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
remove_lock(r, lkb);
queue_cast(r, lkb, -DLM_EUNLOCK);
return -DLM_EUNLOCK;
}
static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
int error)
{
grant_pending_locks(r);
}
/* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error;
error = revert_lock(r, lkb);
if (error) {
queue_cast(r, lkb, -DLM_ECANCEL);
return -DLM_ECANCEL;
}
return 0;
}
static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
int error)
{
if (error)
grant_pending_locks(r);
}
/*
* Four stage 3 varieties:
* _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
*/
/* add a new lkb to a possibly new rsb, called by requesting process */
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error;
/* set_master: sets lkb nodeid from r */
error = set_master(r, lkb);
if (error < 0)
goto out;
if (error) {
error = 0;
goto out;
}
if (is_remote(r)) {
/* receive_request() calls do_request() on remote node */
error = send_request(r, lkb);
} else {
error = do_request(r, lkb);
/* for remote locks the request_reply is sent
between do_request and do_request_effects */
do_request_effects(r, lkb, error);
}
out:
return error;
}
/* change some property of an existing lkb, e.g. mode */
static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error;
if (is_remote(r)) {
/* receive_convert() calls do_convert() on remote node */
error = send_convert(r, lkb);
} else {
error = do_convert(r, lkb);
/* for remote locks the convert_reply is sent
between do_convert and do_convert_effects */
do_convert_effects(r, lkb, error);
}
return error;
}
/* remove an existing lkb from the granted queue */
static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error;
if (is_remote(r)) {
/* receive_unlock() calls do_unlock() on remote node */
error = send_unlock(r, lkb);
} else {
error = do_unlock(r, lkb);
/* for remote locks the unlock_reply is sent
between do_unlock and do_unlock_effects */
do_unlock_effects(r, lkb, error);
}
return error;
}
/* remove an existing lkb from the convert or wait queue */
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error;
if (is_remote(r)) {
/* receive_cancel() calls do_cancel() on remote node */
error = send_cancel(r, lkb);
} else {
error = do_cancel(r, lkb);
/* for remote locks the cancel_reply is sent
between do_cancel and do_cancel_effects */
do_cancel_effects(r, lkb, error);
}
return error;
}
/*
* Four stage 2 varieties:
* request_lock(), convert_lock(), unlock_lock(), cancel_lock()
*/
static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
int len, struct dlm_args *args)
{
struct dlm_rsb *r;
int error;
error = validate_lock_args(ls, lkb, args);
if (error)
goto out;
error = find_rsb(ls, name, len, R_CREATE, &r);
if (error)
goto out;
lock_rsb(r);
attach_lkb(r, lkb);
lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
error = _request_lock(r, lkb);
unlock_rsb(r);
put_rsb(r);
out:
return error;
}
static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_args *args)
{
struct dlm_rsb *r;
int error;
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
error = validate_lock_args(ls, lkb, args);
if (error)
goto out;
error = _convert_lock(r, lkb);
out:
unlock_rsb(r);
put_rsb(r);
return error;
}
static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_args *args)
{
struct dlm_rsb *r;
int error;
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
error = validate_unlock_args(lkb, args);
if (error)
goto out;
error = _unlock_lock(r, lkb);
out:
unlock_rsb(r);
put_rsb(r);
return error;
}
static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_args *args)
{
struct dlm_rsb *r;
int error;
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
error = validate_unlock_args(lkb, args);
if (error)
goto out;
error = _cancel_lock(r, lkb);
out:
unlock_rsb(r);
put_rsb(r);
return error;
}
/*
* Two stage 1 varieties: dlm_lock() and dlm_unlock()
*/
int dlm_lock(dlm_lockspace_t *lockspace,
int mode,
struct dlm_lksb *lksb,
uint32_t flags,
void *name,
unsigned int namelen,
uint32_t parent_lkid,
void (*ast) (void *astarg),
void *astarg,
void (*bast) (void *astarg, int mode))
{
struct dlm_ls *ls;
struct dlm_lkb *lkb;
struct dlm_args args;
int error, convert = flags & DLM_LKF_CONVERT;
ls = dlm_find_lockspace_local(lockspace);
if (!ls)
return -EINVAL;
dlm_lock_recovery(ls);
if (convert)
error = find_lkb(ls, lksb->sb_lkid, &lkb);
else
error = create_lkb(ls, &lkb);
if (error)
goto out;
error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
astarg, bast, &args);
if (error)
goto out_put;
if (convert)
error = convert_lock(ls, lkb, &args);
else
error = request_lock(ls, lkb, name, namelen, &args);
if (error == -EINPROGRESS)
error = 0;
out_put:
if (convert || error)
__put_lkb(ls, lkb);
if (error == -EAGAIN || error == -EDEADLK)
error = 0;
out:
dlm_unlock_recovery(ls);
dlm_put_lockspace(ls);
return error;
}
int dlm_unlock(dlm_lockspace_t *lockspace,
uint32_t lkid,
uint32_t flags,
struct dlm_lksb *lksb,
void *astarg)
{
struct dlm_ls *ls;
struct dlm_lkb *lkb;
struct dlm_args args;
int error;
ls = dlm_find_lockspace_local(lockspace);
if (!ls)
return -EINVAL;
dlm_lock_recovery(ls);
error = find_lkb(ls, lkid, &lkb);
if (error)
goto out;
error = set_unlock_args(flags, astarg, &args);
if (error)
goto out_put;
if (flags & DLM_LKF_CANCEL)
error = cancel_lock(ls, lkb, &args);
else
error = unlock_lock(ls, lkb, &args);
if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
error = 0;
if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
error = 0;
out_put:
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
dlm_put_lockspace(ls);
return error;
}
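/*
 * A minimal caller sketch (illustrative only, not part of this file),
 * assuming a dlm_lockspace_t already obtained elsewhere.  Completion asts
 * are delivered asynchronously, so the sketch waits on a completion; the
 * final status of each operation lands in lksb.sb_status (e.g.
 * -DLM_EUNLOCK for a successful unlock, per queue_cast above).
 */
#if 0
#include <linux/dlm.h>
#include <linux/string.h>
#include <linux/completion.h>

struct example_lock {
	struct dlm_lksb lksb;
	struct completion done;
};

static void example_ast(void *arg)
{
	struct example_lock *el = arg;

	/* el->lksb.sb_status now holds the result of the last op */
	complete(&el->done);
}

static int example_take_and_drop(dlm_lockspace_t *ls)
{
	struct example_lock el;
	int error;

	memset(&el, 0, sizeof(el));
	init_completion(&el.done);

	/* request a new EX lock on resource "example7" (8-byte name) */
	error = dlm_lock(ls, DLM_LOCK_EX, &el.lksb, 0, "example7", 8,
			 0, example_ast, &el, NULL);
	if (error)
		return error;
	wait_for_completion(&el.done);
	if (el.lksb.sb_status)
		return el.lksb.sb_status;

	/* release it; completion is again reported through example_ast */
	init_completion(&el.done);
	error = dlm_unlock(ls, el.lksb.sb_lkid, 0, &el.lksb, &el);
	if (error)
		return error;
	wait_for_completion(&el.done);
	return 0;
}
#endif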
/*
* send/receive routines for remote operations and replies
*
* send_args
* send_common
* send_request receive_request
* send_convert receive_convert
* send_unlock receive_unlock
* send_cancel receive_cancel
* send_grant receive_grant
* send_bast receive_bast
* send_lookup receive_lookup
* send_remove receive_remove
*
* send_common_reply
* receive_request_reply send_request_reply
* receive_convert_reply send_convert_reply
* receive_unlock_reply send_unlock_reply
* receive_cancel_reply send_cancel_reply
* receive_lookup_reply send_lookup_reply
*/
static int _create_message(struct dlm_ls *ls, int mb_len,
int to_nodeid, int mstype,
struct dlm_message **ms_ret,
struct dlm_mhandle **mh_ret)
{
struct dlm_message *ms;
struct dlm_mhandle *mh;
char *mb;
/* get_buffer gives us a message handle (mh) that we need to
pass into lowcomms_commit and a message buffer (mb) that we
write our data into */
mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
if (!mh)
return -ENOBUFS;
memset(mb, 0, mb_len);
ms = (struct dlm_message *) mb;
ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
ms->m_header.h_lockspace = ls->ls_global_id;
ms->m_header.h_nodeid = dlm_our_nodeid();
ms->m_header.h_length = mb_len;
ms->m_header.h_cmd = DLM_MSG;
ms->m_type = mstype;
*mh_ret = mh;
*ms_ret = ms;
return 0;
}
static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
int to_nodeid, int mstype,
struct dlm_message **ms_ret,
struct dlm_mhandle **mh_ret)
{
int mb_len = sizeof(struct dlm_message);
switch (mstype) {
case DLM_MSG_REQUEST:
case DLM_MSG_LOOKUP:
case DLM_MSG_REMOVE:
mb_len += r->res_length;
break;
case DLM_MSG_CONVERT:
case DLM_MSG_UNLOCK:
case DLM_MSG_REQUEST_REPLY:
case DLM_MSG_CONVERT_REPLY:
case DLM_MSG_GRANT:
if (lkb && lkb->lkb_lvbptr)
mb_len += r->res_ls->ls_lvblen;
break;
}
return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
ms_ret, mh_ret);
}
/* further lowcomms enhancements or alternate implementations may make
the return value from this function useful at some point */
static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
{
dlm_message_out(ms);
dlm_lowcomms_commit_buffer(mh);
return 0;
}
static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
struct dlm_message *ms)
{
ms->m_nodeid = lkb->lkb_nodeid;
ms->m_pid = lkb->lkb_ownpid;
ms->m_lkid = lkb->lkb_id;
ms->m_remid = lkb->lkb_remid;
ms->m_exflags = lkb->lkb_exflags;
ms->m_sbflags = lkb->lkb_sbflags;
ms->m_flags = lkb->lkb_flags;
ms->m_lvbseq = lkb->lkb_lvbseq;
ms->m_status = lkb->lkb_status;
ms->m_grmode = lkb->lkb_grmode;
ms->m_rqmode = lkb->lkb_rqmode;
ms->m_hash = r->res_hash;
/* m_result and m_bastmode are set from function args,
not from lkb fields */
if (lkb->lkb_bastfn)
ms->m_asts |= AST_BAST;
if (lkb->lkb_astfn)
ms->m_asts |= AST_COMP;
/* compare with switch in create_message; send_remove() doesn't
use send_args() */
switch (ms->m_type) {
case DLM_MSG_REQUEST:
case DLM_MSG_LOOKUP:
memcpy(ms->m_extra, r->res_name, r->res_length);
break;
case DLM_MSG_CONVERT:
case DLM_MSG_UNLOCK:
case DLM_MSG_REQUEST_REPLY:
case DLM_MSG_CONVERT_REPLY:
case DLM_MSG_GRANT:
if (!lkb->lkb_lvbptr)
break;
memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
break;
}
}
static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
{
struct dlm_message *ms;
struct dlm_mhandle *mh;
int to_nodeid, error;
error = add_to_waiters(lkb, mstype);
if (error)
return error;
to_nodeid = r->res_nodeid;
error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
if (error)
goto fail;
send_args(r, lkb, ms);
error = send_message(mh, ms);
if (error)
goto fail;
return 0;
fail:
remove_from_waiters(lkb, msg_reply_type(mstype));
return error;
}
static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
return send_common(r, lkb, DLM_MSG_REQUEST);
}
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
int error;
error = send_common(r, lkb, DLM_MSG_CONVERT);
/* down conversions go without a reply from the master */
if (!error && down_conversion(lkb)) {
remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
r->res_ls->ls_stub_ms.m_result = 0;
r->res_ls->ls_stub_ms.m_flags = lkb->lkb_flags;
__receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
}
return error;
}
/* FIXME: if this lkb is the only lock we hold on the rsb, then set
MASTER_UNCERTAIN to force the next request on the rsb to confirm
that the master is still correct. */
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
return send_common(r, lkb, DLM_MSG_UNLOCK);
}
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
return send_common(r, lkb, DLM_MSG_CANCEL);
}
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
struct dlm_message *ms;
struct dlm_mhandle *mh;
int to_nodeid, error;
to_nodeid = lkb->lkb_nodeid;
error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
if (error)
goto out;
send_args(r, lkb, ms);
ms->m_result = 0;
error = send_message(mh, ms);
out:
return error;
}
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
{
struct dlm_message *ms;
struct dlm_mhandle *mh;
int to_nodeid, error;
to_nodeid = lkb->lkb_nodeid;
error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
if (error)
goto out;
send_args(r, lkb, ms);
ms->m_bastmode = mode;
error = send_message(mh, ms);
out:
return error;
}
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
struct dlm_message *ms;
struct dlm_mhandle *mh;
int to_nodeid, error;
error = add_to_waiters(lkb, DLM_MSG_LOOKUP);
if (error)
return error;
to_nodeid = dlm_dir_nodeid(r);
error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
if (error)
goto fail;
send_args(r, lkb, ms);
error = send_message(mh, ms);
if (error)
goto fail;
return 0;
fail:
remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
return error;
}
static int send_remove(struct dlm_rsb *r)
{
struct dlm_message *ms;
struct dlm_mhandle *mh;
int to_nodeid, error;
to_nodeid = dlm_dir_nodeid(r);
error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
if (error)
goto out;
memcpy(ms->m_extra, r->res_name, r->res_length);
ms->m_hash = r->res_hash;
error = send_message(mh, ms);
out:
return error;
}
static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
int mstype, int rv)
{
struct dlm_message *ms;
struct dlm_mhandle *mh;
int to_nodeid, error;
to_nodeid = lkb->lkb_nodeid;
error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
if (error)
goto out;
send_args(r, lkb, ms);
ms->m_result = rv;
error = send_message(mh, ms);
out:
return error;
}
static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
}
static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
}
static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
}
static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
}
static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
int ret_nodeid, int rv)
{
struct dlm_rsb *r = &ls->ls_stub_rsb;
struct dlm_message *ms;
struct dlm_mhandle *mh;
int error, nodeid = ms_in->m_header.h_nodeid;
error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
if (error)
goto out;
ms->m_lkid = ms_in->m_lkid;
ms->m_result = rv;
ms->m_nodeid = ret_nodeid;
error = send_message(mh, ms);
out:
return error;
}
/* which args we save from a received message depends heavily on the type
of message, unlike the send side where we can safely send everything about
the lkb for any type of message */
static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
{
lkb->lkb_exflags = ms->m_exflags;
lkb->lkb_sbflags = ms->m_sbflags;
lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
(ms->m_flags & 0x0000FFFF);
}
static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
lkb->lkb_sbflags = ms->m_sbflags;
lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
(ms->m_flags & 0x0000FFFF);
}
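/* Only the low 16 bits of lkb_flags travel in messages; the high bits
   (MSTCPY, RESEND, the overlap flags, ...) are node-local state, which is
   why the masking above preserves them on the receiving side. */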
static int receive_extralen(struct dlm_message *ms)
{
return (ms->m_header.h_length - sizeof(struct dlm_message));
}
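/* Example: a DLM_MSG_REQUEST built for a resource with res_length 8 has
   h_length = sizeof(struct dlm_message) + 8 (see create_message), so
   receive_extralen() recovers the 8 name bytes carried in m_extra. */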
static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_message *ms)
{
int len;
if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
if (!lkb->lkb_lvbptr)
lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
if (!lkb->lkb_lvbptr)
return -ENOMEM;
len = receive_extralen(ms);
if (len > DLM_RESNAME_MAXLEN)
len = DLM_RESNAME_MAXLEN;
memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
}
return 0;
}
static void fake_bastfn(void *astparam, int mode)
{
log_print("fake_bastfn should not be called");
}
static void fake_astfn(void *astparam)
{
log_print("fake_astfn should not be called");
}
static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_message *ms)
{
lkb->lkb_nodeid = ms->m_header.h_nodeid;
lkb->lkb_ownpid = ms->m_pid;
lkb->lkb_remid = ms->m_lkid;
lkb->lkb_grmode = DLM_LOCK_IV;
lkb->lkb_rqmode = ms->m_rqmode;
lkb->lkb_bastfn = (ms->m_asts & AST_BAST) ? &fake_bastfn : NULL;
lkb->lkb_astfn = (ms->m_asts & AST_COMP) ? &fake_astfn : NULL;
if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
/* lkb was just created so there won't be an lvb yet */
lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
if (!lkb->lkb_lvbptr)
return -ENOMEM;
}
return 0;
}
static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_message *ms)
{
if (lkb->lkb_status != DLM_LKSTS_GRANTED)
return -EBUSY;
if (receive_lvb(ls, lkb, ms))
return -ENOMEM;
lkb->lkb_rqmode = ms->m_rqmode;
lkb->lkb_lvbseq = ms->m_lvbseq;
return 0;
}
static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_message *ms)
{
if (receive_lvb(ls, lkb, ms))
return -ENOMEM;
return 0;
}
/* We fill in the stub-lkb fields with the info that send_xxxx_reply()
uses to send a reply and that the remote end uses to process the reply. */
static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
{
struct dlm_lkb *lkb = &ls->ls_stub_lkb;
lkb->lkb_nodeid = ms->m_header.h_nodeid;
lkb->lkb_remid = ms->m_lkid;
}
/* This is called after the rsb is locked so that we can safely inspect
fields in the lkb. */
static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
{
int from = ms->m_header.h_nodeid;
int error = 0;
switch (ms->m_type) {
case DLM_MSG_CONVERT:
case DLM_MSG_UNLOCK:
case DLM_MSG_CANCEL:
if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
error = -EINVAL;
break;
case DLM_MSG_CONVERT_REPLY:
case DLM_MSG_UNLOCK_REPLY:
case DLM_MSG_CANCEL_REPLY:
case DLM_MSG_GRANT:
case DLM_MSG_BAST:
if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
error = -EINVAL;
break;
case DLM_MSG_REQUEST_REPLY:
if (!is_process_copy(lkb))
error = -EINVAL;
else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
error = -EINVAL;
break;
default:
error = -EINVAL;
}
if (error)
log_error(lkb->lkb_resource->res_ls,
"ignore invalid message %d from %d %x %x %x %d",
ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
lkb->lkb_flags, lkb->lkb_nodeid);
return error;
}
static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error, namelen;
error = create_lkb(ls, &lkb);
if (error)
goto fail;
receive_flags(lkb, ms);
lkb->lkb_flags |= DLM_IFL_MSTCPY;
error = receive_request_args(ls, lkb, ms);
if (error) {
__put_lkb(ls, lkb);
goto fail;
}
namelen = receive_extralen(ms);
error = find_rsb(ls, ms->m_extra, namelen, R_MASTER, &r);
if (error) {
__put_lkb(ls, lkb);
goto fail;
}
lock_rsb(r);
attach_lkb(r, lkb);
error = do_request(r, lkb);
send_request_reply(r, lkb, error);
do_request_effects(r, lkb, error);
unlock_rsb(r);
put_rsb(r);
if (error == -EINPROGRESS)
error = 0;
if (error)
dlm_put_lkb(lkb);
return;
fail:
setup_stub_lkb(ls, ms);
send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
}
static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error, reply = 1;
error = find_lkb(ls, ms->m_remid, &lkb);
if (error)
goto fail;
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
error = validate_message(lkb, ms);
if (error)
goto out;
receive_flags(lkb, ms);
error = receive_convert_args(ls, lkb, ms);
if (error) {
send_convert_reply(r, lkb, error);
goto out;
}
reply = !down_conversion(lkb);
error = do_convert(r, lkb);
if (reply)
send_convert_reply(r, lkb, error);
do_convert_effects(r, lkb, error);
out:
unlock_rsb(r);
put_rsb(r);
dlm_put_lkb(lkb);
return;
fail:
setup_stub_lkb(ls, ms);
send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
}
static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error;
error = find_lkb(ls, ms->m_remid, &lkb);
if (error)
goto fail;
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
error = validate_message(lkb, ms);
if (error)
goto out;
receive_flags(lkb, ms);
error = receive_unlock_args(ls, lkb, ms);
if (error) {
send_unlock_reply(r, lkb, error);
goto out;
}
error = do_unlock(r, lkb);
send_unlock_reply(r, lkb, error);
do_unlock_effects(r, lkb, error);
out:
unlock_rsb(r);
put_rsb(r);
dlm_put_lkb(lkb);
return;
fail:
setup_stub_lkb(ls, ms);
send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
}
static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error;
error = find_lkb(ls, ms->m_remid, &lkb);
if (error)
goto fail;
receive_flags(lkb, ms);
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
error = validate_message(lkb, ms);
if (error)
goto out;
error = do_cancel(r, lkb);
send_cancel_reply(r, lkb, error);
do_cancel_effects(r, lkb, error);
out:
unlock_rsb(r);
put_rsb(r);
dlm_put_lkb(lkb);
return;
fail:
setup_stub_lkb(ls, ms);
send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
}
static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error;
error = find_lkb(ls, ms->m_remid, &lkb);
if (error) {
log_debug(ls, "receive_grant from %d no lkb %x",
ms->m_header.h_nodeid, ms->m_remid);
return;
}
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
error = validate_message(lkb, ms);
if (error)
goto out;
receive_flags_reply(lkb, ms);
if (is_altmode(lkb))
munge_altmode(lkb, ms);
grant_lock_pc(r, lkb, ms);
queue_cast(r, lkb, 0);
out:
unlock_rsb(r);
put_rsb(r);
dlm_put_lkb(lkb);
}
static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error;
error = find_lkb(ls, ms->m_remid, &lkb);
if (error) {
log_debug(ls, "receive_bast from %d no lkb %x",
ms->m_header.h_nodeid, ms->m_remid);
return;
}
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
error = validate_message(lkb, ms);
if (error)
goto out;
queue_bast(r, lkb, ms->m_bastmode);
out:
unlock_rsb(r);
put_rsb(r);
dlm_put_lkb(lkb);
}
static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
{
int len, error, ret_nodeid, dir_nodeid, from_nodeid, our_nodeid;
from_nodeid = ms->m_header.h_nodeid;
our_nodeid = dlm_our_nodeid();
len = receive_extralen(ms);
dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
if (dir_nodeid != our_nodeid) {
log_error(ls, "lookup dir_nodeid %d from %d",
dir_nodeid, from_nodeid);
error = -EINVAL;
ret_nodeid = -1;
goto out;
}
error = dlm_dir_lookup(ls, from_nodeid, ms->m_extra, len, &ret_nodeid);
/* Optimization: we're master so treat lookup as a request */
if (!error && ret_nodeid == our_nodeid) {
receive_request(ls, ms);
return;
}
out:
send_lookup_reply(ls, ms, ret_nodeid, error);
}
static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
{
int len, dir_nodeid, from_nodeid;
from_nodeid = ms->m_header.h_nodeid;
len = receive_extralen(ms);
dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
if (dir_nodeid != dlm_our_nodeid()) {
log_error(ls, "remove dir entry dir_nodeid %d from %d",
dir_nodeid, from_nodeid);
return;
}
dlm_dir_remove_entry(ls, from_nodeid, ms->m_extra, len);
}
static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
{
do_purge(ls, ms->m_nodeid, ms->m_pid);
}
static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error, mstype, result;
error = find_lkb(ls, ms->m_remid, &lkb);
if (error) {
log_debug(ls, "receive_request_reply from %d no lkb %x",
ms->m_header.h_nodeid, ms->m_remid);
return;
}
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
error = validate_message(lkb, ms);
if (error)
goto out;
mstype = lkb->lkb_wait_type;
error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
if (error)
goto out;
/* Optimization: the dir node was also the master, so it took our
lookup as a request and sent request reply instead of lookup reply */
if (mstype == DLM_MSG_LOOKUP) {
r->res_nodeid = ms->m_header.h_nodeid;
lkb->lkb_nodeid = r->res_nodeid;
}
/* this is the value returned from do_request() on the master */
result = ms->m_result;
switch (result) {
case -EAGAIN:
/* request would block (be queued) on remote master */
queue_cast(r, lkb, -EAGAIN);
confirm_master(r, -EAGAIN);
unhold_lkb(lkb); /* undoes create_lkb() */
break;
case -EINPROGRESS:
case 0:
/* request was queued or granted on remote master */
receive_flags_reply(lkb, ms);
lkb->lkb_remid = ms->m_lkid;
if (is_altmode(lkb))
munge_altmode(lkb, ms);
if (result) {
add_lkb(r, lkb, DLM_LKSTS_WAITING);
add_timeout(lkb);
} else {
grant_lock_pc(r, lkb, ms);
queue_cast(r, lkb, 0);
}
confirm_master(r, result);
break;
case -EBADR:
case -ENOTBLK:
/* find_rsb failed to find rsb or rsb wasn't master */
log_debug(ls, "receive_request_reply %x %x master diff %d %d",
lkb->lkb_id, lkb->lkb_flags, r->res_nodeid, result);
r->res_nodeid = -1;
lkb->lkb_nodeid = -1;
if (is_overlap(lkb)) {
/* we'll ignore error in cancel/unlock reply */
queue_cast_overlap(r, lkb);
confirm_master(r, result);
unhold_lkb(lkb); /* undoes create_lkb() */
} else
_request_lock(r, lkb);
break;
default:
log_error(ls, "receive_request_reply %x error %d",
lkb->lkb_id, result);
}
if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
log_debug(ls, "receive_request_reply %x result %d unlock",
lkb->lkb_id, result);
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
send_unlock(r, lkb);
} else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
send_cancel(r, lkb);
} else {
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
}
out:
unlock_rsb(r);
put_rsb(r);
dlm_put_lkb(lkb);
}
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
struct dlm_message *ms)
{
/* this is the value returned from do_convert() on the master */
switch (ms->m_result) {
case -EAGAIN:
/* convert would block (be queued) on remote master */
queue_cast(r, lkb, -EAGAIN);
break;
case -EDEADLK:
receive_flags_reply(lkb, ms);
revert_lock_pc(r, lkb);
queue_cast(r, lkb, -EDEADLK);
break;
case -EINPROGRESS:
/* convert was queued on remote master */
receive_flags_reply(lkb, ms);
if (is_demoted(lkb))
munge_demoted(lkb, ms);
del_lkb(r, lkb);
add_lkb(r, lkb, DLM_LKSTS_CONVERT);
add_timeout(lkb);
break;
case 0:
/* convert was granted on remote master */
receive_flags_reply(lkb, ms);
if (is_demoted(lkb))
munge_demoted(lkb, ms);
grant_lock_pc(r, lkb, ms);
queue_cast(r, lkb, 0);
break;
default:
log_error(r->res_ls, "receive_convert_reply %x error %d",
lkb->lkb_id, ms->m_result);
}
}
static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
struct dlm_rsb *r = lkb->lkb_resource;
int error;
hold_rsb(r);
lock_rsb(r);
error = validate_message(lkb, ms);
if (error)
goto out;
/* stub reply can happen with waiters_mutex held */
error = remove_from_waiters_ms(lkb, ms);
if (error)
goto out;
__receive_convert_reply(r, lkb, ms);
out:
unlock_rsb(r);
put_rsb(r);
}
static void receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
struct dlm_lkb *lkb;
int error;
error = find_lkb(ls, ms->m_remid, &lkb);
if (error) {
log_debug(ls, "receive_convert_reply from %d no lkb %x",
ms->m_header.h_nodeid, ms->m_remid);
return;
}
_receive_convert_reply(lkb, ms);
dlm_put_lkb(lkb);
}
static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
struct dlm_rsb *r = lkb->lkb_resource;
int error;
hold_rsb(r);
lock_rsb(r);
error = validate_message(lkb, ms);
if (error)
goto out;
/* stub reply can happen with waiters_mutex held */
error = remove_from_waiters_ms(lkb, ms);
if (error)
goto out;
/* this is the value returned from do_unlock() on the master */
switch (ms->m_result) {
case -DLM_EUNLOCK:
receive_flags_reply(lkb, ms);
remove_lock_pc(r, lkb);
queue_cast(r, lkb, -DLM_EUNLOCK);
break;
case -ENOENT:
break;
default:
log_error(r->res_ls, "receive_unlock_reply %x error %d",
lkb->lkb_id, ms->m_result);
}
out:
unlock_rsb(r);
put_rsb(r);
}
static void receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
struct dlm_lkb *lkb;
int error;
error = find_lkb(ls, ms->m_remid, &lkb);
if (error) {
log_debug(ls, "receive_unlock_reply from %d no lkb %x",
ms->m_header.h_nodeid, ms->m_remid);
return;
}
_receive_unlock_reply(lkb, ms);
dlm_put_lkb(lkb);
}
static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
{
struct dlm_rsb *r = lkb->lkb_resource;
int error;
hold_rsb(r);
lock_rsb(r);
error = validate_message(lkb, ms);
if (error)
goto out;
/* stub reply can happen with waiters_mutex held */
error = remove_from_waiters_ms(lkb, ms);
if (error)
goto out;
/* this is the value returned from do_cancel() on the master */
switch (ms->m_result) {
case -DLM_ECANCEL:
receive_flags_reply(lkb, ms);
revert_lock_pc(r, lkb);
queue_cast(r, lkb, -DLM_ECANCEL);
break;
case 0:
break;
default:
log_error(r->res_ls, "receive_cancel_reply %x error %d",
lkb->lkb_id, ms->m_result);
}
out:
unlock_rsb(r);
put_rsb(r);
}
static void receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
struct dlm_lkb *lkb;
int error;
error = find_lkb(ls, ms->m_remid, &lkb);
if (error) {
log_debug(ls, "receive_cancel_reply from %d no lkb %x",
ms->m_header.h_nodeid, ms->m_remid);
return;
}
_receive_cancel_reply(lkb, ms);
dlm_put_lkb(lkb);
}
static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error, ret_nodeid;
error = find_lkb(ls, ms->m_lkid, &lkb);
if (error) {
log_error(ls, "receive_lookup_reply no lkb");
return;
}
/* ms->m_result is the value returned by dlm_dir_lookup on the dir node
FIXME: will a non-zero error ever be returned? */
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
if (error)
goto out;
ret_nodeid = ms->m_nodeid;
if (ret_nodeid == dlm_our_nodeid()) {
r->res_nodeid = 0;
ret_nodeid = 0;
r->res_first_lkid = 0;
} else {
/* set_master() will copy res_nodeid to lkb_nodeid */
r->res_nodeid = ret_nodeid;
}
if (is_overlap(lkb)) {
log_debug(ls, "receive_lookup_reply %x unlock %x",
lkb->lkb_id, lkb->lkb_flags);
queue_cast_overlap(r, lkb);
unhold_lkb(lkb); /* undoes create_lkb() */
goto out_list;
}
_request_lock(r, lkb);
out_list:
if (!ret_nodeid)
process_lookup_list(r);
out:
unlock_rsb(r);
put_rsb(r);
dlm_put_lkb(lkb);
}
static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms)
{
if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
log_debug(ls, "ignore non-member message %d from %d %x %x %d",
ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
ms->m_remid, ms->m_result);
return;
}
switch (ms->m_type) {
/* messages sent to a master node */
case DLM_MSG_REQUEST:
receive_request(ls, ms);
break;
case DLM_MSG_CONVERT:
receive_convert(ls, ms);
break;
case DLM_MSG_UNLOCK:
receive_unlock(ls, ms);
break;
case DLM_MSG_CANCEL:
receive_cancel(ls, ms);
break;
/* messages sent from a master node (replies to above) */
case DLM_MSG_REQUEST_REPLY:
receive_request_reply(ls, ms);
break;
case DLM_MSG_CONVERT_REPLY:
receive_convert_reply(ls, ms);
break;
case DLM_MSG_UNLOCK_REPLY:
receive_unlock_reply(ls, ms);
break;
case DLM_MSG_CANCEL_REPLY:
receive_cancel_reply(ls, ms);
break;
/* messages sent from a master node (only two types of async msg) */
case DLM_MSG_GRANT:
receive_grant(ls, ms);
break;
case DLM_MSG_BAST:
receive_bast(ls, ms);
break;
/* messages sent to a dir node */
case DLM_MSG_LOOKUP:
receive_lookup(ls, ms);
break;
case DLM_MSG_REMOVE:
receive_remove(ls, ms);
break;
/* messages sent from a dir node (remove has no reply) */
case DLM_MSG_LOOKUP_REPLY:
receive_lookup_reply(ls, ms);
break;
/* other messages */
case DLM_MSG_PURGE:
receive_purge(ls, ms);
break;
default:
log_error(ls, "unknown message type %d", ms->m_type);
}
dlm_astd_wake();
}
/* If the lockspace is in recovery mode (locking stopped), then normal
messages are saved on the requestqueue for processing after recovery is
done. When not in recovery mode, we wait for dlm_recoverd to drain saved
messages off the requestqueue before we process new ones. This occurs right
after recovery completes when we transition from saving all messages on
requestqueue, to processing all the saved messages, to processing new
messages as they arrive. */
static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
int nodeid)
{
if (dlm_locking_stopped(ls)) {
dlm_add_requestqueue(ls, nodeid, ms);
} else {
dlm_wait_requestqueue(ls);
_receive_message(ls, ms);
}
}
/* This is called by dlm_recoverd to process messages that were saved on
the requestqueue. */
void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms)
{
_receive_message(ls, ms);
}
/* This is called by the midcomms layer when something is received for
the lockspace. It could be either a MSG (normal message sent as part of
standard locking activity) or an RCOM (recovery message sent as part of
lockspace recovery). */
void dlm_receive_buffer(union dlm_packet *p, int nodeid)
{
struct dlm_header *hd = &p->header;
struct dlm_ls *ls;
int type = 0;
switch (hd->h_cmd) {
case DLM_MSG:
dlm_message_in(&p->message);
type = p->message.m_type;
break;
case DLM_RCOM:
dlm_rcom_in(&p->rcom);
type = p->rcom.rc_type;
break;
default:
log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
return;
}
if (hd->h_nodeid != nodeid) {
log_print("invalid h_nodeid %d from %d lockspace %x",
hd->h_nodeid, nodeid, hd->h_lockspace);
return;
}
ls = dlm_find_lockspace_global(hd->h_lockspace);
if (!ls) {
if (dlm_config.ci_log_debug)
log_print("invalid lockspace %x from %d cmd %d type %d",
hd->h_lockspace, nodeid, hd->h_cmd, type);
if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
dlm_send_ls_not_ready(nodeid, &p->rcom);
return;
}
/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
be inactive (in this ls) before transitioning to recovery mode */
down_read(&ls->ls_recv_active);
if (hd->h_cmd == DLM_MSG)
dlm_receive_message(ls, &p->message, nodeid);
else
dlm_receive_rcom(ls, &p->rcom, nodeid);
up_read(&ls->ls_recv_active);
dlm_put_lockspace(ls);
}
static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
if (middle_conversion(lkb)) {
hold_lkb(lkb);
ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
ls->ls_stub_ms.m_result = -EINPROGRESS;
ls->ls_stub_ms.m_flags = lkb->lkb_flags;
ls->ls_stub_ms.m_header.h_nodeid = lkb->lkb_nodeid;
_receive_convert_reply(lkb, &ls->ls_stub_ms);
/* Same special case as in receive_rcom_lock_args() */
lkb->lkb_grmode = DLM_LOCK_IV;
rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
unhold_lkb(lkb);
} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
lkb->lkb_flags |= DLM_IFL_RESEND;
}
/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
conversions are async; there's no reply from the remote master */
}
/* A waiting lkb needs recovery if the master node has failed, or
the master node is changing (only when no directory is used) */
static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
if (dlm_is_removed(ls, lkb->lkb_nodeid))
return 1;
if (!dlm_no_directory(ls))
return 0;
if (dlm_dir_nodeid(lkb->lkb_resource) != lkb->lkb_nodeid)
return 1;
return 0;
}
/* Recovery for locks that are waiting for replies from nodes that are now
gone. We can just complete unlocks and cancels by faking a reply from the
dead node. Requests and up-conversions we flag to be resent after
recovery. Down-conversions can just be completed with a fake reply like
unlocks. Conversions between PR and CW need special attention. */
void dlm_recover_waiters_pre(struct dlm_ls *ls)
{
struct dlm_lkb *lkb, *safe;
int wait_type, stub_unlock_result, stub_cancel_result;
mutex_lock(&ls->ls_waiters_mutex);
list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
log_debug(ls, "pre recover waiter lkid %x type %d flags %x",
lkb->lkb_id, lkb->lkb_wait_type, lkb->lkb_flags);
/* all outstanding lookups, regardless of destination, will be
resent after recovery is done */
if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
lkb->lkb_flags |= DLM_IFL_RESEND;
continue;
}
if (!waiter_needs_recovery(ls, lkb))
continue;
wait_type = lkb->lkb_wait_type;
stub_unlock_result = -DLM_EUNLOCK;
stub_cancel_result = -DLM_ECANCEL;
/* Main reply may have been received leaving a zero wait_type,
but a reply for the overlapping op may not have been
received. In that case we need to fake the appropriate
reply for the overlap op. */
if (!wait_type) {
if (is_overlap_cancel(lkb)) {
wait_type = DLM_MSG_CANCEL;
if (lkb->lkb_grmode == DLM_LOCK_IV)
stub_cancel_result = 0;
}
if (is_overlap_unlock(lkb)) {
wait_type = DLM_MSG_UNLOCK;
if (lkb->lkb_grmode == DLM_LOCK_IV)
stub_unlock_result = -ENOENT;
}
log_debug(ls, "rwpre overlap %x %x %d %d %d",
lkb->lkb_id, lkb->lkb_flags, wait_type,
stub_cancel_result, stub_unlock_result);
}
switch (wait_type) {
case DLM_MSG_REQUEST:
lkb->lkb_flags |= DLM_IFL_RESEND;
break;
case DLM_MSG_CONVERT:
recover_convert_waiter(ls, lkb);
break;
case DLM_MSG_UNLOCK:
hold_lkb(lkb);
ls->ls_stub_ms.m_type = DLM_MSG_UNLOCK_REPLY;
ls->ls_stub_ms.m_result = stub_unlock_result;
ls->ls_stub_ms.m_flags = lkb->lkb_flags;
ls->ls_stub_ms.m_header.h_nodeid = lkb->lkb_nodeid;
_receive_unlock_reply(lkb, &ls->ls_stub_ms);
dlm_put_lkb(lkb);
break;
case DLM_MSG_CANCEL:
hold_lkb(lkb);
ls->ls_stub_ms.m_type = DLM_MSG_CANCEL_REPLY;
ls->ls_stub_ms.m_result = stub_cancel_result;
ls->ls_stub_ms.m_flags = lkb->lkb_flags;
ls->ls_stub_ms.m_header.h_nodeid = lkb->lkb_nodeid;
_receive_cancel_reply(lkb, &ls->ls_stub_ms);
dlm_put_lkb(lkb);
break;
default:
log_error(ls, "invalid lkb wait_type %d %d",
lkb->lkb_wait_type, wait_type);
}
schedule();
}
mutex_unlock(&ls->ls_waiters_mutex);
}
static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
{
struct dlm_lkb *lkb;
int found = 0;
mutex_lock(&ls->ls_waiters_mutex);
list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
if (lkb->lkb_flags & DLM_IFL_RESEND) {
hold_lkb(lkb);
found = 1;
break;
}
}
mutex_unlock(&ls->ls_waiters_mutex);
if (!found)
lkb = NULL;
return lkb;
}
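/* find_resend_waiter() above pulls out just one lkb at a time, taking a
reference under the waiters_mutex and dropping the mutex before returning,
so dlm_recover_waiters_post() below never holds the waiters_mutex across
the rsb locking done while the lkb is reprocessed */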
/* Deal with lookups and lkb's marked RESEND from _pre. We may now be the
master or dir-node for r. Processing the lkb may result in it being placed
back on waiters. */
/* We do this after normal locking has been enabled and any saved messages
(in requestqueue) have been processed. We should be confident that at
this point we won't get or process a reply to any of these waiting
operations. But, new ops may be coming in on the rsbs/locks here from
userspace or remotely. */
/* there may have been an overlap unlock/cancel prior to recovery or after
recovery. if before, the lkb may still have a positive wait_count; if after,
the overlap flag would just have been set and nothing new sent. we can be
confident here that any replies to either the initial op or overlap ops
prior to recovery have been received. */
int dlm_recover_waiters_post(struct dlm_ls *ls)
{
struct dlm_lkb *lkb;
struct dlm_rsb *r;
int error = 0, mstype, err, oc, ou;
while (1) {
if (dlm_locking_stopped(ls)) {
log_debug(ls, "recover_waiters_post aborted");
error = -EINTR;
break;
}
lkb = find_resend_waiter(ls);
if (!lkb)
break;
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
mstype = lkb->lkb_wait_type;
oc = is_overlap_cancel(lkb);
ou = is_overlap_unlock(lkb);
err = 0;
log_debug(ls, "recover_waiters_post %x type %d flags %x %s",
lkb->lkb_id, mstype, lkb->lkb_flags, r->res_name);
/* At this point we assume that we won't get a reply to any
previous op or overlap op on this lock. First, do a big
remove_from_waiters() for all previous ops. */
lkb->lkb_flags &= ~DLM_IFL_RESEND;
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
lkb->lkb_wait_type = 0;
lkb->lkb_wait_count = 0;
mutex_lock(&ls->ls_waiters_mutex);
list_del_init(&lkb->lkb_wait_reply);
mutex_unlock(&ls->ls_waiters_mutex);
unhold_lkb(lkb); /* for waiters list */
if (oc || ou) {
/* do an unlock or cancel instead of resending */
switch (mstype) {
case DLM_MSG_LOOKUP:
case DLM_MSG_REQUEST:
queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
-DLM_ECANCEL);
unhold_lkb(lkb); /* undoes create_lkb() */
break;
case DLM_MSG_CONVERT:
if (oc) {
queue_cast(r, lkb, -DLM_ECANCEL);
} else {
lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
_unlock_lock(r, lkb);
}
break;
default:
err = 1;
}
} else {
switch (mstype) {
case DLM_MSG_LOOKUP:
case DLM_MSG_REQUEST:
_request_lock(r, lkb);
if (is_master(r))
confirm_master(r, 0);
break;
case DLM_MSG_CONVERT:
_convert_lock(r, lkb);
break;
default:
err = 1;
}
}
if (err)
log_error(ls, "recover_waiters_post %x %d %x %d %d",
lkb->lkb_id, mstype, lkb->lkb_flags, oc, ou);
unlock_rsb(r);
put_rsb(r);
dlm_put_lkb(lkb);
}
return error;
}
static void purge_queue(struct dlm_rsb *r, struct list_head *queue,
int (*test)(struct dlm_ls *ls, struct dlm_lkb *lkb))
{
struct dlm_ls *ls = r->res_ls;
struct dlm_lkb *lkb, *safe;
list_for_each_entry_safe(lkb, safe, queue, lkb_statequeue) {
if (test(ls, lkb)) {
rsb_set_flag(r, RSB_LOCKS_PURGED);
del_lkb(r, lkb);
/* this put should free the lkb */
if (!dlm_put_lkb(lkb))
log_error(ls, "purged lkb not released");
}
}
}
static int purge_dead_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
return (is_master_copy(lkb) && dlm_is_removed(ls, lkb->lkb_nodeid));
}
static int purge_mstcpy_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
return is_master_copy(lkb);
}
static void purge_dead_locks(struct dlm_rsb *r)
{
purge_queue(r, &r->res_grantqueue, &purge_dead_test);
purge_queue(r, &r->res_convertqueue, &purge_dead_test);
purge_queue(r, &r->res_waitqueue, &purge_dead_test);
}
void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
{
purge_queue(r, &r->res_grantqueue, &purge_mstcpy_test);
purge_queue(r, &r->res_convertqueue, &purge_mstcpy_test);
purge_queue(r, &r->res_waitqueue, &purge_mstcpy_test);
}
/* Get rid of locks held by nodes that are gone. */
int dlm_purge_locks(struct dlm_ls *ls)
{
struct dlm_rsb *r;
log_debug(ls, "dlm_purge_locks");
down_write(&ls->ls_root_sem);
list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
hold_rsb(r);
lock_rsb(r);
if (is_master(r))
purge_dead_locks(r);
unlock_rsb(r);
unhold_rsb(r);
schedule();
}
up_write(&ls->ls_root_sem);
return 0;
}
static struct dlm_rsb *find_purged_rsb(struct dlm_ls *ls, int bucket)
{
struct dlm_rsb *r, *r_ret = NULL;
spin_lock(&ls->ls_rsbtbl[bucket].lock);
list_for_each_entry(r, &ls->ls_rsbtbl[bucket].list, res_hashchain) {
if (!rsb_flag(r, RSB_LOCKS_PURGED))
continue;
hold_rsb(r);
rsb_clear_flag(r, RSB_LOCKS_PURGED);
r_ret = r;
break;
}
spin_unlock(&ls->ls_rsbtbl[bucket].lock);
return r_ret;
}
void dlm_grant_after_purge(struct dlm_ls *ls)
{
struct dlm_rsb *r;
int bucket = 0;
while (1) {
r = find_purged_rsb(ls, bucket);
if (!r) {
if (bucket == ls->ls_rsbtbl_size - 1)
break;
bucket++;
continue;
}
lock_rsb(r);
if (is_master(r)) {
grant_pending_locks(r);
confirm_master(r, 0);
}
unlock_rsb(r);
put_rsb(r);
schedule();
}
}
static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
uint32_t remid)
{
struct dlm_lkb *lkb;
list_for_each_entry(lkb, head, lkb_statequeue) {
if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
return lkb;
}
return NULL;
}
static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
uint32_t remid)
{
struct dlm_lkb *lkb;
lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
if (lkb)
return lkb;
lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
if (lkb)
return lkb;
lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
if (lkb)
return lkb;
return NULL;
}
/* needs at least dlm_rcom + rcom_lock */
static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_rsb *r, struct dlm_rcom *rc)
{
struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
lkb->lkb_nodeid = rc->rc_header.h_nodeid;
lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
lkb->lkb_flags |= DLM_IFL_MSTCPY;
lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
lkb->lkb_rqmode = rl->rl_rqmode;
lkb->lkb_grmode = rl->rl_grmode;
/* don't set lkb_status because add_lkb wants to set it itself */
lkb->lkb_bastfn = (rl->rl_asts & AST_BAST) ? &fake_bastfn : NULL;
lkb->lkb_astfn = (rl->rl_asts & AST_COMP) ? &fake_astfn : NULL;
if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
sizeof(struct rcom_lock);
if (lvblen > ls->ls_lvblen)
return -EINVAL;
lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
if (!lkb->lkb_lvbptr)
return -ENOMEM;
memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
}
/* Conversions between PR and CW (middle modes) need special handling.
The real granted mode of these converting locks cannot be determined
until all locks have been rebuilt on the rsb (recover_conversion) */
if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
middle_conversion(lkb)) {
rl->rl_status = DLM_LKSTS_CONVERT;
lkb->lkb_grmode = DLM_LOCK_IV;
rsb_set_flag(r, RSB_RECOVER_CONVERT);
}
return 0;
}
/* This lkb may have been recovered in a previous aborted recovery so we need
to check if the rsb already has an lkb with the given remote nodeid/lkid.
If so we just send back a standard reply. If not, we create a new lkb with
the given values and send back our lkid. We send back our lkid by sending
back the rcom_lock struct we got but with the remid field filled in. */
/* needs at least dlm_rcom + rcom_lock */
int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
{
struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
struct dlm_rsb *r;
struct dlm_lkb *lkb;
int error;
if (rl->rl_parent_lkid) {
error = -EOPNOTSUPP;
goto out;
}
error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
R_MASTER, &r);
if (error)
goto out;
lock_rsb(r);
lkb = search_remid(r, rc->rc_header.h_nodeid, le32_to_cpu(rl->rl_lkid));
if (lkb) {
error = -EEXIST;
goto out_remid;
}
error = create_lkb(ls, &lkb);
if (error)
goto out_unlock;
error = receive_rcom_lock_args(ls, lkb, r, rc);
if (error) {
__put_lkb(ls, lkb);
goto out_unlock;
}
attach_lkb(r, lkb);
add_lkb(r, lkb, rl->rl_status);
error = 0;
out_remid:
/* this is the new value returned to the lock holder for
saving in its process-copy lkb */
rl->rl_remid = cpu_to_le32(lkb->lkb_id);
out_unlock:
unlock_rsb(r);
put_rsb(r);
out:
if (error)
log_debug(ls, "recover_master_copy %d %x", error,
le32_to_cpu(rl->rl_lkid));
rl->rl_result = cpu_to_le32(error);
return error;
}
/* needs at least dlm_rcom + rcom_lock */
int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
{
struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
struct dlm_rsb *r;
struct dlm_lkb *lkb;
int error;
error = find_lkb(ls, le32_to_cpu(rl->rl_lkid), &lkb);
if (error) {
log_error(ls, "recover_process_copy no lkid %x",
le32_to_cpu(rl->rl_lkid));
return error;
}
DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
error = le32_to_cpu(rl->rl_result);
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
switch (error) {
case -EBADR:
/* There's a chance the new master received our lock before
dlm_recover_master_reply(); this wouldn't happen if we did
a barrier between recover_masters and recover_locks. */
log_debug(ls, "master copy not ready %x r %lx %s", lkb->lkb_id,
(unsigned long)r, r->res_name);
dlm_send_rcom_lock(r, lkb);
goto out;
case -EEXIST:
log_debug(ls, "master copy exists %x", lkb->lkb_id);
/* fall through */
case 0:
lkb->lkb_remid = le32_to_cpu(rl->rl_remid);
break;
default:
log_error(ls, "dlm_recover_process_copy unknown error %d %x",
error, lkb->lkb_id);
}
/* an ack for dlm_recover_locks() which waits for replies from
all the locks it sends to new masters */
dlm_recovered_lock(r);
out:
unlock_rsb(r);
put_rsb(r);
dlm_put_lkb(lkb);
return 0;
}
int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
int mode, uint32_t flags, void *name, unsigned int namelen,
unsigned long timeout_cs)
{
struct dlm_lkb *lkb;
struct dlm_args args;
int error;
dlm_lock_recovery(ls);
error = create_lkb(ls, &lkb);
if (error) {
kfree(ua);
goto out;
}
if (flags & DLM_LKF_VALBLK) {
ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
if (!ua->lksb.sb_lvbptr) {
kfree(ua);
__put_lkb(ls, lkb);
error = -ENOMEM;
goto out;
}
}
/* After ua is attached to lkb it will be freed by dlm_free_lkb().
When DLM_IFL_USER is set, the dlm knows that this is a userspace
lock and that lkb_astparam is the dlm_user_args structure. */
error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
fake_astfn, ua, fake_bastfn, &args);
lkb->lkb_flags |= DLM_IFL_USER;
ua->old_mode = DLM_LOCK_IV;
if (error) {
__put_lkb(ls, lkb);
goto out;
}
error = request_lock(ls, lkb, name, namelen, &args);
switch (error) {
case 0:
break;
case -EINPROGRESS:
error = 0;
break;
case -EAGAIN:
error = 0;
/* fall through */
default:
__put_lkb(ls, lkb);
goto out;
}
/* add this new lkb to the per-process list of locks */
spin_lock(&ua->proc->locks_spin);
hold_lkb(lkb);
list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
spin_unlock(&ua->proc->locks_spin);
out:
dlm_unlock_recovery(ls);
return error;
}
int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
unsigned long timeout_cs)
{
struct dlm_lkb *lkb;
struct dlm_args args;
struct dlm_user_args *ua;
int error;
dlm_lock_recovery(ls);
error = find_lkb(ls, lkid, &lkb);
if (error)
goto out;
/* user can change the params on its lock when it converts it, or
add an lvb that didn't exist before */
ua = lkb->lkb_ua;
if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
if (!ua->lksb.sb_lvbptr) {
error = -ENOMEM;
goto out_put;
}
}
if (lvb_in && ua->lksb.sb_lvbptr)
memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
ua->xid = ua_tmp->xid;
ua->castparam = ua_tmp->castparam;
ua->castaddr = ua_tmp->castaddr;
ua->bastparam = ua_tmp->bastparam;
ua->bastaddr = ua_tmp->bastaddr;
ua->user_lksb = ua_tmp->user_lksb;
ua->old_mode = lkb->lkb_grmode;
error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
fake_astfn, ua, fake_bastfn, &args);
if (error)
goto out_put;
error = convert_lock(ls, lkb, &args);
if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
error = 0;
out_put:
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
kfree(ua_tmp);
return error;
}
int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
uint32_t flags, uint32_t lkid, char *lvb_in)
{
struct dlm_lkb *lkb;
struct dlm_args args;
struct dlm_user_args *ua;
int error;
dlm_lock_recovery(ls);
error = find_lkb(ls, lkid, &lkb);
if (error)
goto out;
ua = lkb->lkb_ua;
if (lvb_in && ua->lksb.sb_lvbptr)
memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
if (ua_tmp->castparam)
ua->castparam = ua_tmp->castparam;
ua->user_lksb = ua_tmp->user_lksb;
error = set_unlock_args(flags, ua, &args);
if (error)
goto out_put;
error = unlock_lock(ls, lkb, &args);
if (error == -DLM_EUNLOCK)
error = 0;
/* from validate_unlock_args() */
if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
error = 0;
if (error)
goto out_put;
spin_lock(&ua->proc->locks_spin);
/* dlm_user_add_ast() may have already taken lkb off the proc list */
if (!list_empty(&lkb->lkb_ownqueue))
list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
spin_unlock(&ua->proc->locks_spin);
out_put:
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
kfree(ua_tmp);
return error;
}
int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
uint32_t flags, uint32_t lkid)
{
struct dlm_lkb *lkb;
struct dlm_args args;
struct dlm_user_args *ua;
int error;
dlm_lock_recovery(ls);
error = find_lkb(ls, lkid, &lkb);
if (error)
goto out;
ua = lkb->lkb_ua;
if (ua_tmp->castparam)
ua->castparam = ua_tmp->castparam;
ua->user_lksb = ua_tmp->user_lksb;
error = set_unlock_args(flags, ua, &args);
if (error)
goto out_put;
error = cancel_lock(ls, lkb, &args);
if (error == -DLM_ECANCEL)
error = 0;
/* from validate_unlock_args() */
if (error == -EBUSY)
error = 0;
out_put:
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
kfree(ua_tmp);
return error;
}
int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
{
struct dlm_lkb *lkb;
struct dlm_args args;
struct dlm_user_args *ua;
struct dlm_rsb *r;
int error;
dlm_lock_recovery(ls);
error = find_lkb(ls, lkid, &lkb);
if (error)
goto out;
ua = lkb->lkb_ua;
error = set_unlock_args(flags, ua, &args);
if (error)
goto out_put;
/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */
r = lkb->lkb_resource;
hold_rsb(r);
lock_rsb(r);
error = validate_unlock_args(lkb, &args);
if (error)
goto out_r;
lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;
error = _cancel_lock(r, lkb);
out_r:
unlock_rsb(r);
put_rsb(r);
if (error == -DLM_ECANCEL)
error = 0;
/* from validate_unlock_args() */
if (error == -EBUSY)
error = 0;
out_put:
dlm_put_lkb(lkb);
out:
dlm_unlock_recovery(ls);
return error;
}
/* lkb's that are removed from the waiters list by revert are just left on the
orphans list with the granted orphan locks, to be freed by purge */
static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
struct dlm_args args;
int error;
hold_lkb(lkb);
mutex_lock(&ls->ls_orphans_mutex);
list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
mutex_unlock(&ls->ls_orphans_mutex);
set_unlock_args(0, lkb->lkb_ua, &args);
error = cancel_lock(ls, lkb, &args);
if (error == -DLM_ECANCEL)
error = 0;
return error;
}
/* The force flag allows the unlock to go ahead even if the lkb isn't granted.
Regardless of what rsb queue the lock is on, it's removed and freed. */
static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
struct dlm_args args;
int error;
set_unlock_args(DLM_LKF_FORCEUNLOCK, lkb->lkb_ua, &args);
error = unlock_lock(ls, lkb, &args);
if (error == -DLM_EUNLOCK)
error = 0;
return error;
}
/* We have to release the clear_proc_locks mutex before calling
unlock_proc_lock() (which does lock_rsb) to avoid deadlocking with a
received message that does lock_rsb followed by dlm_user_add_ast() */
static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
struct dlm_user_proc *proc)
{
struct dlm_lkb *lkb = NULL;
mutex_lock(&ls->ls_clear_proc_locks);
if (list_empty(&proc->locks))
goto out;
lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
list_del_init(&lkb->lkb_ownqueue);
if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
lkb->lkb_flags |= DLM_IFL_ORPHAN;
else
lkb->lkb_flags |= DLM_IFL_DEAD;
out:
mutex_unlock(&ls->ls_clear_proc_locks);
return lkb;
}
/* The ls_clear_proc_locks mutex protects against dlm_user_add_asts() which
1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
which we clear here. */
/* proc CLOSING flag is set so no more device_reads should look at proc->asts
list, and no more device_writes should add lkb's to proc->locks list; so we
shouldn't need to take asts_spin or locks_spin here. this assumes that
device reads/writes/closes are serialized -- FIXME: we may need to serialize
them ourselves. */
void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
struct dlm_lkb *lkb, *safe;
dlm_lock_recovery(ls);
while (1) {
lkb = del_proc_lock(ls, proc);
if (!lkb)
break;
del_timeout(lkb);
if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
orphan_proc_lock(ls, lkb);
else
unlock_proc_lock(ls, lkb);
/* this removes the reference for the proc->locks list
added by dlm_user_request, it may result in the lkb
being freed */
dlm_put_lkb(lkb);
}
mutex_lock(&ls->ls_clear_proc_locks);
/* in-progress unlocks */
list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
list_del_init(&lkb->lkb_ownqueue);
lkb->lkb_flags |= DLM_IFL_DEAD;
dlm_put_lkb(lkb);
}
list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
lkb->lkb_ast_type = 0;
list_del(&lkb->lkb_astqueue);
dlm_put_lkb(lkb);
}
mutex_unlock(&ls->ls_clear_proc_locks);
dlm_unlock_recovery(ls);
}
static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
struct dlm_lkb *lkb, *safe;
while (1) {
lkb = NULL;
spin_lock(&proc->locks_spin);
if (!list_empty(&proc->locks)) {
lkb = list_entry(proc->locks.next, struct dlm_lkb,
lkb_ownqueue);
list_del_init(&lkb->lkb_ownqueue);
}
spin_unlock(&proc->locks_spin);
if (!lkb)
break;
lkb->lkb_flags |= DLM_IFL_DEAD;
unlock_proc_lock(ls, lkb);
dlm_put_lkb(lkb); /* ref from proc->locks list */
}
spin_lock(&proc->locks_spin);
list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
list_del_init(&lkb->lkb_ownqueue);
lkb->lkb_flags |= DLM_IFL_DEAD;
dlm_put_lkb(lkb);
}
spin_unlock(&proc->locks_spin);
spin_lock(&proc->asts_spin);
list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
list_del(&lkb->lkb_astqueue);
dlm_put_lkb(lkb);
}
spin_unlock(&proc->asts_spin);
}
/* pid of 0 means purge all orphans */
static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
{
struct dlm_lkb *lkb, *safe;
mutex_lock(&ls->ls_orphans_mutex);
list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
if (pid && lkb->lkb_ownpid != pid)
continue;
unlock_proc_lock(ls, lkb);
list_del_init(&lkb->lkb_ownqueue);
dlm_put_lkb(lkb);
}
mutex_unlock(&ls->ls_orphans_mutex);
}
static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
{
struct dlm_message *ms;
struct dlm_mhandle *mh;
int error;
error = _create_message(ls, sizeof(struct dlm_message), nodeid,
DLM_MSG_PURGE, &ms, &mh);
if (error)
return error;
ms->m_nodeid = nodeid;
ms->m_pid = pid;
return send_message(mh, ms);
}
int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
int nodeid, int pid)
{
int error = 0;
if (nodeid != dlm_our_nodeid()) {
error = send_purge(ls, nodeid, pid);
} else {
dlm_lock_recovery(ls);
if (pid == current->pid)
purge_proc_locks(ls, proc);
else
do_purge(ls, nodeid, pid);
dlm_unlock_recovery(ls);
}
return error;
}
| gpl-2.0 |
ISTweak/android_kernel_pantech_is11pt | samples/kprobes/kprobe_example.c | 1615 | 2469 | /*
* NOTE: This example works on x86 and powerpc.
* Here's a sample kernel module showing the use of kprobes to dump a
* stack trace and selected registers when do_fork() is called.
*
* For more information on theory of operation of kprobes, see
* Documentation/kprobes.txt
*
* You will see the trace data in /var/log/messages and on the console
* whenever do_fork() is invoked to create a new process.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>
/* For each probe you need to allocate a kprobe structure */
static struct kprobe kp = {
.symbol_name = "do_fork",
};
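/*
* A probe can also be placed by raw address instead of symbol name; a
* minimal sketch (the address below is purely hypothetical):
*/
#if 0
static struct kprobe kp_by_addr = {
.addr = (kprobe_opcode_t *)0xc01234f0,
};
#endif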
/* kprobe pre_handler: called just before the probed instruction is executed */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
#ifdef CONFIG_X86
printk(KERN_INFO "pre_handler: p->addr = 0x%p, ip = %lx,"
" flags = 0x%lx\n",
p->addr, regs->ip, regs->flags);
#endif
#ifdef CONFIG_PPC
printk(KERN_INFO "pre_handler: p->addr = 0x%p, nip = 0x%lx,"
" msr = 0x%lx\n",
p->addr, regs->nip, regs->msr);
#endif
/* A dump_stack() here will give a stack backtrace */
return 0;
}
/* kprobe post_handler: called after the probed instruction is executed */
static void handler_post(struct kprobe *p, struct pt_regs *regs,
unsigned long flags)
{
#ifdef CONFIG_X86
printk(KERN_INFO "post_handler: p->addr = 0x%p, flags = 0x%lx\n",
p->addr, regs->flags);
#endif
#ifdef CONFIG_PPC
printk(KERN_INFO "post_handler: p->addr = 0x%p, msr = 0x%lx\n",
p->addr, regs->msr);
#endif
}
/*
* fault_handler: this is called if an exception is generated for any
* instruction within the pre- or post-handler, or when Kprobes
* single-steps the probed instruction.
*/
static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
printk(KERN_INFO "fault_handler: p->addr = 0x%p, trap #%dn",
p->addr, trapnr);
/* Return 0 because we don't handle the fault. */
return 0;
}
static int __init kprobe_init(void)
{
int ret;
kp.pre_handler = handler_pre;
kp.post_handler = handler_post;
kp.fault_handler = handler_fault;
ret = register_kprobe(&kp);
if (ret < 0) {
printk(KERN_INFO "register_kprobe failed, returned %d\n", ret);
return ret;
}
printk(KERN_INFO "Planted kprobe at %p\n", kp.addr);
return 0;
}
static void __exit kprobe_exit(void)
{
unregister_kprobe(&kp);
printk(KERN_INFO "kprobe at %p unregistered\n", kp.addr);
}
module_init(kprobe_init)
module_exit(kprobe_exit)
MODULE_LICENSE("GPL");
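/*
* A typical out-of-tree build and test sequence for this sample (paths
* and the module name are assumed; adjust them for your tree):
*
* make -C /lib/modules/$(uname -r)/build M=$(pwd) modules
* insmod kprobe_example.ko
* ls > /dev/null (any fork triggers do_fork() and the handlers)
* dmesg | tail (shows the pre/post handler output)
* rmmod kprobe_example
*/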
| gpl-2.0 |
AODP/android_kernel_asus_moorefield | arch/arm/mach-s5pc100/setup-sdhci-gpio.c | 2639 | 2140 | /* linux/arch/arm/plat-s5pc100/setup-sdhci-gpio.c
*
* Copyright 2009 Samsung Electronics
*
* S5PC100 - Helper functions for setting up SDHCI device(s) GPIO (HSMMC)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <plat/gpio-cfg.h>
#include <plat/sdhci.h>
void s5pc100_setup_sdhci0_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
unsigned int num;
num = width;
/* For 8-bit width, two of the data lines live on GPG1, so configure two fewer pins on GPG0 */
if (width == 8)
num = width - 2;
/* Set all the necessary GPG0/GPG1 pins to special-function 2 */
s3c_gpio_cfgrange_nopull(S5PC100_GPG0(0), 2 + num, S3C_GPIO_SFN(2));
if (width == 8)
s3c_gpio_cfgrange_nopull(S5PC100_GPG1(0), 2, S3C_GPIO_SFN(2));
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
s3c_gpio_setpull(S5PC100_GPG1(2), S3C_GPIO_PULL_UP);
s3c_gpio_cfgpin(S5PC100_GPG1(2), S3C_GPIO_SFN(2));
}
}
void s5pc100_setup_sdhci1_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
/* Set all the necessary GPG2 pins to special-function 2 */
s3c_gpio_cfgrange_nopull(S5PC100_GPG2(0), 2 + width, S3C_GPIO_SFN(2));
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
s3c_gpio_setpull(S5PC100_GPG2(6), S3C_GPIO_PULL_UP);
s3c_gpio_cfgpin(S5PC100_GPG2(6), S3C_GPIO_SFN(2));
}
}
void s5pc100_setup_sdhci2_cfg_gpio(struct platform_device *dev, int width)
{
struct s3c_sdhci_platdata *pdata = dev->dev.platform_data;
/* Set all the necessary GPG3 pins to special-function 2 */
s3c_gpio_cfgrange_nopull(S5PC100_GPG3(0), 2 + width, S3C_GPIO_SFN(2));
if (pdata->cd_type == S3C_SDHCI_CD_INTERNAL) {
s3c_gpio_setpull(S5PC100_GPG3(6), S3C_GPIO_PULL_UP);
s3c_gpio_cfgpin(S5PC100_GPG3(6), S3C_GPIO_SFN(2));
}
}
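/*
* These helpers are not called directly; board code points its SDHCI
* platform data at them. A minimal sketch, assuming the
* s3c_sdhci_platdata layout from plat/sdhci.h (the variable name is
* hypothetical):
*/
#if 0
static struct s3c_sdhci_platdata smdkc100_hsmmc0_pdata = {
.cd_type = S3C_SDHCI_CD_INTERNAL,
.cfg_gpio = s5pc100_setup_sdhci0_cfg_gpio,
};
#endif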
| gpl-2.0 |
weritos666/kernel_L7_II_KK_P715 | drivers/pinctrl/pinconf-generic.c | 3151 | 3559 | /*
* Core driver for the generic pin config portions of the pin control subsystem
*
* Copyright (C) 2011 ST-Ericsson SA
* Written on behalf of Linaro for ST-Ericsson
*
* Author: Linus Walleij <linus.walleij@linaro.org>
*
* License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(fmt) "generic pinconfig core: " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include "core.h"
#include "pinconf.h"
#ifdef CONFIG_DEBUG_FS
struct pin_config_item {
const enum pin_config_param param;
const char * const display;
const char * const format;
};
#define PCONFDUMP(a, b, c) { .param = a, .display = b, .format = c }
struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_BIAS_DISABLE, "input bias disabled", NULL),
PCONFDUMP(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, "input bias high impedance", NULL),
PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL),
PCONFDUMP(PIN_CONFIG_BIAS_PULL_DOWN, "input bias pull down", NULL),
PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL),
PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL),
PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL),
PCONFDUMP(PIN_CONFIG_INPUT_SCHMITT, "input schmitt trigger", NULL),
PCONFDUMP(PIN_CONFIG_INPUT_DEBOUNCE, "input debounce", "time units"),
PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector"),
PCONFDUMP(PIN_CONFIG_LOW_POWER_MODE, "pin low power", "mode"),
};
void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned pin)
{
const struct pinconf_ops *ops = pctldev->desc->confops;
int i;
if (!ops->is_generic)
return;
for (i = 0; i < ARRAY_SIZE(conf_items); i++) {
unsigned long config;
int ret;
/* We want to check out this parameter */
config = pinconf_to_config_packed(conf_items[i].param, 0);
ret = pin_config_get_for_pin(pctldev, pin, &config);
/* These are legal errors */
if (ret == -EINVAL || ret == -ENOTSUPP)
continue;
if (ret) {
seq_printf(s, "ERROR READING CONFIG SETTING %d ", i);
continue;
}
/* Space between multiple configs */
seq_puts(s, " ");
seq_puts(s, conf_items[i].display);
/* Print unit if available */
if (conf_items[i].format &&
pinconf_to_config_argument(config) != 0)
seq_printf(s, " (%u %s)",
pinconf_to_config_argument(config),
conf_items[i].format);
}
}
void pinconf_generic_dump_group(struct pinctrl_dev *pctldev,
struct seq_file *s, const char *gname)
{
const struct pinconf_ops *ops = pctldev->desc->confops;
int i;
if (!ops->is_generic)
return;
for (i = 0; i < ARRAY_SIZE(conf_items); i++) {
unsigned long config;
int ret;
/* We want to check out this parameter */
config = pinconf_to_config_packed(conf_items[i].param, 0);
ret = pin_config_group_get(dev_name(pctldev->dev), gname,
&config);
/* These are legal errors */
if (ret == -EINVAL || ret == -ENOTSUPP)
continue;
if (ret) {
seq_printf(s, "ERROR READING CONFIG SETTING %d ", i);
continue;
}
/* Space between multiple configs */
seq_puts(s, " ");
seq_puts(s, conf_items[i].display);
/* Print unit if available */
if (conf_items[i].format && pinconf_to_config_argument(config) != 0)
seq_printf(s, " (%u %s)",
pinconf_to_config_argument(config),
conf_items[i].format);
}
}
#endif
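/*
* The generic dump helpers above only run for pin controllers whose
* confops are marked generic. A minimal sketch of that hookup, assuming
* a driver with its own get/set callbacks (the foo_* names are
* hypothetical):
*/
#if 0
static const struct pinconf_ops foo_pinconf_ops = {
.is_generic = true,
.pin_config_get = foo_pin_config_get,
.pin_config_set = foo_pin_config_set,
};
#endif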
| gpl-2.0 |
motley-git/TF201-Kernel-Lite | net/mac80211/rc80211_minstrel_debugfs.c | 3151 | 4769 | /*
* Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Based on minstrel.c:
* Copyright (C) 2005-2007 Derek Smithies <derek@indranet.co.nz>
* Sponsored by Indranet Technologies Ltd
*
* Based on sample.c:
* Copyright (c) 2005 John Bicket
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <net/mac80211.h>
#include "rc80211_minstrel.h"
int
minstrel_stats_open(struct inode *inode, struct file *file)
{
struct minstrel_sta_info *mi = inode->i_private;
struct minstrel_debugfs_info *ms;
unsigned int i, tp, prob, eprob;
char *p;
ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL);
if (!ms)
return -ENOMEM;
file->private_data = ms;
p = ms->buf;
p += sprintf(p, "rate throughput ewma prob this prob "
"this succ/attempt success attempts\n");
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
*(p++) = (i == mi->max_tp_rate) ? 'T' : ' ';
*(p++) = (i == mi->max_tp_rate2) ? 't' : ' ';
*(p++) = (i == mi->max_prob_rate) ? 'P' : ' ';
p += sprintf(p, "%3u%s", mr->bitrate / 2,
(mr->bitrate & 1 ? ".5" : " "));
tp = mr->cur_tp / ((18000 << 10) / 96);
prob = mr->cur_prob / 18;
eprob = mr->probability / 18;
p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u "
"%3u(%3u) %8llu %8llu\n",
tp / 10, tp % 10,
eprob / 10, eprob % 10,
prob / 10, prob % 10,
mr->last_success,
mr->last_attempts,
(unsigned long long)mr->succ_hist,
(unsigned long long)mr->att_hist);
}
p += sprintf(p, "\nTotal packet count:: ideal %d "
"lookaround %d\n\n",
mi->packet_count - mi->sample_count,
mi->sample_count);
ms->len = p - ms->buf;
return 0;
}
ssize_t
minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos)
{
struct minstrel_debugfs_info *ms;
ms = file->private_data;
return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
}
int
minstrel_stats_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
static const struct file_operations minstrel_stat_fops = {
.owner = THIS_MODULE,
.open = minstrel_stats_open,
.read = minstrel_stats_read,
.release = minstrel_stats_release,
.llseek = default_llseek,
};
void
minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
{
struct minstrel_sta_info *mi = priv_sta;
mi->dbg_stats = debugfs_create_file("rc_stats", S_IRUGO, dir, mi,
&minstrel_stat_fops);
}
void
minstrel_remove_sta_debugfs(void *priv, void *priv_sta)
{
struct minstrel_sta_info *mi = priv_sta;
debugfs_remove(mi->dbg_stats);
}
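/*
* The rc_stats file created above typically appears per station under
* the mac80211 debugfs tree, e.g. (path layout assumed; the phy,
* interface and station components vary):
*
* /sys/kernel/debug/ieee80211/phy0/netdev:wlan0/stations/<mac>/rc_stats
*/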
| gpl-2.0 |
wanam/Adam-Kernel-GS4-LTE | drivers/leds/leds-regulator.c | 4943 | 5254 | /*
* leds-regulator.c - LED class driver for regulator driven LEDs.
*
* Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
*
* Inspired by leds-wm8350 driver.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/leds.h>
#include <linux/leds-regulator.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#define to_regulator_led(led_cdev) \
container_of(led_cdev, struct regulator_led, cdev)
struct regulator_led {
struct led_classdev cdev;
enum led_brightness value;
int enabled;
struct mutex mutex;
struct work_struct work;
struct regulator *vcc;
};
static inline int led_regulator_get_max_brightness(struct regulator *supply)
{
int ret;
int voltage = regulator_list_voltage(supply, 0);
if (voltage <= 0)
return 1;
/* even if the regulator can't change voltages,
* we still assume it can change status
* and that the LED can be turned on and off.
*/
ret = regulator_set_voltage(supply, voltage, voltage);
if (ret < 0)
return 1;
return regulator_count_voltages(supply);
}
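/*
* Worked example of the mapping used here: a supply exposing four
* selectable voltages yields max_brightness 4, and a requested
* brightness N in 1..4 is translated back to
* regulator_list_voltage(supply, N - 1) by the helper below.
*/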
static int led_regulator_get_voltage(struct regulator *supply,
enum led_brightness brightness)
{
if (brightness == 0)
return -EINVAL;
return regulator_list_voltage(supply, brightness - 1);
}
static void regulator_led_enable(struct regulator_led *led)
{
int ret;
if (led->enabled)
return;
ret = regulator_enable(led->vcc);
if (ret != 0) {
dev_err(led->cdev.dev, "Failed to enable vcc: %d\n", ret);
return;
}
led->enabled = 1;
}
static void regulator_led_disable(struct regulator_led *led)
{
int ret;
if (!led->enabled)
return;
ret = regulator_disable(led->vcc);
if (ret != 0) {
dev_err(led->cdev.dev, "Failed to disable vcc: %d\n", ret);
return;
}
led->enabled = 0;
}
static void regulator_led_set_value(struct regulator_led *led)
{
int voltage;
int ret;
mutex_lock(&led->mutex);
if (led->value == LED_OFF) {
regulator_led_disable(led);
goto out;
}
if (led->cdev.max_brightness > 1) {
voltage = led_regulator_get_voltage(led->vcc, led->value);
dev_dbg(led->cdev.dev, "brightness: %d voltage: %d\n",
led->value, voltage);
ret = regulator_set_voltage(led->vcc, voltage, voltage);
if (ret != 0)
dev_err(led->cdev.dev, "Failed to set voltage %d: %d\n",
voltage, ret);
}
regulator_led_enable(led);
out:
mutex_unlock(&led->mutex);
}
static void led_work(struct work_struct *work)
{
struct regulator_led *led;
led = container_of(work, struct regulator_led, work);
regulator_led_set_value(led);
}
static void regulator_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct regulator_led *led = to_regulator_led(led_cdev);
led->value = value;
schedule_work(&led->work);
}
static int __devinit regulator_led_probe(struct platform_device *pdev)
{
struct led_regulator_platform_data *pdata = pdev->dev.platform_data;
struct regulator_led *led;
struct regulator *vcc;
int ret = 0;
if (pdata == NULL) {
dev_err(&pdev->dev, "no platform data\n");
return -ENODEV;
}
vcc = regulator_get_exclusive(&pdev->dev, "vled");
if (IS_ERR(vcc)) {
dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name);
return PTR_ERR(vcc);
}
led = kzalloc(sizeof(*led), GFP_KERNEL);
if (led == NULL) {
ret = -ENOMEM;
goto err_vcc;
}
led->cdev.max_brightness = led_regulator_get_max_brightness(vcc);
if (pdata->brightness > led->cdev.max_brightness) {
dev_err(&pdev->dev, "Invalid default brightness %d\n",
pdata->brightness);
ret = -EINVAL;
goto err_led;
}
led->value = pdata->brightness;
led->cdev.brightness_set = regulator_led_brightness_set;
led->cdev.name = pdata->name;
led->cdev.flags |= LED_CORE_SUSPENDRESUME;
led->vcc = vcc;
/* to handle correctly an already enabled regulator */
if (regulator_is_enabled(led->vcc))
led->enabled = 1;
mutex_init(&led->mutex);
INIT_WORK(&led->work, led_work);
platform_set_drvdata(pdev, led);
ret = led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0) {
cancel_work_sync(&led->work);
goto err_led;
}
/* to expose the default value to userspace */
led->cdev.brightness = led->value;
/* Set the default led status */
regulator_led_set_value(led);
return 0;
err_led:
kfree(led);
err_vcc:
regulator_put(vcc);
return ret;
}
static int __devexit regulator_led_remove(struct platform_device *pdev)
{
struct regulator_led *led = platform_get_drvdata(pdev);
led_classdev_unregister(&led->cdev);
cancel_work_sync(&led->work);
regulator_led_disable(led);
regulator_put(led->vcc);
kfree(led);
return 0;
}
static struct platform_driver regulator_led_driver = {
.driver = {
.name = "leds-regulator",
.owner = THIS_MODULE,
},
.probe = regulator_led_probe,
.remove = __devexit_p(regulator_led_remove),
};
module_platform_driver(regulator_led_driver);
MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
MODULE_DESCRIPTION("Regulator driven LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:leds-regulator");
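/*
* A minimal sketch of the board-side wiring this driver expects; every
* name below is hypothetical except the "leds-regulator" device name
* and the "vled" supply id used above:
*/
#if 0
static struct led_regulator_platform_data board_led_pdata = {
.name = "board:green:status",
.brightness = LED_OFF,
};

static struct platform_device board_led_dev = {
.name = "leds-regulator",
.id = 0,
.dev = {
.platform_data = &board_led_pdata,
},
};
#endif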
| gpl-2.0 |
boa19861105/B2-Test | drivers/scsi/bfa/bfa_ioc_ct.c | 4943 | 25739 | /*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include "bfad_drv.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
BFA_TRC_FILE(CNA, IOC_CT);
#define bfa_ioc_ct_sync_pos(__ioc) \
((uint32_t) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH 16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
/*
* forward declarations
*/
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc);
static bfa_boolean_t bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc);
static struct bfa_ioc_hwif_s hwif_ct;
static struct bfa_ioc_hwif_s hwif_ct2;
/*
* Return true if firmware of current driver matches the running firmware.
*/
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
enum bfi_ioc_state ioc_fwstate;
u32 usecnt;
struct bfi_ioc_image_hdr_s fwhdr;
/*
* If bios boot (flash based) -- do not increment usage count
*/
if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
BFA_IOC_FWIMG_MINSZ)
return BFA_TRUE;
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
/*
* If usage count is 0, always return TRUE.
*/
if (usecnt == 0) {
writel(1, ioc->ioc_regs.ioc_usage_reg);
readl(ioc->ioc_regs.ioc_usage_sem_reg);
writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
writel(0, ioc->ioc_regs.ioc_fail_sync);
bfa_trc(ioc, usecnt);
return BFA_TRUE;
}
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
bfa_trc(ioc, ioc_fwstate);
/*
* The use count cannot be non-zero while the chip is in the uninitialized state.
*/
WARN_ON(ioc_fwstate == BFI_IOC_UNINIT);
/*
* Check if another driver with a different firmware is active
*/
bfa_ioc_fwver_get(ioc, &fwhdr);
if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
readl(ioc->ioc_regs.ioc_usage_sem_reg);
writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
bfa_trc(ioc, usecnt);
return BFA_FALSE;
}
/*
* Same firmware version. Increment the reference count.
*/
usecnt++;
writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
readl(ioc->ioc_regs.ioc_usage_sem_reg);
writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
bfa_trc(ioc, usecnt);
return BFA_TRUE;
}
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
u32 usecnt;
/*
* If bios boot (flash based) -- do not decrement usage count
*/
if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
BFA_IOC_FWIMG_MINSZ)
return;
/*
* decrement usage count
*/
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
WARN_ON(usecnt <= 0);
usecnt--;
writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
bfa_trc(ioc, usecnt);
readl(ioc->ioc_regs.ioc_usage_sem_reg);
writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
}
/*
* Notify other functions on HB failure.
*/
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc_s *ioc)
{
if (bfa_ioc_is_cna(ioc)) {
writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
/* Wait for halt to take effect */
readl(ioc->ioc_regs.ll_halt);
readl(ioc->ioc_regs.alt_ll_halt);
} else {
writel(~0U, ioc->ioc_regs.err_set);
readl(ioc->ioc_regs.err_set);
}
}
/*
* Host to LPU mailbox message addresses
*/
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } ct_fnreg[] = {
{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
/*
* Host <-> LPU mailbox command/status registers - port 0
*/
static struct { u32 hfn, lpu; } ct_p0reg[] = {
{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};
/*
* Host <-> LPU mailbox command/status registers - port 1
*/
static struct { u32 hfn, lpu; } ct_p1reg[] = {
{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};
static struct { uint32_t hfn_mbox, lpu_mbox, hfn_pgn, hfn, lpu, lpu_read; }
ct2_reg[] = {
{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
CT2_HOSTFN_LPU0_READ_STAT},
{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
CT2_HOSTFN_LPU1_READ_STAT},
};
static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
void __iomem *rb;
int pcifn = bfa_ioc_pcifn(ioc);
rb = bfa_ioc_bar0(ioc);
ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;
if (ioc->port_id == 0) {
ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
} else {
ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
}
/*
* PSS control registers
*/
ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_LCLK_CTL_REG);
ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_SCLK_CTL_REG);
/*
* IOC semaphore registers and serialization
*/
ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
/*
* sram memory access
*/
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
/*
* err set reg : for notification of hb failure in fcmode
*/
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
{
void __iomem *rb;
int port = bfa_ioc_portid(ioc);
rb = bfa_ioc_bar0(ioc);
ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;
if (port == 0) {
ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
} else {
ioc->ioc_regs.heartbeat = (rb + CT2_BFA_IOC1_HBEAT_REG);
ioc->ioc_regs.ioc_fwstate = (rb + CT2_BFA_IOC1_STATE_REG);
ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
}
/*
* PSS control registers
*/
ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + CT2_APP_PLL_LCLK_CTL_REG);
ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + CT2_APP_PLL_SCLK_CTL_REG);
/*
* IOC semaphore registers and serialization
*/
ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
/*
* sram memory access
*/
ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
/*
* err set reg : for notification of hb failure in fcmode
*/
ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/*
* Initialize IOC to port mapping.
*/
#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
void __iomem *rb = ioc->pcidev.pci_bar_kva;
u32 r32;
/*
* For catapult, base port id on personality register and IOC type
*/
r32 = readl(rb + FNC_PERS_REG);
r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
bfa_trc(ioc, bfa_ioc_pcifn(ioc));
bfa_trc(ioc, ioc->port_id);
}
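/*
 * Hedged illustration (not from the original driver): the personality
 * register packs one byte of configuration per PCI function, so the
 * decode above is a shift by FNC_PERS_FN_SHIFT() followed by a mask:
 */
static inline u32 bfa_ioc_ct_example_port_of_fn(u32 fnc_pers, int fn)
{
	u32 field = fnc_pers >> FNC_PERS_FN_SHIFT(fn);	/* 8 bits per fn */

	return (field & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}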
static void
bfa_ioc_ct2_map_port(struct bfa_ioc_s *ioc)
{
void __iomem *rb = ioc->pcidev.pci_bar_kva;
u32 r32;
r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
bfa_trc(ioc, bfa_ioc_pcifn(ioc));
bfa_trc(ioc, ioc->port_id);
}
/*
* Set interrupt mode for a function: INTX or MSIX
*/
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
void __iomem *rb = ioc->pcidev.pci_bar_kva;
u32 r32, mode;
r32 = readl(rb + FNC_PERS_REG);
bfa_trc(ioc, r32);
mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
__F0_INTX_STATUS;
/*
* If already in desired mode, do not change anything
*/
if ((!msix && mode) || (msix && !mode))
return;
if (msix)
mode = __F0_INTX_STATUS_MSIX;
else
mode = __F0_INTX_STATUS_INTA;
r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
bfa_trc(ioc, r32);
writel(r32, rb + FNC_PERS_REG);
}
bfa_boolean_t
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc_s *ioc)
{
u32 r32;
r32 = readl(ioc->ioc_regs.lpu_read_stat);
if (r32) {
writel(1, ioc->ioc_regs.lpu_read_stat);
return BFA_TRUE;
}
return BFA_FALSE;
}
/*
* Cleanup hw semaphore and usecnt registers
*/
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{
if (bfa_ioc_is_cna(ioc)) {
bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
writel(0, ioc->ioc_regs.ioc_usage_reg);
readl(ioc->ioc_regs.ioc_usage_sem_reg);
writel(1, ioc->ioc_regs.ioc_usage_sem_reg);
}
/*
* Read the hw sem reg to make sure that it is locked
* before we clear it. If it is not locked, writing 1
* will lock it instead of clearing it.
*/
readl(ioc->ioc_regs.ioc_sem_reg);
writel(1, ioc->ioc_regs.ioc_sem_reg);
}
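/*
 * Sketch of the hardware-semaphore convention relied on above (my
 * reading of the comment, offered as illustration rather than as a
 * specification): a read takes the semaphore when it is free, and
 * writing 1 releases it -- so a release is always read-then-write:
 */
static inline void bfa_ioc_ct_example_sem_release(void __iomem *sem_reg)
{
	(void) readl(sem_reg);	/* make sure we hold it; a read locks if free */
	writel(1, sem_reg);	/* now the write releases instead of acquiring */
}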
static bfa_boolean_t
bfa_ioc_ct_sync_start(struct bfa_ioc_s *ioc)
{
uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
/*
* Driver load time. If the sync required bit for this PCI fn
* is set, it is due to an unclean exit by the driver for this
* PCI fn in the previous incarnation. Whoever comes here first
* should clean it up, no matter which PCI fn.
*/
if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
writel(0, ioc->ioc_regs.ioc_fail_sync);
writel(1, ioc->ioc_regs.ioc_usage_reg);
writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
return BFA_TRUE;
}
return bfa_ioc_ct_sync_complete(ioc);
}
/*
* Synchronized IOC failure processing routines
*/
static void
bfa_ioc_ct_sync_join(struct bfa_ioc_s *ioc)
{
uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
uint32_t sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}
static void
bfa_ioc_ct_sync_leave(struct bfa_ioc_s *ioc)
{
uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
uint32_t sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
bfa_ioc_ct_sync_pos(ioc);
writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}
static void
bfa_ioc_ct_sync_ack(struct bfa_ioc_s *ioc)
{
uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
writel((r32 | bfa_ioc_ct_sync_pos(ioc)),
ioc->ioc_regs.ioc_fail_sync);
}
static bfa_boolean_t
bfa_ioc_ct_sync_complete(struct bfa_ioc_s *ioc)
{
uint32_t r32 = readl(ioc->ioc_regs.ioc_fail_sync);
uint32_t sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
uint32_t sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
uint32_t tmp_ackd;
if (sync_ackd == 0)
return BFA_TRUE;
/*
* The check below is to see whether any other PCI fn
* has reinitialized the ASIC (reset sync_ackd bits)
* and failed again while this IOC was waiting for hw
* semaphore (in bfa_iocpf_sm_semwait()).
*/
tmp_ackd = sync_ackd;
if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
!(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
if (sync_reqd == sync_ackd) {
writel(bfa_ioc_ct_clear_sync_ackd(r32),
ioc->ioc_regs.ioc_fail_sync);
writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
return BFA_TRUE;
}
/*
* If another PCI fn reinitialized and failed again while
* this IOC was waiting for hw sem, the sync_ackd bit for
* this IOC need to be set again to allow reinitialization.
*/
if (tmp_ackd != sync_ackd)
writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
return BFA_FALSE;
}
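/*
 * Illustrative model of the fail-sync handshake above (assumption:
 * the exact bit positions live in bfa_ioc_ct_sync_pos() and friends,
 * defined elsewhere in this driver). Each PCI fn raises a "sync
 * required" bit on join and a matching ack bit on failure; recovery
 * may proceed only once the two bit sets agree:
 */
static inline bfa_boolean_t bfa_ioc_ct_example_sync_done(u32 sync_reqd,
			u32 sync_ackd)
{
	/* every requester has acknowledged -> safe to reinitialize */
	return (sync_reqd == sync_ackd) ? BFA_TRUE : BFA_FALSE;
}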
/*
* Called from bfa_ioc_attach() to map asic specific calls.
*/
static void
bfa_ioc_set_ctx_hwif(struct bfa_ioc_s *ioc, struct bfa_ioc_hwif_s *hwif)
{
hwif->ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
hwif->ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
hwif->ioc_notify_fail = bfa_ioc_ct_notify_fail;
hwif->ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
hwif->ioc_sync_start = bfa_ioc_ct_sync_start;
hwif->ioc_sync_join = bfa_ioc_ct_sync_join;
hwif->ioc_sync_leave = bfa_ioc_ct_sync_leave;
hwif->ioc_sync_ack = bfa_ioc_ct_sync_ack;
hwif->ioc_sync_complete = bfa_ioc_ct_sync_complete;
}
/*
* Called from bfa_ioc_attach() to map asic specific calls.
*/
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
bfa_ioc_set_ctx_hwif(ioc, &hwif_ct);
hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
ioc->ioc_hwif = &hwif_ct;
}
/*
* Called from bfa_ioc_attach() to map asic specific calls.
*/
void
bfa_ioc_set_ct2_hwif(struct bfa_ioc_s *ioc)
{
bfa_ioc_set_ctx_hwif(ioc, &hwif_ct2);
hwif_ct2.ioc_pll_init = bfa_ioc_ct2_pll_init;
hwif_ct2.ioc_reg_init = bfa_ioc_ct2_reg_init;
hwif_ct2.ioc_map_port = bfa_ioc_ct2_map_port;
hwif_ct2.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat;
hwif_ct2.ioc_isr_mode_set = NULL;
ioc->ioc_hwif = &hwif_ct2;
}
/*
* Workaround for MSI-X resource allocation for catapult-2 with no asic block
*/
#define HOSTFN_MSIX_DEFAULT 64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
#define __MSIX_VT_NUMVT__MK 0x003ff800
#define __MSIX_VT_NUMVT__SH 11
#define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_ 0x000007ff
void
bfa_ioc_ct2_poweron(struct bfa_ioc_s *ioc)
{
void __iomem *rb = ioc->pcidev.pci_bar_kva;
u32 r32;
r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
if (r32 & __MSIX_VT_NUMVT__MK) {
writel(r32 & __MSIX_VT_OFST_,
rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
return;
}
writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
rb + HOSTFN_MSIX_VT_OFST_NUMVT);
writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
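/*
 * Field-packing sketch for the workaround above (illustration only;
 * the masks are the ones defined a few lines up): NUMVT and the
 * vector offset share one register, so programming "64 vectors
 * starting at fn * 64" is an OR of the shifted count and the offset:
 */
static inline u32 bfa_ioc_ct2_example_msix_numvt(int pcifn)
{
	return __MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		(HOSTFN_MSIX_DEFAULT * pcifn);
}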
bfa_status_t
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
u32 pll_sclk, pll_fclk, r32;
bfa_boolean_t fcmode = (mode == BFI_ASIC_MODE_FC);
pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
__APP_PLL_SCLK_JITLMT0_1(3U) |
__APP_PLL_SCLK_CNTLMT0_1(1U);
pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
__APP_PLL_LCLK_JITLMT0_1(3U) |
__APP_PLL_LCLK_CNTLMT0_1(1U);
if (fcmode) {
writel(0, (rb + OP_MODE));
writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
__APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
} else {
writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
}
writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET,
rb + APP_PLL_SCLK_CTL_REG);
writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET,
rb + APP_PLL_LCLK_CTL_REG);
writel(pll_sclk | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
__APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
writel(pll_fclk | __APP_PLL_LCLK_LOGIC_SOFT_RESET |
__APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
readl(rb + HOSTFN0_INT_MSK);
udelay(2000);
writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
writel(pll_sclk | __APP_PLL_SCLK_ENABLE, rb + APP_PLL_SCLK_CTL_REG);
writel(pll_fclk | __APP_PLL_LCLK_ENABLE, rb + APP_PLL_LCLK_CTL_REG);
if (!fcmode) {
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
}
r32 = readl((rb + PSS_CTL_REG));
r32 &= ~__PSS_LMEM_RESET;
writel(r32, (rb + PSS_CTL_REG));
udelay(1000);
if (!fcmode) {
writel(0, (rb + PMM_1T_RESET_REG_P0));
writel(0, (rb + PMM_1T_RESET_REG_P1));
}
writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
udelay(1000);
r32 = readl((rb + MBIST_STAT_REG));
writel(0, (rb + MBIST_CTL_REG));
return BFA_STATUS_OK;
}
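/*
 * Hedged summary of the PLL bring-up pattern used above (hypothetical
 * helper; the real sequence also masks and clears interrupts around
 * each step): program the dividers with LOGIC_SOFT_RESET held, enable
 * the PLL while still in soft reset, wait, then drop the soft reset:
 */
static inline void bfa_ioc_ct_example_pll_enable(void __iomem *ctl_reg,
			u32 pll_cfg)
{
	writel(pll_cfg | __APP_PLL_SCLK_LOGIC_SOFT_RESET, ctl_reg);
	writel(pll_cfg | __APP_PLL_SCLK_LOGIC_SOFT_RESET |
		__APP_PLL_SCLK_ENABLE, ctl_reg);
	udelay(2000);				/* fixed settle, no lock poll */
	writel(pll_cfg | __APP_PLL_SCLK_ENABLE, ctl_reg);
}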
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
u32 r32;
/*
* put s_clk PLL and PLL FSM in reset
*/
r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
__APP_PLL_SCLK_LOGIC_SOFT_RESET);
writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
/*
* Ignore mode and program for the max clock (which is FC16)
* Firmware/NFC will do the PLL init appropriately
*/
r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));
/*
* while doing PLL init, don't clock gate the ethernet subsystem
*/
r32 = readl((rb + CT2_CHIP_MISC_PRG));
writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
r32 = readl((rb + CT2_PCIE_MISC_REG));
writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
/*
* set sclk value
*/
r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
__APP_PLL_SCLK_CLK_DIV2);
writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));
/*
* poll for s_clk lock or delay 1ms
*/
udelay(1000);
}
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
u32 r32;
/*
* put l_clk PLL and PLL FSM in reset
*/
r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
__APP_PLL_LCLK_LOGIC_SOFT_RESET);
writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
/*
* set LPU speed (set for FC16 which will work for other modes)
*/
r32 = readl((rb + CT2_CHIP_MISC_PRG));
writel(r32, (rb + CT2_CHIP_MISC_PRG));
/*
* set LPU half speed (set for FC16 which will work for other modes)
*/
r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
/*
* set lclk for mode (set for FC16)
*/
r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
r32 |= 0x20c1731b;
writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));
/*
* poll for l_clk lock or delay 1ms
*/
udelay(1000);
}
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
u32 r32;
r32 = readl((rb + PSS_CTL_REG));
r32 &= ~__PSS_LMEM_RESET;
writel(r32, (rb + PSS_CTL_REG));
udelay(1000);
writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
udelay(1000);
writel(0, (rb + CT2_MBIST_CTL_REG));
}
void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
u32 r32;
bfa_ioc_ct2_sclk_init(rb);
bfa_ioc_ct2_lclk_init(rb);
/*
* release soft reset on s_clk
*/
r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
(rb + CT2_APP_PLL_SCLK_CTL_REG));
/*
* release soft reset on l_clk
*/
r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
(rb + CT2_APP_PLL_LCLK_CTL_REG));
/* put port0, port1 MAC & AHB in reset */
writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
rb + CT2_CSI_MAC_CONTROL_REG(0));
writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
rb + CT2_CSI_MAC_CONTROL_REG(1));
}
#define CT2_NFC_MAX_DELAY 1000
#define CT2_NFC_VER_VALID 0x143
#define BFA_IOC_PLL_POLL 1000000
static bfa_boolean_t
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
u32 r32;
r32 = readl(rb + CT2_NFC_CSR_SET_REG);
if (r32 & __NFC_CONTROLLER_HALTED)
return BFA_TRUE;
return BFA_FALSE;
}
static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
u32 r32;
int i;
writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
r32 = readl(rb + CT2_NFC_CSR_SET_REG);
if (!(r32 & __NFC_CONTROLLER_HALTED))
return;
udelay(1000);
}
WARN_ON(1);
}
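/*
 * The loop above is the usual bounded-poll idiom; a generic sketch at
 * the same 1 ms granularity (hypothetical helper, not part of the
 * original source):
 */
static inline bfa_boolean_t bfa_ioc_ct2_example_poll_clear(void __iomem *reg,
			u32 bit, int max_ms)
{
	int i;

	for (i = 0; i < max_ms; i++) {
		if (!(readl(reg) & bit))
			return BFA_TRUE;	/* bit cleared in time */
		udelay(1000);
	}
	return BFA_FALSE;			/* timed out */
}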
bfa_status_t
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
{
u32 wgn, r32, nfc_ver, i;
wgn = readl(rb + CT2_WGN_STATUS);
nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
(nfc_ver >= CT2_NFC_VER_VALID)) {
if (bfa_ioc_ct2_nfc_halted(rb))
bfa_ioc_ct2_nfc_resume(rb);
writel(__RESET_AND_START_SCLK_LCLK_PLLS,
rb + CT2_CSI_FW_CTL_SET_REG);
for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
break;
}
WARN_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
break;
}
WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
udelay(1000);
r32 = readl(rb + CT2_CSI_FW_CTL_REG);
WARN_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
} else {
writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_SET_REG);
for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
r32 = readl(rb + CT2_NFC_CSR_SET_REG);
if (r32 & __NFC_CONTROLLER_HALTED)
break;
udelay(1000);
}
bfa_ioc_ct2_mac_reset(rb);
bfa_ioc_ct2_sclk_init(rb);
bfa_ioc_ct2_lclk_init(rb);
/*
* release soft reset on s_clk
*/
r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG);
writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
(rb + CT2_APP_PLL_SCLK_CTL_REG));
/*
* release soft reset on l_clk
*/
r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
(rb + CT2_APP_PLL_LCLK_CTL_REG));
}
/*
* Announce flash device presence, if flash was corrupted.
*/
if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
r32 = readl(rb + PSS_GPIO_OUT_REG);
writel(r32 & ~1, (rb + PSS_GPIO_OUT_REG));
r32 = readl(rb + PSS_GPIO_OE_REG);
writel(r32 | 1, (rb + PSS_GPIO_OE_REG));
}
/*
* Mask the interrupts and clear any
* pending interrupts.
*/
writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
/* For first time initialization, no need to clear interrupts */
r32 = readl(rb + HOST_SEM5_REG);
if (r32 & 0x1) {
r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT);
if (r32 == 1) {
writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT);
readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
}
r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
if (r32 == 1) {
writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT);
readl(rb + CT2_LPU1_HOSTFN_CMD_STAT);
}
}
bfa_ioc_ct2_mem_init(rb);
writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG);
writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG);
return BFA_STATUS_OK;
}
| gpl-2.0 |
Renzo-Olivares/android_43_kernel_htc_monarudo | fs/configfs/inode.c | 4943 | 7745 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* inode.c - basic inode and dentry operations.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*
* Please see Documentation/filesystems/configfs/configfs.txt for more
* information.
*/
#undef DEBUG
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/backing-dev.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/configfs.h>
#include "configfs_internal.h"
#ifdef CONFIG_LOCKDEP
static struct lock_class_key default_group_class[MAX_LOCK_DEPTH];
#endif
static const struct address_space_operations configfs_aops = {
.readpage = simple_readpage,
.write_begin = simple_write_begin,
.write_end = simple_write_end,
};
static struct backing_dev_info configfs_backing_dev_info = {
.name = "configfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
static const struct inode_operations configfs_inode_operations ={
.setattr = configfs_setattr,
};
int configfs_setattr(struct dentry * dentry, struct iattr * iattr)
{
struct inode * inode = dentry->d_inode;
struct configfs_dirent * sd = dentry->d_fsdata;
struct iattr * sd_iattr;
unsigned int ia_valid = iattr->ia_valid;
int error;
if (!sd)
return -EINVAL;
sd_iattr = sd->s_iattr;
if (!sd_iattr) {
/* setting attributes for the first time, allocate now */
sd_iattr = kzalloc(sizeof(struct iattr), GFP_KERNEL);
if (!sd_iattr)
return -ENOMEM;
/* assign default attributes */
sd_iattr->ia_mode = sd->s_mode;
sd_iattr->ia_uid = 0;
sd_iattr->ia_gid = 0;
sd_iattr->ia_atime = sd_iattr->ia_mtime = sd_iattr->ia_ctime = CURRENT_TIME;
sd->s_iattr = sd_iattr;
}
/* attributes were changed at least once in the past */
error = simple_setattr(dentry, iattr);
if (error)
return error;
if (ia_valid & ATTR_UID)
sd_iattr->ia_uid = iattr->ia_uid;
if (ia_valid & ATTR_GID)
sd_iattr->ia_gid = iattr->ia_gid;
if (ia_valid & ATTR_ATIME)
sd_iattr->ia_atime = timespec_trunc(iattr->ia_atime,
inode->i_sb->s_time_gran);
if (ia_valid & ATTR_MTIME)
sd_iattr->ia_mtime = timespec_trunc(iattr->ia_mtime,
inode->i_sb->s_time_gran);
if (ia_valid & ATTR_CTIME)
sd_iattr->ia_ctime = timespec_trunc(iattr->ia_ctime,
inode->i_sb->s_time_gran);
if (ia_valid & ATTR_MODE) {
umode_t mode = iattr->ia_mode;
if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
mode &= ~S_ISGID;
sd_iattr->ia_mode = sd->s_mode = mode;
}
return error;
}
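/*
 * Illustration (hypothetical helper, not in the original file): the
 * lazy allocation above is a common pattern -- persistent attributes
 * are allocated only on the first setattr and seeded from the
 * dirent's defaults:
 */
static inline struct iattr *
configfs_example_lazy_iattr(struct configfs_dirent *sd)
{
	if (!sd->s_iattr) {
		sd->s_iattr = kzalloc(sizeof(struct iattr), GFP_KERNEL);
		if (sd->s_iattr)
			sd->s_iattr->ia_mode = sd->s_mode; /* default mode */
	}
	return sd->s_iattr;
}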
static inline void set_default_inode_attr(struct inode * inode, umode_t mode)
{
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
}
static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
{
inode->i_mode = iattr->ia_mode;
inode->i_uid = iattr->ia_uid;
inode->i_gid = iattr->ia_gid;
inode->i_atime = iattr->ia_atime;
inode->i_mtime = iattr->ia_mtime;
inode->i_ctime = iattr->ia_ctime;
}
struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent *sd,
struct super_block *s)
{
struct inode * inode = new_inode(s);
if (inode) {
inode->i_ino = get_next_ino();
inode->i_mapping->a_ops = &configfs_aops;
inode->i_mapping->backing_dev_info = &configfs_backing_dev_info;
inode->i_op = &configfs_inode_operations;
if (sd->s_iattr) {
/* configfs_dirent has non-default attributes
 * get them for the new inode from persistent copy
 * in configfs_dirent
*/
set_inode_attr(inode, sd->s_iattr);
} else
set_default_inode_attr(inode, mode);
}
return inode;
}
#ifdef CONFIG_LOCKDEP
static void configfs_set_inode_lock_class(struct configfs_dirent *sd,
struct inode *inode)
{
int depth = sd->s_depth;
if (depth > 0) {
if (depth <= ARRAY_SIZE(default_group_class)) {
lockdep_set_class(&inode->i_mutex,
&default_group_class[depth - 1]);
} else {
/*
* In practice the maximum level of locking depth is
* already reached. Just inform about possible reasons.
*/
printk(KERN_INFO "configfs: Too many levels of inodes"
" for the locking correctness validator.\n");
printk(KERN_INFO "Spurious warnings may appear.\n");
}
}
}
#else /* CONFIG_LOCKDEP */
static void configfs_set_inode_lock_class(struct configfs_dirent *sd,
struct inode *inode)
{
}
#endif /* CONFIG_LOCKDEP */
int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct inode *))
{
int error = 0;
struct inode *inode = NULL;
struct configfs_dirent *sd;
struct inode *p_inode;
if (!dentry)
return -ENOENT;
if (dentry->d_inode)
return -EEXIST;
sd = dentry->d_fsdata;
inode = configfs_new_inode(mode, sd, dentry->d_sb);
if (!inode)
return -ENOMEM;
p_inode = dentry->d_parent->d_inode;
p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME;
configfs_set_inode_lock_class(sd, inode);
if (init) {
error = init(inode);
if (error) {
iput(inode);
return error;
}
}
d_instantiate(dentry, inode);
if (S_ISDIR(mode) || S_ISLNK(mode))
dget(dentry); /* pin link and directory dentries in core */
return error;
}
/*
* Get the name for corresponding element represented by the given configfs_dirent
*/
const unsigned char * configfs_get_name(struct configfs_dirent *sd)
{
struct configfs_attribute *attr;
BUG_ON(!sd || !sd->s_element);
/* These always have a dentry, so use that */
if (sd->s_type & (CONFIGFS_DIR | CONFIGFS_ITEM_LINK))
return sd->s_dentry->d_name.name;
if (sd->s_type & CONFIGFS_ITEM_ATTR) {
attr = sd->s_element;
return attr->ca_name;
}
return NULL;
}
/*
* Unhashes the dentry corresponding to given configfs_dirent
* Called with parent inode's i_mutex held.
*/
void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
{
struct dentry * dentry = sd->s_dentry;
if (dentry) {
spin_lock(&dentry->d_lock);
if (!(d_unhashed(dentry) && dentry->d_inode)) {
dget_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
simple_unlink(parent->d_inode, dentry);
} else
spin_unlock(&dentry->d_lock);
}
}
void configfs_hash_and_remove(struct dentry * dir, const char * name)
{
struct configfs_dirent * sd;
struct configfs_dirent * parent_sd = dir->d_fsdata;
if (dir->d_inode == NULL)
/* no inode means this hasn't been made visible yet */
return;
mutex_lock(&dir->d_inode->i_mutex);
list_for_each_entry(sd, &parent_sd->s_children, s_sibling) {
if (!sd->s_element)
continue;
if (!strcmp(configfs_get_name(sd), name)) {
spin_lock(&configfs_dirent_lock);
list_del_init(&sd->s_sibling);
spin_unlock(&configfs_dirent_lock);
configfs_drop_dentry(sd, dir);
configfs_put(sd);
break;
}
}
mutex_unlock(&dir->d_inode->i_mutex);
}
int __init configfs_inode_init(void)
{
return bdi_init(&configfs_backing_dev_info);
}
void configfs_inode_exit(void)
{
bdi_destroy(&configfs_backing_dev_info);
}
| gpl-2.0 |
ShinySide/G900T_Permissive_FOJ4 | drivers/media/dvb/dvb-usb/anysee.c | 4943 | 34108 | /*
* DVB USB Linux driver for Anysee E30 DVB-C & DVB-T USB2.0 receiver
*
* Copyright (C) 2007 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* TODO:
* - add smart card reader support for Conditional Access (CA)
*
* Card reader in Anysee is nothing more than ISO 7816 card reader.
* There is no hardware CAM in any Anysee device sold.
* In my understanding it should be implemented as a dedicated module
* for ISO 7816 card readers, like dvb_ca_en50221 is. Such a module
* would register a serial interface that can be used to communicate
* with any ISO 7816 smart card.
*
* Any help with implementing serial smart card reader support
* is highly welcome!
*/
#include "anysee.h"
#include "tda1002x.h"
#include "mt352.h"
#include "mt352_priv.h"
#include "zl10353.h"
#include "tda18212.h"
#include "cx24116.h"
#include "stv0900.h"
#include "stv6110.h"
#include "isl6423.h"
#include "cxd2820r.h"
/* debug */
static int dvb_usb_anysee_debug;
module_param_named(debug, dvb_usb_anysee_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level" DVB_USB_DEBUG_STATUS);
static int dvb_usb_anysee_delsys;
module_param_named(delsys, dvb_usb_anysee_delsys, int, 0644);
MODULE_PARM_DESC(delsys, "select delivery mode (0=DVB-C, 1=DVB-T)");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static DEFINE_MUTEX(anysee_usb_mutex);
static int anysee_ctrl_msg(struct dvb_usb_device *d, u8 *sbuf, u8 slen,
u8 *rbuf, u8 rlen)
{
struct anysee_state *state = d->priv;
int act_len, ret, i;
u8 buf[64];
memcpy(&buf[0], sbuf, slen);
buf[60] = state->seq++;
if (mutex_lock_interruptible(&anysee_usb_mutex) < 0)
return -EAGAIN;
deb_xfer(">>> ");
debug_dump(buf, slen, deb_xfer);
/* We need to receive one more message after dvb_usb_generic_rw due
   to the weird transaction flow, which is 1 x send + 2 x receive. */
ret = dvb_usb_generic_rw(d, buf, sizeof(buf), buf, sizeof(buf), 0);
if (ret)
goto error_unlock;
/* TODO FIXME: dvb_usb_generic_rw() rarely fails with error code -32
 * (EPIPE, Broken pipe). The function currently supports msleep() as a
 * parameter, but I would rather not use it, since according to
 * Documentation/timers/timers-howto.txt it should not be used for
 * such short sleeps (under 20 ms). Repeating the failed message is
 * the better choice, as it does not add unwanted delays...
 * Fixing that correctly means one or both of these:
 * 1) retry if possible
 * 2) add a suitable delay
 */
/* get answer, retry few times if error returned */
for (i = 0; i < 3; i++) {
/* receive 2nd answer */
ret = usb_bulk_msg(d->udev, usb_rcvbulkpipe(d->udev,
d->props.generic_bulk_ctrl_endpoint), buf, sizeof(buf),
&act_len, 2000);
if (ret) {
deb_info("%s: recv bulk message failed: %d",
__func__, ret);
} else {
deb_xfer("<<< ");
debug_dump(buf, rlen, deb_xfer);
if (buf[63] != 0x4f)
deb_info("%s: cmd failed\n", __func__);
break;
}
}
if (ret) {
/* all retries failed, it is fatal */
err("%s: recv bulk message failed: %d", __func__, ret);
goto error_unlock;
}
/* read request, copy returned data to return buf */
if (rbuf && rlen)
memcpy(rbuf, buf, rlen);
error_unlock:
mutex_unlock(&anysee_usb_mutex);
return ret;
}
static int anysee_read_reg(struct dvb_usb_device *d, u16 reg, u8 *val)
{
u8 buf[] = {CMD_REG_READ, reg >> 8, reg & 0xff, 0x01};
int ret;
ret = anysee_ctrl_msg(d, buf, sizeof(buf), val, 1);
deb_info("%s: reg:%04x val:%02x\n", __func__, reg, *val);
return ret;
}
static int anysee_write_reg(struct dvb_usb_device *d, u16 reg, u8 val)
{
u8 buf[] = {CMD_REG_WRITE, reg >> 8, reg & 0xff, 0x01, val};
deb_info("%s: reg:%04x val:%02x\n", __func__, reg, val);
return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0);
}
/* write single register with mask */
static int anysee_wr_reg_mask(struct dvb_usb_device *d, u16 reg, u8 val,
u8 mask)
{
int ret;
u8 tmp;
/* no need for read if whole reg is written */
if (mask != 0xff) {
ret = anysee_read_reg(d, reg, &tmp);
if (ret)
return ret;
val &= mask;
tmp &= ~mask;
val |= tmp;
}
return anysee_write_reg(d, reg, val);
}
/* read single register with mask */
static int anysee_rd_reg_mask(struct dvb_usb_device *d, u16 reg, u8 *val,
u8 mask)
{
int ret, i;
u8 tmp;
ret = anysee_read_reg(d, reg, &tmp);
if (ret)
return ret;
tmp &= mask;
/* find position of the first bit */
for (i = 0; i < 8; i++) {
if ((mask >> i) & 0x01)
break;
}
*val = tmp >> i;
return 0;
}
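/*
 * Usage sketch (illustrative only; the device pointer is whatever the
 * caller already holds): the helpers above are a plain
 * read-modify-write, so driving a single IO bit looks like:
 */
static inline int anysee_example_set_iod0(struct dvb_usb_device *d, int on)
{
	/* set or clear IOD[0] without touching the other bits */
	return anysee_wr_reg_mask(d, REG_IOD, (on ? 1 : 0) << 0, 0x01);
}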
static int anysee_get_hw_info(struct dvb_usb_device *d, u8 *id)
{
u8 buf[] = {CMD_GET_HW_INFO};
return anysee_ctrl_msg(d, buf, sizeof(buf), id, 3);
}
static int anysee_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
u8 buf[] = {CMD_STREAMING_CTRL, (u8)onoff, 0x00};
deb_info("%s: onoff:%02x\n", __func__, onoff);
return anysee_ctrl_msg(adap->dev, buf, sizeof(buf), NULL, 0);
}
static int anysee_led_ctrl(struct dvb_usb_device *d, u8 mode, u8 interval)
{
u8 buf[] = {CMD_LED_AND_IR_CTRL, 0x01, mode, interval};
deb_info("%s: state:%02x interval:%02x\n", __func__, mode, interval);
return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0);
}
static int anysee_ir_ctrl(struct dvb_usb_device *d, u8 onoff)
{
u8 buf[] = {CMD_LED_AND_IR_CTRL, 0x02, onoff};
deb_info("%s: onoff:%02x\n", __func__, onoff);
return anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0);
}
/* I2C */
static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
int ret = 0, inc, i = 0;
u8 buf[52]; /* 4 + 48 (I2C WR USB command header + I2C WR max) */
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
while (i < num) {
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
if (msg[i].len > 2 || msg[i+1].len > 60) {
ret = -EOPNOTSUPP;
break;
}
buf[0] = CMD_I2C_READ;
buf[1] = (msg[i].addr << 1) | 0x01;
buf[2] = msg[i].buf[0];
buf[3] = msg[i].buf[1];
buf[4] = msg[i].len-1;
buf[5] = msg[i+1].len;
ret = anysee_ctrl_msg(d, buf, 6, msg[i+1].buf,
msg[i+1].len);
inc = 2;
} else {
if (msg[i].len > 48) {
ret = -EOPNOTSUPP;
break;
}
buf[0] = CMD_I2C_WRITE;
buf[1] = (msg[i].addr << 1);
buf[2] = msg[i].len;
buf[3] = 0x01;
memcpy(&buf[4], msg[i].buf, msg[i].len);
ret = anysee_ctrl_msg(d, buf, 4 + msg[i].len, NULL, 0);
inc = 1;
}
if (ret)
break;
i += inc;
}
mutex_unlock(&d->i2c_mutex);
return ret ? ret : i;
}
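/*
 * Sketch of how a register read reaches the xfer above (added for
 * illustration; the two-byte index matches the CMD_I2C_READ framing,
 * which always copies two bytes of the write buffer): a plain write
 * message followed by an I2C_M_RD message is folded into one USB
 * transaction.
 */
static inline int anysee_example_i2c_read(struct i2c_adapter *adap, u8 dev,
		u16 reg, u8 *val)
{
	u8 wbuf[2] = { reg >> 8, reg & 0xff };
	struct i2c_msg msg[2] = {
		{ .addr = dev, .flags = 0, .len = 2, .buf = wbuf },
		{ .addr = dev, .flags = I2C_M_RD, .len = 1, .buf = val },
	};

	return (i2c_transfer(adap, msg, 2) == 2) ? 0 : -EIO;
}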
static u32 anysee_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C;
}
static struct i2c_algorithm anysee_i2c_algo = {
.master_xfer = anysee_master_xfer,
.functionality = anysee_i2c_func,
};
static int anysee_mt352_demod_init(struct dvb_frontend *fe)
{
static u8 clock_config[] = { CLOCK_CTL, 0x38, 0x28 };
static u8 reset[] = { RESET, 0x80 };
static u8 adc_ctl_1_cfg[] = { ADC_CTL_1, 0x40 };
static u8 agc_cfg[] = { AGC_TARGET, 0x28, 0x20 };
static u8 gpp_ctl_cfg[] = { GPP_CTL, 0x33 };
static u8 capt_range_cfg[] = { CAPT_RANGE, 0x32 };
mt352_write(fe, clock_config, sizeof(clock_config));
udelay(200);
mt352_write(fe, reset, sizeof(reset));
mt352_write(fe, adc_ctl_1_cfg, sizeof(adc_ctl_1_cfg));
mt352_write(fe, agc_cfg, sizeof(agc_cfg));
mt352_write(fe, gpp_ctl_cfg, sizeof(gpp_ctl_cfg));
mt352_write(fe, capt_range_cfg, sizeof(capt_range_cfg));
return 0;
}
/* Callbacks for DVB USB */
static struct tda10023_config anysee_tda10023_config = {
.demod_address = (0x1a >> 1),
.invert = 0,
.xtal = 16000000,
.pll_m = 11,
.pll_p = 3,
.pll_n = 1,
.output_mode = TDA10023_OUTPUT_MODE_PARALLEL_C,
.deltaf = 0xfeeb,
};
static struct mt352_config anysee_mt352_config = {
.demod_address = (0x1e >> 1),
.demod_init = anysee_mt352_demod_init,
};
static struct zl10353_config anysee_zl10353_config = {
.demod_address = (0x1e >> 1),
.parallel_ts = 1,
};
static struct zl10353_config anysee_zl10353_tda18212_config2 = {
.demod_address = (0x1e >> 1),
.parallel_ts = 1,
.disable_i2c_gate_ctrl = 1,
.no_tuner = 1,
.if2 = 41500,
};
static struct zl10353_config anysee_zl10353_tda18212_config = {
.demod_address = (0x18 >> 1),
.parallel_ts = 1,
.disable_i2c_gate_ctrl = 1,
.no_tuner = 1,
.if2 = 41500,
};
static struct tda10023_config anysee_tda10023_tda18212_config = {
.demod_address = (0x1a >> 1),
.xtal = 16000000,
.pll_m = 12,
.pll_p = 3,
.pll_n = 1,
.output_mode = TDA10023_OUTPUT_MODE_PARALLEL_B,
.deltaf = 0xba02,
};
static struct tda18212_config anysee_tda18212_config = {
.i2c_address = (0xc0 >> 1),
.if_dvbt_6 = 4150,
.if_dvbt_7 = 4150,
.if_dvbt_8 = 4150,
.if_dvbc = 5000,
};
static struct tda18212_config anysee_tda18212_config2 = {
.i2c_address = 0x60 /* (0xc0 >> 1) */,
.if_dvbt_6 = 3550,
.if_dvbt_7 = 3700,
.if_dvbt_8 = 4150,
.if_dvbt2_6 = 3250,
.if_dvbt2_7 = 4000,
.if_dvbt2_8 = 4000,
.if_dvbc = 5000,
};
static struct cx24116_config anysee_cx24116_config = {
.demod_address = (0xaa >> 1),
.mpg_clk_pos_pol = 0x00,
.i2c_wr_max = 48,
};
static struct stv0900_config anysee_stv0900_config = {
.demod_address = (0xd0 >> 1),
.demod_mode = 0,
.xtal = 8000000,
.clkmode = 3,
.diseqc_mode = 2,
.tun1_maddress = 0,
.tun1_adc = 1, /* 1 Vpp */
.path1_mode = 3,
};
static struct stv6110_config anysee_stv6110_config = {
.i2c_address = (0xc0 >> 1),
.mclk = 16000000,
.clk_div = 1,
};
static struct isl6423_config anysee_isl6423_config = {
.current_max = SEC_CURRENT_800m,
.curlim = SEC_CURRENT_LIM_OFF,
.mod_extern = 1,
.addr = (0x10 >> 1),
};
static struct cxd2820r_config anysee_cxd2820r_config = {
.i2c_address = 0x6d, /* (0xda >> 1) */
.ts_mode = 0x38,
};
/*
* New USB device strings: Mfr=1, Product=2, SerialNumber=0
* Manufacturer: AMT.CO.KR
*
* E30 VID=04b4 PID=861f HW=2 FW=2.1 Product=????????
* PCB: ?
* parts: DNOS404ZH102A(MT352, DTT7579(?))
*
* E30 VID=04b4 PID=861f HW=2 FW=2.1 "anysee-T(LP)"
* PCB: PCB 507T (rev1.61)
* parts: DNOS404ZH103A(ZL10353, DTT7579(?))
* OEA=0a OEB=00 OEC=00 OED=ff OEE=00
* IOA=45 IOB=ff IOC=00 IOD=ff IOE=00
*
* E30 Plus VID=04b4 PID=861f HW=6 FW=1.0 "anysee"
* PCB: 507CD (rev1.1)
* parts: DNOS404ZH103A(ZL10353, DTT7579(?)), CST56I01
* OEA=80 OEB=00 OEC=00 OED=ff OEE=fe
* IOA=4f IOB=ff IOC=00 IOD=06 IOE=01
* IOD[0] ZL10353 1=enabled
* IOA[7] TS 0=enabled
* tuner is not behind ZL10353 I2C-gate (no care if gate disabled or not)
*
* E30 C Plus VID=04b4 PID=861f HW=10 FW=1.0 "anysee-DC(LP)"
* PCB: 507DC (rev0.2)
* parts: TDA10023, DTOS403IH102B TM, CST56I01
* OEA=80 OEB=00 OEC=00 OED=ff OEE=fe
* IOA=4f IOB=ff IOC=00 IOD=26 IOE=01
* IOD[0] TDA10023 1=enabled
*
* E30 S2 Plus VID=04b4 PID=861f HW=11 FW=0.1 "anysee-S2(LP)"
* PCB: 507SI (rev2.1)
* parts: BS2N10WCC01(CX24116, CX24118), ISL6423, TDA8024
* OEA=80 OEB=00 OEC=ff OED=ff OEE=fe
* IOA=4d IOB=ff IOC=00 IOD=26 IOE=01
* IOD[0] CX24116 1=enabled
*
* E30 C Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)"
* PCB: 507FA (rev0.4)
* parts: TDA10023, DTOS403IH102B TM, TDA8024
* OEA=80 OEB=00 OEC=ff OED=ff OEE=ff
* IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0
* IOD[5] TDA10023 1=enabled
* IOE[0] tuner 1=enabled
*
* E30 Combo Plus VID=1c73 PID=861f HW=15 FW=1.2 "anysee-FA(LP)"
* PCB: 507FA (rev1.1)
* parts: ZL10353, TDA10023, DTOS403IH102B TM, TDA8024
* OEA=80 OEB=00 OEC=ff OED=ff OEE=ff
* IOA=4d IOB=ff IOC=00 IOD=00 IOE=c0
* DVB-C:
* IOD[5] TDA10023 1=enabled
* IOE[0] tuner 1=enabled
* DVB-T:
* IOD[0] ZL10353 1=enabled
* IOE[0] tuner 0=enabled
* tuner is behind ZL10353 I2C-gate
*
* E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)"
* PCB: 508TC (rev0.6)
* parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212)
* OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
* IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4
* IOA[7] TS 1=enabled
* IOE[4] TDA18212 1=enabled
* DVB-C:
* IOD[6] ZL10353 0=disabled
* IOD[5] TDA10023 1=enabled
* IOE[0] IF 1=enabled
* DVB-T:
* IOD[5] TDA10023 0=disabled
* IOD[6] ZL10353 1=enabled
* IOE[0] IF 0=enabled
*
* E7 S2 VID=1c73 PID=861f HW=19 FW=0.4 AMTCI=0.5 "anysee-E7S2(LP)"
* PCB: 508S2 (rev0.7)
* parts: DNBU10512IST(STV0903, STV6110), ISL6423
* OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
* IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4
* IOA[7] TS 1=enabled
* IOE[5] STV0903 1=enabled
*
* E7 T2C VID=1c73 PID=861f HW=20 FW=0.1 AMTCI=0.5 "anysee-E7T2C(LP)"
* PCB: 508T2C (rev0.3)
* parts: DNOQ44QCH106A(CXD2820R, TDA18212), TDA8024
* OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
* IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4
* IOA[7] TS 1=enabled
* IOE[5] CXD2820R 1=enabled
*
* E7 PTC VID=1c73 PID=861f HW=21 FW=0.1 AMTCI=?? "anysee-E7PTC(LP)"
* PCB: 508PTC (rev0.5)
* parts: ZL10353, TDA10023, DNOD44CDH086A(TDA18212)
* OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
* IOA=4d IOB=00 IOC=cc IOD=48 IOE=e4
* IOA[7] TS 1=enabled
* IOE[4] TDA18212 1=enabled
* DVB-C:
* IOD[6] ZL10353 0=disabled
* IOD[5] TDA10023 1=enabled
* IOE[0] IF 1=enabled
* DVB-T:
* IOD[5] TDA10023 0=disabled
* IOD[6] ZL10353 1=enabled
* IOE[0] IF 0=enabled
*
* E7 PS2 VID=1c73 PID=861f HW=22 FW=0.1 AMTCI=?? "anysee-E7PS2(LP)"
* PCB: 508PS2 (rev0.4)
* parts: DNBU10512IST(STV0903, STV6110), ISL6423
* OEA=80 OEB=00 OEC=03 OED=f7 OEE=ff
* IOA=4d IOB=00 IOC=c4 IOD=08 IOE=e4
* IOA[7] TS 1=enabled
* IOE[5] STV0903 1=enabled
*/
/* external I2C gate used for DNOD44CDH086A(TDA18212) tuner module */
static int anysee_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct dvb_usb_adapter *adap = fe->dvb->priv;
/* enable / disable tuner access on IOE[4] */
return anysee_wr_reg_mask(adap->dev, REG_IOE, (enable << 4), 0x10);
}
static int anysee_frontend_ctrl(struct dvb_frontend *fe, int onoff)
{
struct dvb_usb_adapter *adap = fe->dvb->priv;
struct anysee_state *state = adap->dev->priv;
int ret;
deb_info("%s: fe=%d onoff=%d\n", __func__, fe->id, onoff);
/* no frontend sleep control */
if (onoff == 0)
return 0;
switch (state->hw) {
case ANYSEE_HW_507FA: /* 15 */
/* E30 Combo Plus */
/* E30 C Plus */
if ((fe->id ^ dvb_usb_anysee_delsys) == 0) {
/* disable DVB-T demod on IOD[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 0),
0x01);
if (ret)
goto error;
/* enable DVB-C demod on IOD[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 5),
0x20);
if (ret)
goto error;
/* enable DVB-C tuner on IOE[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 0),
0x01);
if (ret)
goto error;
} else {
/* disable DVB-C demod on IOD[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5),
0x20);
if (ret)
goto error;
/* enable DVB-T demod on IOD[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0),
0x01);
if (ret)
goto error;
/* enable DVB-T tuner on IOE[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (0 << 0),
0x01);
if (ret)
goto error;
}
break;
case ANYSEE_HW_508TC: /* 18 */
case ANYSEE_HW_508PTC: /* 21 */
/* E7 TC */
/* E7 PTC */
if ((fe->id ^ dvb_usb_anysee_delsys) == 0) {
/* disable DVB-T demod on IOD[6] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 6),
0x40);
if (ret)
goto error;
/* enable DVB-C demod on IOD[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 5),
0x20);
if (ret)
goto error;
/* enable IF route on IOE[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 0),
0x01);
if (ret)
goto error;
} else {
/* disable DVB-C demod on IOD[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5),
0x20);
if (ret)
goto error;
/* enable DVB-T demod on IOD[6] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 6),
0x40);
if (ret)
goto error;
/* enable IF route on IOE[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (0 << 0),
0x01);
if (ret)
goto error;
}
break;
default:
ret = 0;
}
error:
return ret;
}
static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
{
int ret;
struct anysee_state *state = adap->dev->priv;
u8 hw_info[3];
u8 tmp;
struct i2c_msg msg[2] = {
{
.addr = anysee_tda18212_config.i2c_address,
.flags = 0,
.len = 1,
.buf = "\x00",
}, {
.addr = anysee_tda18212_config.i2c_address,
.flags = I2C_M_RD,
.len = 1,
.buf = &tmp,
}
};
/* detect hardware only once */
if (adap->fe_adap[0].fe == NULL) {
/* Check which hardware we have.
* We must make this call twice to get reliable values
* (hw/fw bug).
*/
ret = anysee_get_hw_info(adap->dev, hw_info);
if (ret)
goto error;
ret = anysee_get_hw_info(adap->dev, hw_info);
if (ret)
goto error;
/* The meaning of these info bytes is guessed. */
info("firmware version:%d.%d hardware id:%d",
hw_info[1], hw_info[2], hw_info[0]);
state->hw = hw_info[0];
}
/* set current frontend ID for devices having two frontends */
if (adap->fe_adap[0].fe)
state->fe_id++;
switch (state->hw) {
case ANYSEE_HW_507T: /* 2 */
/* E30 */
if (state->fe_id)
break;
/* attach demod */
adap->fe_adap[0].fe = dvb_attach(mt352_attach,
&anysee_mt352_config, &adap->dev->i2c_adap);
if (adap->fe_adap[0].fe)
break;
/* attach demod */
adap->fe_adap[0].fe = dvb_attach(zl10353_attach,
&anysee_zl10353_config, &adap->dev->i2c_adap);
break;
case ANYSEE_HW_507CD: /* 6 */
/* E30 Plus */
if (state->fe_id)
break;
/* enable DVB-T demod on IOD[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0), 0x01);
if (ret)
goto error;
/* enable transport stream on IOA[7] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOA, (0 << 7), 0x80);
if (ret)
goto error;
/* attach demod */
adap->fe_adap[0].fe = dvb_attach(zl10353_attach,
&anysee_zl10353_config, &adap->dev->i2c_adap);
break;
case ANYSEE_HW_507DC: /* 10 */
/* E30 C Plus */
if (state->fe_id)
break;
/* enable DVB-C demod on IOD[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0), 0x01);
if (ret)
goto error;
/* attach demod */
adap->fe_adap[0].fe = dvb_attach(tda10023_attach,
&anysee_tda10023_config, &adap->dev->i2c_adap, 0x48);
break;
case ANYSEE_HW_507SI: /* 11 */
/* E30 S2 Plus */
if (state->fe_id)
break;
/* enable DVB-S/S2 demod on IOD[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0), 0x01);
if (ret)
goto error;
/* attach demod */
adap->fe_adap[0].fe = dvb_attach(cx24116_attach,
&anysee_cx24116_config, &adap->dev->i2c_adap);
break;
case ANYSEE_HW_507FA: /* 15 */
/* E30 Combo Plus */
/* E30 C Plus */
/* enable tuner on IOE[4] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 4), 0x10);
if (ret)
goto error;
/* probe TDA18212 */
tmp = 0;
ret = i2c_transfer(&adap->dev->i2c_adap, msg, 2);
if (ret == 2 && tmp == 0xc7)
deb_info("%s: TDA18212 found\n", __func__);
else
tmp = 0;
/* disable tuner on IOE[4] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (0 << 4), 0x10);
if (ret)
goto error;
if ((state->fe_id ^ dvb_usb_anysee_delsys) == 0) {
/* disable DVB-T demod on IOD[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 0),
0x01);
if (ret)
goto error;
/* enable DVB-C demod on IOD[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 5),
0x20);
if (ret)
goto error;
/* attach demod */
if (tmp == 0xc7) {
/* TDA18212 config */
adap->fe_adap[state->fe_id].fe = dvb_attach(
tda10023_attach,
&anysee_tda10023_tda18212_config,
&adap->dev->i2c_adap, 0x48);
} else {
/* PLL config */
adap->fe_adap[state->fe_id].fe = dvb_attach(
tda10023_attach,
&anysee_tda10023_config,
&adap->dev->i2c_adap, 0x48);
}
} else {
/* disable DVB-C demod on IOD[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5),
0x20);
if (ret)
goto error;
/* enable DVB-T demod on IOD[0] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 0),
0x01);
if (ret)
goto error;
/* attach demod */
if (tmp == 0xc7) {
/* TDA18212 config */
adap->fe_adap[state->fe_id].fe = dvb_attach(
zl10353_attach,
&anysee_zl10353_tda18212_config2,
&adap->dev->i2c_adap);
} else {
/* PLL config */
adap->fe_adap[state->fe_id].fe = dvb_attach(
zl10353_attach,
&anysee_zl10353_config,
&adap->dev->i2c_adap);
}
}
/* I2C gate for DNOD44CDH086A(TDA18212) tuner module */
if (tmp == 0xc7) {
if (adap->fe_adap[state->fe_id].fe)
adap->fe_adap[state->fe_id].fe->ops.i2c_gate_ctrl =
anysee_i2c_gate_ctrl;
}
break;
case ANYSEE_HW_508TC: /* 18 */
case ANYSEE_HW_508PTC: /* 21 */
/* E7 TC */
/* E7 PTC */
if ((state->fe_id ^ dvb_usb_anysee_delsys) == 0) {
/* disable DVB-T demod on IOD[6] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 6),
0x40);
if (ret)
goto error;
/* enable DVB-C demod on IOD[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 5),
0x20);
if (ret)
goto error;
/* attach demod */
adap->fe_adap[state->fe_id].fe =
dvb_attach(tda10023_attach,
&anysee_tda10023_tda18212_config,
&adap->dev->i2c_adap, 0x48);
} else {
/* disable DVB-C demod on IOD[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (0 << 5),
0x20);
if (ret)
goto error;
/* enable DVB-T demod on IOD[6] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOD, (1 << 6),
0x40);
if (ret)
goto error;
/* attach demod */
adap->fe_adap[state->fe_id].fe =
dvb_attach(zl10353_attach,
&anysee_zl10353_tda18212_config,
&adap->dev->i2c_adap);
}
/* I2C gate for DNOD44CDH086A(TDA18212) tuner module */
if (adap->fe_adap[state->fe_id].fe)
adap->fe_adap[state->fe_id].fe->ops.i2c_gate_ctrl =
anysee_i2c_gate_ctrl;
state->has_ci = true;
break;
case ANYSEE_HW_508S2: /* 19 */
case ANYSEE_HW_508PS2: /* 22 */
/* E7 S2 */
/* E7 PS2 */
if (state->fe_id)
break;
/* enable DVB-S/S2 demod on IOE[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20);
if (ret)
goto error;
/* attach demod */
adap->fe_adap[0].fe = dvb_attach(stv0900_attach,
&anysee_stv0900_config, &adap->dev->i2c_adap, 0);
state->has_ci = true;
break;
case ANYSEE_HW_508T2C: /* 20 */
/* E7 T2C */
if (state->fe_id)
break;
/* enable DVB-T/T2/C demod on IOE[5] */
ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20);
if (ret)
goto error;
/* attach demod */
adap->fe_adap[state->fe_id].fe = dvb_attach(cxd2820r_attach,
&anysee_cxd2820r_config, &adap->dev->i2c_adap);
state->has_ci = true;
break;
}
if (!adap->fe_adap[0].fe) {
/* we have no frontend :-( */
ret = -ENODEV;
err("Unsupported Anysee version. " \
"Please report the <linux-media@vger.kernel.org>.");
}
error:
return ret;
}
static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
{
struct anysee_state *state = adap->dev->priv;
struct dvb_frontend *fe;
int ret;
deb_info("%s: fe=%d\n", __func__, state->fe_id);
switch (state->hw) {
case ANYSEE_HW_507T: /* 2 */
/* E30 */
/* attach tuner */
fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe,
(0xc2 >> 1), NULL, DVB_PLL_THOMSON_DTT7579);
break;
case ANYSEE_HW_507CD: /* 6 */
/* E30 Plus */
/* attach tuner */
fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe,
(0xc2 >> 1), &adap->dev->i2c_adap,
DVB_PLL_THOMSON_DTT7579);
break;
case ANYSEE_HW_507DC: /* 10 */
/* E30 C Plus */
/* attach tuner */
fe = dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe,
(0xc0 >> 1), &adap->dev->i2c_adap,
DVB_PLL_SAMSUNG_DTOS403IH102A);
break;
case ANYSEE_HW_507SI: /* 11 */
/* E30 S2 Plus */
/* attach LNB controller */
fe = dvb_attach(isl6423_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap, &anysee_isl6423_config);
break;
case ANYSEE_HW_507FA: /* 15 */
/* E30 Combo Plus */
/* E30 C Plus */
/* Try to attach the TDA18212 silicon tuner on IOE[4] first; if
 * that fails, attach the old simple PLL. */
/* attach tuner */
fe = dvb_attach(tda18212_attach, adap->fe_adap[state->fe_id].fe,
&adap->dev->i2c_adap, &anysee_tda18212_config);
if (fe)
break;
/* attach tuner */
fe = dvb_attach(dvb_pll_attach, adap->fe_adap[state->fe_id].fe,
(0xc0 >> 1), &adap->dev->i2c_adap,
DVB_PLL_SAMSUNG_DTOS403IH102A);
break;
case ANYSEE_HW_508TC: /* 18 */
case ANYSEE_HW_508PTC: /* 21 */
/* E7 TC */
/* E7 PTC */
/* attach tuner */
fe = dvb_attach(tda18212_attach, adap->fe_adap[state->fe_id].fe,
&adap->dev->i2c_adap, &anysee_tda18212_config);
break;
case ANYSEE_HW_508S2: /* 19 */
case ANYSEE_HW_508PS2: /* 22 */
/* E7 S2 */
/* E7 PS2 */
/* attach tuner */
fe = dvb_attach(stv6110_attach, adap->fe_adap[0].fe,
&anysee_stv6110_config, &adap->dev->i2c_adap);
if (fe) {
/* attach LNB controller */
fe = dvb_attach(isl6423_attach, adap->fe_adap[0].fe,
&adap->dev->i2c_adap, &anysee_isl6423_config);
}
break;
case ANYSEE_HW_508T2C: /* 20 */
/* E7 T2C */
/* attach tuner */
fe = dvb_attach(tda18212_attach, adap->fe_adap[state->fe_id].fe,
&adap->dev->i2c_adap, &anysee_tda18212_config2);
break;
default:
fe = NULL;
}
if (fe)
ret = 0;
else
ret = -ENODEV;
return ret;
}
static int anysee_rc_query(struct dvb_usb_device *d)
{
u8 buf[] = {CMD_GET_IR_CODE};
u8 ircode[2];
int ret;
/* The remote controller is basic NEC using address byte 0x08.
   The Anysee device RC query returns only two bytes, status and code;
   the address byte is dropped. Also it does not return any value for
   NEC RCs having an address byte other than 0x08. Due to that, we
   cannot use this device as a standard NEC receiver.
   It might be possible to make a hack which reads the whole code
   directly from device memory... */
ret = anysee_ctrl_msg(d, buf, sizeof(buf), ircode, sizeof(ircode));
if (ret)
return ret;
if (ircode[0]) {
deb_rc("%s: key pressed %02x\n", __func__, ircode[1]);
rc_keydown(d->rc_dev, 0x08 << 8 | ircode[1], 0);
}
return 0;
}
static int anysee_ci_read_attribute_mem(struct dvb_ca_en50221 *ci, int slot,
int addr)
{
struct dvb_usb_device *d = ci->data;
int ret;
u8 buf[] = {CMD_CI, 0x02, 0x40 | addr >> 8, addr & 0xff, 0x00, 1};
u8 val;
ret = anysee_ctrl_msg(d, buf, sizeof(buf), &val, 1);
if (ret)
return ret;
return val;
}
static int anysee_ci_write_attribute_mem(struct dvb_ca_en50221 *ci, int slot,
int addr, u8 val)
{
struct dvb_usb_device *d = ci->data;
int ret;
u8 buf[] = {CMD_CI, 0x03, 0x40 | addr >> 8, addr & 0xff, 0x00, 1, val};
ret = anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0);
if (ret)
return ret;
return 0;
}
static int anysee_ci_read_cam_control(struct dvb_ca_en50221 *ci, int slot,
u8 addr)
{
struct dvb_usb_device *d = ci->data;
int ret;
u8 buf[] = {CMD_CI, 0x04, 0x40, addr, 0x00, 1};
u8 val;
ret = anysee_ctrl_msg(d, buf, sizeof(buf), &val, 1);
if (ret)
return ret;
return val;
}
static int anysee_ci_write_cam_control(struct dvb_ca_en50221 *ci, int slot,
u8 addr, u8 val)
{
struct dvb_usb_device *d = ci->data;
int ret;
u8 buf[] = {CMD_CI, 0x05, 0x40, addr, 0x00, 1, val};
ret = anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0);
if (ret)
return ret;
return 0;
}
static int anysee_ci_slot_reset(struct dvb_ca_en50221 *ci, int slot)
{
struct dvb_usb_device *d = ci->data;
int ret;
struct anysee_state *state = d->priv;
state->ci_cam_ready = jiffies + msecs_to_jiffies(1000);
ret = anysee_wr_reg_mask(d, REG_IOA, (0 << 7), 0x80);
if (ret)
return ret;
msleep(300);
ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80);
if (ret)
return ret;
return 0;
}
static int anysee_ci_slot_shutdown(struct dvb_ca_en50221 *ci, int slot)
{
struct dvb_usb_device *d = ci->data;
int ret;
ret = anysee_wr_reg_mask(d, REG_IOA, (0 << 7), 0x80);
if (ret)
return ret;
msleep(30);
ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80);
if (ret)
return ret;
return 0;
}
static int anysee_ci_slot_ts_enable(struct dvb_ca_en50221 *ci, int slot)
{
struct dvb_usb_device *d = ci->data;
int ret;
ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 1), 0x02);
if (ret)
return ret;
return 0;
}
static int anysee_ci_poll_slot_status(struct dvb_ca_en50221 *ci, int slot,
int open)
{
struct dvb_usb_device *d = ci->data;
struct anysee_state *state = d->priv;
int ret;
u8 tmp;
ret = anysee_rd_reg_mask(d, REG_IOC, &tmp, 0x40);
if (ret)
return ret;
if (tmp == 0) {
ret = DVB_CA_EN50221_POLL_CAM_PRESENT;
if (time_after(jiffies, state->ci_cam_ready))
ret |= DVB_CA_EN50221_POLL_CAM_READY;
}
return ret;
}
static int anysee_ci_init(struct dvb_usb_device *d)
{
struct anysee_state *state = d->priv;
int ret;
state->ci.owner = THIS_MODULE;
state->ci.read_attribute_mem = anysee_ci_read_attribute_mem;
state->ci.write_attribute_mem = anysee_ci_write_attribute_mem;
state->ci.read_cam_control = anysee_ci_read_cam_control;
state->ci.write_cam_control = anysee_ci_write_cam_control;
state->ci.slot_reset = anysee_ci_slot_reset;
state->ci.slot_shutdown = anysee_ci_slot_shutdown;
state->ci.slot_ts_enable = anysee_ci_slot_ts_enable;
state->ci.poll_slot_status = anysee_ci_poll_slot_status;
state->ci.data = d;
ret = anysee_wr_reg_mask(d, REG_IOA, (1 << 7), 0x80);
if (ret)
return ret;
ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 2)|(0 << 1)|(0 << 0), 0x07);
if (ret)
return ret;
ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 2)|(1 << 1)|(1 << 0), 0x07);
if (ret)
return ret;
ret = dvb_ca_en50221_init(&d->adapter[0].dvb_adap, &state->ci, 0, 1);
if (ret)
return ret;
return 0;
}
static void anysee_ci_release(struct dvb_usb_device *d)
{
struct anysee_state *state = d->priv;
/* detach CI */
if (state->has_ci)
dvb_ca_en50221_release(&state->ci);
return;
}
static int anysee_init(struct dvb_usb_device *d)
{
struct anysee_state *state = d->priv;
int ret;
/* LED light */
ret = anysee_led_ctrl(d, 0x01, 0x03);
if (ret)
return ret;
/* enable IR */
ret = anysee_ir_ctrl(d, 1);
if (ret)
return ret;
/* attach CI */
if (state->has_ci) {
ret = anysee_ci_init(d);
if (ret) {
state->has_ci = false;
return ret;
}
}
return 0;
}
/* DVB USB Driver stuff */
static struct dvb_usb_device_properties anysee_properties;
static int anysee_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct dvb_usb_device *d;
struct usb_host_interface *alt;
int ret;
/* There is one interface with two alternate settings.
Alternate setting 0 is for bulk transfer.
Alternate setting 1 is for isochronous transfer.
We use bulk transfer (alternate setting 0). */
if (intf->num_altsetting < 1)
return -ENODEV;
/*
* Anysee is always warm (its USB-bridge, Cypress FX2, uploads
* firmware from eeprom). If dvb_usb_device_init() succeeds that
* means d is a valid pointer.
*/
ret = dvb_usb_device_init(intf, &anysee_properties, THIS_MODULE, &d,
adapter_nr);
if (ret)
return ret;
alt = usb_altnum_to_altsetting(intf, 0);
if (alt == NULL) {
deb_info("%s: no alt found!\n", __func__);
return -ENODEV;
}
ret = usb_set_interface(d->udev, alt->desc.bInterfaceNumber,
alt->desc.bAlternateSetting);
if (ret)
return ret;
return anysee_init(d);
}
static void anysee_disconnect(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
anysee_ci_release(d);
dvb_usb_device_exit(intf);
return;
}
static struct usb_device_id anysee_table[] = {
{ USB_DEVICE(USB_VID_CYPRESS, USB_PID_ANYSEE) },
{ USB_DEVICE(USB_VID_AMT, USB_PID_ANYSEE) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, anysee_table);
static struct dvb_usb_device_properties anysee_properties = {
.caps = DVB_USB_IS_AN_I2C_ADAPTER,
.usb_ctrl = DEVICE_SPECIFIC,
.size_of_priv = sizeof(struct anysee_state),
.num_adapters = 1,
.adapter = {
{
.num_frontends = 2,
.frontend_ctrl = anysee_frontend_ctrl,
.fe = { {
.streaming_ctrl = anysee_streaming_ctrl,
.frontend_attach = anysee_frontend_attach,
.tuner_attach = anysee_tuner_attach,
.stream = {
.type = USB_BULK,
.count = 8,
.endpoint = 0x82,
.u = {
.bulk = {
.buffersize = (16*512),
}
}
},
}, {
.streaming_ctrl = anysee_streaming_ctrl,
.frontend_attach = anysee_frontend_attach,
.tuner_attach = anysee_tuner_attach,
.stream = {
.type = USB_BULK,
.count = 8,
.endpoint = 0x82,
.u = {
.bulk = {
.buffersize = (16*512),
}
}
},
} },
}
},
.rc.core = {
.rc_codes = RC_MAP_ANYSEE,
.protocol = RC_TYPE_OTHER,
.module_name = "anysee",
.rc_query = anysee_rc_query,
.rc_interval = 250, /* windows driver uses 500ms */
},
.i2c_algo = &anysee_i2c_algo,
.generic_bulk_ctrl_endpoint = 1,
.num_device_descs = 1,
.devices = {
{
.name = "Anysee DVB USB2.0",
.cold_ids = {NULL},
.warm_ids = {&anysee_table[0],
&anysee_table[1], NULL},
},
}
};
static struct usb_driver anysee_driver = {
.name = "dvb_usb_anysee",
.probe = anysee_probe,
.disconnect = anysee_disconnect,
.id_table = anysee_table,
};
module_usb_driver(anysee_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Driver Anysee E30 DVB-C & DVB-T USB2.0");
MODULE_LICENSE("GPL");
| gpl-2.0 |
aicjofs/android_kernel_fuhu_t8400n | drivers/media/dvb/dvb-usb/dibusb-common.c | 5199 | 12321 | /* Common methods for dibusb-based-receivers.
*
* Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation, version 2.
*
* see Documentation/dvb/README.dvb-usb for more information
*/
#include "dibusb.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "set debugging level (1=info (|-able))." DVB_USB_DEBUG_STATUS);
MODULE_LICENSE("GPL");
#define deb_info(args...) dprintk(debug,0x01,args)
/* common stuff used by the different dibusb modules */
int dibusb_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
if (adap->priv != NULL) {
struct dibusb_state *st = adap->priv;
if (st->ops.fifo_ctrl != NULL)
if (st->ops.fifo_ctrl(adap->fe_adap[0].fe, onoff)) {
err("error while controlling the fifo of the demod.");
return -ENODEV;
}
}
return 0;
}
EXPORT_SYMBOL(dibusb_streaming_ctrl);
int dibusb_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff)
{
if (adap->priv != NULL) {
struct dibusb_state *st = adap->priv;
if (st->ops.pid_ctrl != NULL)
st->ops.pid_ctrl(adap->fe_adap[0].fe,
index, pid, onoff);
}
return 0;
}
EXPORT_SYMBOL(dibusb_pid_filter);
int dibusb_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
if (adap->priv != NULL) {
struct dibusb_state *st = adap->priv;
if (st->ops.pid_parse != NULL)
if (st->ops.pid_parse(adap->fe_adap[0].fe, onoff) < 0)
err("could not handle pid_parser");
}
return 0;
}
EXPORT_SYMBOL(dibusb_pid_filter_ctrl);
int dibusb_power_ctrl(struct dvb_usb_device *d, int onoff)
{
u8 b[3];
int ret;
b[0] = DIBUSB_REQ_SET_IOCTL;
b[1] = DIBUSB_IOCTL_CMD_POWER_MODE;
b[2] = onoff ? DIBUSB_IOCTL_POWER_WAKEUP : DIBUSB_IOCTL_POWER_SLEEP;
ret = dvb_usb_generic_write(d,b,3);
msleep(10);
return ret;
}
EXPORT_SYMBOL(dibusb_power_ctrl);
int dibusb2_0_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
{
u8 b[3] = { 0 };
int ret;
if ((ret = dibusb_streaming_ctrl(adap,onoff)) < 0)
return ret;
if (onoff) {
b[0] = DIBUSB_REQ_SET_STREAMING_MODE;
b[1] = 0x00;
if ((ret = dvb_usb_generic_write(adap->dev,b,2)) < 0)
return ret;
}
b[0] = DIBUSB_REQ_SET_IOCTL;
b[1] = onoff ? DIBUSB_IOCTL_CMD_ENABLE_STREAM : DIBUSB_IOCTL_CMD_DISABLE_STREAM;
return dvb_usb_generic_write(adap->dev,b,3);
}
EXPORT_SYMBOL(dibusb2_0_streaming_ctrl);
int dibusb2_0_power_ctrl(struct dvb_usb_device *d, int onoff)
{
if (onoff) {
u8 b[3] = { DIBUSB_REQ_SET_IOCTL, DIBUSB_IOCTL_CMD_POWER_MODE, DIBUSB_IOCTL_POWER_WAKEUP };
return dvb_usb_generic_write(d,b,3);
} else
return 0;
}
EXPORT_SYMBOL(dibusb2_0_power_ctrl);
static int dibusb_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
{
u8 sndbuf[wlen+4]; /* lead(1) devaddr,direction(1) addr(2) data(wlen) (len(2) (when reading)) */
/* write only ? */
int wo = (rbuf == NULL || rlen == 0),
len = 2 + wlen + (wo ? 0 : 2);
sndbuf[0] = wo ? DIBUSB_REQ_I2C_WRITE : DIBUSB_REQ_I2C_READ;
sndbuf[1] = (addr << 1) | (wo ? 0 : 1);
memcpy(&sndbuf[2],wbuf,wlen);
if (!wo) {
sndbuf[wlen+2] = (rlen >> 8) & 0xff;
sndbuf[wlen+3] = rlen & 0xff;
}
return dvb_usb_generic_rw(d,sndbuf,len,rbuf,rlen,0);
}
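/*
* Worked example (editor's note, not from the original source): a 2-byte
* register read from i2c address 0x0a with a 1-byte offset in wbuf is
* encoded by dibusb_i2c_msg() above as
*
* sndbuf[0] = DIBUSB_REQ_I2C_READ (lead byte)
* sndbuf[1] = (0x0a << 1) | 1 (device address, read direction)
* sndbuf[2] = wbuf[0] (register offset)
* sndbuf[3] = 0x00, sndbuf[4] = 0x02 (rlen = 2, big endian)
*
* giving len = 2 + wlen + 2 = 5 bytes out and rlen = 2 bytes back.
*/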
/*
* I2C master xfer function
*/
static int dibusb_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
int i;
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
for (i = 0; i < num; i++) {
/* write/read request */
if (i+1 < num && (msg[i].flags & I2C_M_RD) == 0
&& (msg[i+1].flags & I2C_M_RD)) {
if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,
msg[i+1].buf,msg[i+1].len) < 0)
break;
i++;
} else if ((msg[i].flags & I2C_M_RD) == 0) {
if (dibusb_i2c_msg(d, msg[i].addr, msg[i].buf,msg[i].len,NULL,0) < 0)
break;
} else if (msg[i].addr != 0x50) {
/* 0x50 is the address of the eeprom - we need to protect it
* from dibusb's flawed i2c implementation: reads without
* first writing the offset are forbidden */
if (dibusb_i2c_msg(d, msg[i].addr, NULL, 0, msg[i].buf, msg[i].len) < 0)
break;
}
}
mutex_unlock(&d->i2c_mutex);
return i;
}
static u32 dibusb_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C;
}
struct i2c_algorithm dibusb_i2c_algo = {
.master_xfer = dibusb_i2c_xfer,
.functionality = dibusb_i2c_func,
};
EXPORT_SYMBOL(dibusb_i2c_algo);
int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
{
u8 wbuf[1] = { offs };
return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1);
}
EXPORT_SYMBOL(dibusb_read_eeprom_byte);
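/*
* Hedged sketch (editor's note, not part of the original driver): a
* convenience wrapper over dibusb_read_eeprom_byte() for reading a
* consecutive eeprom range, as the tuner-attach code below does byte by
* byte. The helper name is hypothetical; the caller must keep offs + len
* within the 8-bit eeprom address space.
*/
static inline int dibusb_read_eeprom_range(struct dvb_usb_device *d, u8 offs,
u8 *buf, u8 len)
{
int i, ret;
for (i = 0; i < len; i++) {
ret = dibusb_read_eeprom_byte(d, offs + i, &buf[i]);
if (ret < 0)
return ret;
}
return 0;
}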
/* 3000MC/P stuff */
// Config Adjacent channels Perf -cal22
static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
.band_caps = BAND_VHF | BAND_UHF,
.setup = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
.agc1_max = 48497,
.agc1_min = 23593,
.agc2_max = 46531,
.agc2_min = 24904,
.agc1_pt1 = 0x65,
.agc1_pt2 = 0x69,
.agc1_slope1 = 0x51,
.agc1_slope2 = 0x27,
.agc2_pt1 = 0,
.agc2_pt2 = 0x33,
.agc2_slope1 = 0x35,
.agc2_slope2 = 0x37,
};
static struct dib3000mc_config stk3000p_dib3000p_config = {
&dib3000p_mt2060_agc_config,
.max_time = 0x196,
.ln_adc_level = 0x1cc7,
.output_mpeg2_in_188_bytes = 1,
.agc_command1 = 1,
.agc_command2 = 1,
};
static struct dibx000_agc_config dib3000p_panasonic_agc_config = {
.band_caps = BAND_VHF | BAND_UHF,
.setup = (1 << 8) | (5 << 5) | (1 << 4) | (1 << 3) | (0 << 2) | (2 << 0),
.agc1_max = 56361,
.agc1_min = 22282,
.agc2_max = 47841,
.agc2_min = 36045,
.agc1_pt1 = 0x3b,
.agc1_pt2 = 0x6b,
.agc1_slope1 = 0x55,
.agc1_slope2 = 0x1d,
.agc2_pt1 = 0,
.agc2_pt2 = 0x0a,
.agc2_slope1 = 0x95,
.agc2_slope2 = 0x1e,
};
#if defined(CONFIG_DVB_DIB3000MC) || \
(defined(CONFIG_DVB_DIB3000MC_MODULE) && defined(MODULE))
static struct dib3000mc_config mod3000p_dib3000p_config = {
&dib3000p_panasonic_agc_config,
.max_time = 0x51,
.ln_adc_level = 0x1cc7,
.output_mpeg2_in_188_bytes = 1,
.agc_command1 = 1,
.agc_command2 = 1,
};
int dibusb_dib3000mc_frontend_attach(struct dvb_usb_adapter *adap)
{
if (adap->dev->udev->descriptor.idVendor == USB_VID_LITEON &&
adap->dev->udev->descriptor.idProduct ==
USB_PID_LITEON_DVB_T_WARM) {
msleep(1000);
}
adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach,
&adap->dev->i2c_adap,
DEFAULT_DIB3000P_I2C_ADDRESS,
&mod3000p_dib3000p_config);
if ((adap->fe_adap[0].fe) == NULL)
adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach,
&adap->dev->i2c_adap,
DEFAULT_DIB3000MC_I2C_ADDRESS,
&mod3000p_dib3000p_config);
if ((adap->fe_adap[0].fe) != NULL) {
if (adap->priv != NULL) {
struct dibusb_state *st = adap->priv;
st->ops.pid_parse = dib3000mc_pid_parse;
st->ops.pid_ctrl = dib3000mc_pid_control;
}
return 0;
}
return -ENODEV;
}
EXPORT_SYMBOL(dibusb_dib3000mc_frontend_attach);
static struct mt2060_config stk3000p_mt2060_config = {
0x60
};
int dibusb_dib3000mc_tuner_attach(struct dvb_usb_adapter *adap)
{
struct dibusb_state *st = adap->priv;
u8 a,b;
u16 if1 = 1220;
struct i2c_adapter *tun_i2c;
// First IF calibration for Liteon Sticks
if (adap->dev->udev->descriptor.idVendor == USB_VID_LITEON &&
adap->dev->udev->descriptor.idProduct == USB_PID_LITEON_DVB_T_WARM) {
dibusb_read_eeprom_byte(adap->dev,0x7E,&a);
dibusb_read_eeprom_byte(adap->dev,0x7F,&b);
if (a == 0x00)
if1 += b;
else if (a == 0x80)
if1 -= b;
else
warn("LITE-ON DVB-T: Strange IF1 calibration :%2X %2X\n", a, b);
} else if (adap->dev->udev->descriptor.idVendor == USB_VID_DIBCOM &&
adap->dev->udev->descriptor.idProduct == USB_PID_DIBCOM_MOD3001_WARM) {
u8 desc;
dibusb_read_eeprom_byte(adap->dev, 7, &desc);
if (desc == 2) {
a = 127;
do {
dibusb_read_eeprom_byte(adap->dev, a, &desc);
a--;
} while (a > 7 && (desc == 0xff || desc == 0x00));
if (desc & 0x80)
if1 -= (0xff - desc);
else
if1 += desc;
}
}
tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe_adap[0].fe, 1);
if (dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c, &stk3000p_mt2060_config, if1) == NULL) {
/* not found - use panasonic pll parameters */
if (dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x60, tun_i2c, DVB_PLL_ENV57H1XD5) == NULL)
return -ENOMEM;
} else {
st->mt2060_present = 1;
/* set the correct parameters for the dib3000p */
dib3000mc_set_config(adap->fe_adap[0].fe, &stk3000p_dib3000p_config);
}
return 0;
}
EXPORT_SYMBOL(dibusb_dib3000mc_tuner_attach);
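/*
* Worked example (editor's note): for the Liteon stick above, eeprom byte
* 0x7E selects the sign and byte 0x7F the magnitude of the IF1 correction.
* a == 0x00, b == 0x1e gives if1 = 1220 + 30 = 1250; a == 0x80, b == 0x1e
* gives if1 = 1220 - 30 = 1190; any other sign byte keeps the default 1220
* and logs a warning.
*/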
#endif
/*
* common remote control stuff
*/
struct rc_map_table rc_map_dibusb_table[] = {
/* Key codes for the little Artec T1/Twinhan/HAMA remotes. */
{ 0x0016, KEY_POWER },
{ 0x0010, KEY_MUTE },
{ 0x0003, KEY_1 },
{ 0x0001, KEY_2 },
{ 0x0006, KEY_3 },
{ 0x0009, KEY_4 },
{ 0x001d, KEY_5 },
{ 0x001f, KEY_6 },
{ 0x000d, KEY_7 },
{ 0x0019, KEY_8 },
{ 0x001b, KEY_9 },
{ 0x0015, KEY_0 },
{ 0x0005, KEY_CHANNELUP },
{ 0x0002, KEY_CHANNELDOWN },
{ 0x001e, KEY_VOLUMEUP },
{ 0x000a, KEY_VOLUMEDOWN },
{ 0x0011, KEY_RECORD },
{ 0x0017, KEY_FAVORITES }, /* Heart symbol - Channel list. */
{ 0x0014, KEY_PLAY },
{ 0x001a, KEY_STOP },
{ 0x0040, KEY_REWIND },
{ 0x0012, KEY_FASTFORWARD },
{ 0x000e, KEY_PREVIOUS }, /* Recall - Previous channel. */
{ 0x004c, KEY_PAUSE },
{ 0x004d, KEY_SCREEN }, /* Full screen mode. */
{ 0x0054, KEY_AUDIO }, /* MTS - Switch to secondary audio. */
/* additional keys on the TwinHan VisionPlus that the Artec seemingly does not have */
{ 0x000c, KEY_CANCEL }, /* Cancel */
{ 0x001c, KEY_EPG }, /* EPG */
{ 0x0000, KEY_TAB }, /* Tab */
{ 0x0048, KEY_INFO }, /* Preview */
{ 0x0004, KEY_LIST }, /* RecordList */
{ 0x000f, KEY_TEXT }, /* Teletext */
/* Key codes for the KWorld/ADSTech/JetWay remote. */
{ 0x8612, KEY_POWER },
{ 0x860f, KEY_SELECT }, /* source */
{ 0x860c, KEY_UNKNOWN }, /* scan */
{ 0x860b, KEY_EPG },
{ 0x8610, KEY_MUTE },
{ 0x8601, KEY_1 },
{ 0x8602, KEY_2 },
{ 0x8603, KEY_3 },
{ 0x8604, KEY_4 },
{ 0x8605, KEY_5 },
{ 0x8606, KEY_6 },
{ 0x8607, KEY_7 },
{ 0x8608, KEY_8 },
{ 0x8609, KEY_9 },
{ 0x860a, KEY_0 },
{ 0x8618, KEY_ZOOM },
{ 0x861c, KEY_UNKNOWN }, /* preview */
{ 0x8613, KEY_UNKNOWN }, /* snap */
{ 0x8600, KEY_UNDO },
{ 0x861d, KEY_RECORD },
{ 0x860d, KEY_STOP },
{ 0x860e, KEY_PAUSE },
{ 0x8616, KEY_PLAY },
{ 0x8611, KEY_BACK },
{ 0x8619, KEY_FORWARD },
{ 0x8614, KEY_UNKNOWN }, /* pip */
{ 0x8615, KEY_ESC },
{ 0x861a, KEY_UP },
{ 0x861e, KEY_DOWN },
{ 0x861f, KEY_LEFT },
{ 0x861b, KEY_RIGHT },
/* Key codes for the DiBcom MOD3000 remote. */
{ 0x8000, KEY_MUTE },
{ 0x8001, KEY_TEXT },
{ 0x8002, KEY_HOME },
{ 0x8003, KEY_POWER },
{ 0x8004, KEY_RED },
{ 0x8005, KEY_GREEN },
{ 0x8006, KEY_YELLOW },
{ 0x8007, KEY_BLUE },
{ 0x8008, KEY_DVD },
{ 0x8009, KEY_AUDIO },
{ 0x800a, KEY_IMAGES }, /* Pictures */
{ 0x800b, KEY_VIDEO },
{ 0x800c, KEY_BACK },
{ 0x800d, KEY_UP },
{ 0x800e, KEY_RADIO },
{ 0x800f, KEY_EPG },
{ 0x8010, KEY_LEFT },
{ 0x8011, KEY_OK },
{ 0x8012, KEY_RIGHT },
{ 0x8013, KEY_UNKNOWN }, /* SAP */
{ 0x8014, KEY_TV },
{ 0x8015, KEY_DOWN },
{ 0x8016, KEY_MENU }, /* DVD Menu */
{ 0x8017, KEY_LAST },
{ 0x8018, KEY_RECORD },
{ 0x8019, KEY_STOP },
{ 0x801a, KEY_PAUSE },
{ 0x801b, KEY_PLAY },
{ 0x801c, KEY_PREVIOUS },
{ 0x801d, KEY_REWIND },
{ 0x801e, KEY_FASTFORWARD },
{ 0x801f, KEY_NEXT},
{ 0x8040, KEY_1 },
{ 0x8041, KEY_2 },
{ 0x8042, KEY_3 },
{ 0x8043, KEY_CHANNELUP },
{ 0x8044, KEY_4 },
{ 0x8045, KEY_5 },
{ 0x8046, KEY_6 },
{ 0x8047, KEY_CHANNELDOWN },
{ 0x8048, KEY_7 },
{ 0x8049, KEY_8 },
{ 0x804a, KEY_9 },
{ 0x804b, KEY_VOLUMEUP },
{ 0x804c, KEY_CLEAR },
{ 0x804d, KEY_0 },
{ 0x804e, KEY_ENTER },
{ 0x804f, KEY_VOLUMEDOWN },
};
EXPORT_SYMBOL(rc_map_dibusb_table);
int dibusb_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
u8 key[5],cmd = DIBUSB_REQ_POLL_REMOTE;
dvb_usb_generic_rw(d,&cmd,1,key,5,0);
dvb_usb_nec_rc_key_to_event(d,key,event,state);
if (key[0] != 0)
deb_info("key: %x %x %x %x %x\n",key[0],key[1],key[2],key[3],key[4]);
return 0;
}
EXPORT_SYMBOL(dibusb_rc_query);
| gpl-2.0 |
blumak2000/blumak_kernel_s6_Lollipop | drivers/hid/usbhid/usbkbd.c | 7503 | 11776 | /*
* Copyright (c) 1999-2001 Vojtech Pavlik
*
* USB HIDBP Keyboard support
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/usb/input.h>
#include <linux/hid.h>
/*
* Version Information
*/
#define DRIVER_VERSION ""
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
#define DRIVER_DESC "USB HID Boot Protocol keyboard driver"
#define DRIVER_LICENSE "GPL"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE(DRIVER_LICENSE);
static const unsigned char usb_kbd_keycode[256] = {
0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26,
27, 43, 43, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64,
65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106,
105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71,
72, 73, 82, 83, 86,127,116,117,183,184,185,186,187,188,189,190,
191,192,193,194,134,138,130,132,128,129,131,137,133,135,136,113,
115,114, 0, 0, 0,121, 0, 89, 93,124, 92, 94, 95, 0, 0, 0,
122,123, 90, 91, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113,
150,158,159,128,136,177,178,176,142,152,173,140
};
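/*
* Editor's note on the report layout consumed below (HID boot protocol):
* new[0] carries the modifier bitmap (usages 0xe0-0xe7, hence the
* "i + 224" lookup in usb_kbd_irq()), new[1] is reserved, and new[2..7]
* hold up to six concurrently pressed key usages. For example, the report
* { 0x02, 0x00, 0x04, 0, 0, 0, 0, 0 } means LeftShift + A.
*/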
/**
* struct usb_kbd - state of each attached keyboard
* @dev: input device associated with this keyboard
* @usbdev: usb device associated with this keyboard
* @old: data received in the past from the @irq URB representing which
* keys were pressed. By comparing with the current list of keys
* that are pressed, we are able to see key releases.
* @irq: URB for receiving a list of keys that are pressed when a
* new key is pressed or a key that was pressed is released.
* @led: URB for sending LEDs (e.g. numlock, ...)
* @newleds: data that will be sent with the @led URB representing which LEDs
* should be on
* @name: Name of the keyboard. @dev's name field points to this buffer
* @phys: Physical path of the keyboard. @dev's phys field points to this
* buffer
* @new: Buffer for the @irq URB
* @cr: Control request for @led URB
* @leds: Buffer for the @led URB
* @new_dma: DMA address for @irq URB
* @leds_dma: DMA address for @led URB
* @leds_lock: spinlock that protects @leds, @newleds, and @led_urb_submitted
* @led_urb_submitted: indicates whether @led is in progress, i.e. it has been
* submitted and its completion handler has not returned yet
* without resubmitting @led
*/
struct usb_kbd {
struct input_dev *dev;
struct usb_device *usbdev;
unsigned char old[8];
struct urb *irq, *led;
unsigned char newleds;
char name[128];
char phys[64];
unsigned char *new;
struct usb_ctrlrequest *cr;
unsigned char *leds;
dma_addr_t new_dma;
dma_addr_t leds_dma;
spinlock_t leds_lock;
bool led_urb_submitted;
};
static void usb_kbd_irq(struct urb *urb)
{
struct usb_kbd *kbd = urb->context;
int i;
switch (urb->status) {
case 0: /* success */
break;
case -ECONNRESET: /* unlink */
case -ENOENT:
case -ESHUTDOWN:
return;
/* -EPIPE: should clear the halt */
default: /* error */
goto resubmit;
}
for (i = 0; i < 8; i++)
input_report_key(kbd->dev, usb_kbd_keycode[i + 224], (kbd->new[0] >> i) & 1);
for (i = 2; i < 8; i++) {
if (kbd->old[i] > 3 && memscan(kbd->new + 2, kbd->old[i], 6) == kbd->new + 8) {
if (usb_kbd_keycode[kbd->old[i]])
input_report_key(kbd->dev, usb_kbd_keycode[kbd->old[i]], 0);
else
hid_info(urb->dev,
"Unknown key (scancode %#x) released.\n",
kbd->old[i]);
}
if (kbd->new[i] > 3 && memscan(kbd->old + 2, kbd->new[i], 6) == kbd->old + 8) {
if (usb_kbd_keycode[kbd->new[i]])
input_report_key(kbd->dev, usb_kbd_keycode[kbd->new[i]], 1);
else
hid_info(urb->dev,
"Unknown key (scancode %#x) released.\n",
kbd->new[i]);
}
}
input_sync(kbd->dev);
memcpy(kbd->old, kbd->new, 8);
resubmit:
i = usb_submit_urb (urb, GFP_ATOMIC);
if (i)
hid_err(urb->dev, "can't resubmit intr, %s-%s/input0, status %d",
kbd->usbdev->bus->bus_name,
kbd->usbdev->devpath, i);
}
static int usb_kbd_event(struct input_dev *dev, unsigned int type,
unsigned int code, int value)
{
unsigned long flags;
struct usb_kbd *kbd = input_get_drvdata(dev);
if (type != EV_LED)
return -1;
spin_lock_irqsave(&kbd->leds_lock, flags);
/* HID boot protocol LED report order: Kana is bit 4, Compose bit 3 */
kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 4) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
(!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) |
(!!test_bit(LED_NUML, dev->led));
if (kbd->led_urb_submitted){
spin_unlock_irqrestore(&kbd->leds_lock, flags);
return 0;
}
if (*(kbd->leds) == kbd->newleds){
spin_unlock_irqrestore(&kbd->leds_lock, flags);
return 0;
}
*(kbd->leds) = kbd->newleds;
kbd->led->dev = kbd->usbdev;
if (usb_submit_urb(kbd->led, GFP_ATOMIC))
pr_err("usb_submit_urb(leds) failed\n");
else
kbd->led_urb_submitted = true;
spin_unlock_irqrestore(&kbd->leds_lock, flags);
return 0;
}
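/*
* Editor's note: the single-byte LED output report built above follows the
* HID boot protocol bit order - bit 0 NumLock, bit 1 CapsLock, bit 2
* ScrollLock, bit 3 Compose, bit 4 Kana - so NumLock plus CapsLock yields
* newleds == 0x03.
*/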
static void usb_kbd_led(struct urb *urb)
{
unsigned long flags;
struct usb_kbd *kbd = urb->context;
if (urb->status)
hid_warn(urb->dev, "led urb status %d received\n",
urb->status);
spin_lock_irqsave(&kbd->leds_lock, flags);
if (*(kbd->leds) == kbd->newleds){
kbd->led_urb_submitted = false;
spin_unlock_irqrestore(&kbd->leds_lock, flags);
return;
}
*(kbd->leds) = kbd->newleds;
kbd->led->dev = kbd->usbdev;
if (usb_submit_urb(kbd->led, GFP_ATOMIC)){
hid_err(urb->dev, "usb_submit_urb(leds) failed\n");
kbd->led_urb_submitted = false;
}
spin_unlock_irqrestore(&kbd->leds_lock, flags);
}
static int usb_kbd_open(struct input_dev *dev)
{
struct usb_kbd *kbd = input_get_drvdata(dev);
kbd->irq->dev = kbd->usbdev;
if (usb_submit_urb(kbd->irq, GFP_KERNEL))
return -EIO;
return 0;
}
static void usb_kbd_close(struct input_dev *dev)
{
struct usb_kbd *kbd = input_get_drvdata(dev);
usb_kill_urb(kbd->irq);
}
static int usb_kbd_alloc_mem(struct usb_device *dev, struct usb_kbd *kbd)
{
if (!(kbd->irq = usb_alloc_urb(0, GFP_KERNEL)))
return -1;
if (!(kbd->led = usb_alloc_urb(0, GFP_KERNEL)))
return -1;
if (!(kbd->new = usb_alloc_coherent(dev, 8, GFP_ATOMIC, &kbd->new_dma)))
return -1;
if (!(kbd->cr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL)))
return -1;
if (!(kbd->leds = usb_alloc_coherent(dev, 1, GFP_ATOMIC, &kbd->leds_dma)))
return -1;
return 0;
}
static void usb_kbd_free_mem(struct usb_device *dev, struct usb_kbd *kbd)
{
usb_free_urb(kbd->irq);
usb_free_urb(kbd->led);
usb_free_coherent(dev, 8, kbd->new, kbd->new_dma);
kfree(kbd->cr);
usb_free_coherent(dev, 1, kbd->leds, kbd->leds_dma);
}
static int usb_kbd_probe(struct usb_interface *iface,
const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(iface);
struct usb_host_interface *interface;
struct usb_endpoint_descriptor *endpoint;
struct usb_kbd *kbd;
struct input_dev *input_dev;
int i, pipe, maxp;
int error = -ENOMEM;
interface = iface->cur_altsetting;
if (interface->desc.bNumEndpoints != 1)
return -ENODEV;
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -ENODEV;
pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
kbd = kzalloc(sizeof(struct usb_kbd), GFP_KERNEL);
input_dev = input_allocate_device();
if (!kbd || !input_dev)
goto fail1;
if (usb_kbd_alloc_mem(dev, kbd))
goto fail2;
kbd->usbdev = dev;
kbd->dev = input_dev;
spin_lock_init(&kbd->leds_lock);
if (dev->manufacturer)
strlcpy(kbd->name, dev->manufacturer, sizeof(kbd->name));
if (dev->product) {
if (dev->manufacturer)
strlcat(kbd->name, " ", sizeof(kbd->name));
strlcat(kbd->name, dev->product, sizeof(kbd->name));
}
if (!strlen(kbd->name))
snprintf(kbd->name, sizeof(kbd->name),
"USB HIDBP Keyboard %04x:%04x",
le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
usb_make_path(dev, kbd->phys, sizeof(kbd->phys));
strlcat(kbd->phys, "/input0", sizeof(kbd->phys));
input_dev->name = kbd->name;
input_dev->phys = kbd->phys;
usb_to_input_id(dev, &input_dev->id);
input_dev->dev.parent = &iface->dev;
input_set_drvdata(input_dev, kbd);
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_LED) |
BIT_MASK(EV_REP);
input_dev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL) |
BIT_MASK(LED_SCROLLL) | BIT_MASK(LED_COMPOSE) |
BIT_MASK(LED_KANA);
for (i = 0; i < 255; i++)
set_bit(usb_kbd_keycode[i], input_dev->keybit);
clear_bit(0, input_dev->keybit);
input_dev->event = usb_kbd_event;
input_dev->open = usb_kbd_open;
input_dev->close = usb_kbd_close;
usb_fill_int_urb(kbd->irq, dev, pipe,
kbd->new, (maxp > 8 ? 8 : maxp),
usb_kbd_irq, kbd, endpoint->bInterval);
kbd->irq->transfer_dma = kbd->new_dma;
kbd->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
kbd->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
kbd->cr->bRequest = 0x09;
kbd->cr->wValue = cpu_to_le16(0x200);
kbd->cr->wIndex = cpu_to_le16(interface->desc.bInterfaceNumber);
kbd->cr->wLength = cpu_to_le16(1);
usb_fill_control_urb(kbd->led, dev, usb_sndctrlpipe(dev, 0),
(void *) kbd->cr, kbd->leds, 1,
usb_kbd_led, kbd);
kbd->led->transfer_dma = kbd->leds_dma;
kbd->led->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
error = input_register_device(kbd->dev);
if (error)
goto fail2;
usb_set_intfdata(iface, kbd);
device_set_wakeup_enable(&dev->dev, 1);
return 0;
fail2:
usb_kbd_free_mem(dev, kbd);
fail1:
input_free_device(input_dev);
kfree(kbd);
return error;
}
static void usb_kbd_disconnect(struct usb_interface *intf)
{
struct usb_kbd *kbd = usb_get_intfdata (intf);
usb_set_intfdata(intf, NULL);
if (kbd) {
usb_kill_urb(kbd->irq);
input_unregister_device(kbd->dev);
usb_kill_urb(kbd->led);
usb_kbd_free_mem(interface_to_usbdev(intf), kbd);
kfree(kbd);
}
}
static struct usb_device_id usb_kbd_id_table [] = {
{ USB_INTERFACE_INFO(USB_INTERFACE_CLASS_HID, USB_INTERFACE_SUBCLASS_BOOT,
USB_INTERFACE_PROTOCOL_KEYBOARD) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, usb_kbd_id_table);
static struct usb_driver usb_kbd_driver = {
.name = "usbkbd",
.probe = usb_kbd_probe,
.disconnect = usb_kbd_disconnect,
.id_table = usb_kbd_id_table,
};
module_usb_driver(usb_kbd_driver);
| gpl-2.0 |
boa19861105/android_444_KitKat_kernel_htc_B2_UHL | arch/cris/arch-v32/mach-fs/vcs_hook.c | 9295 | 2062 | /*
* Call simulator hook. This is the part running in the
* simulated program.
*/
#include "vcs_hook.h"
#include <stdarg.h>
#include <arch-v32/hwregs/reg_map.h>
#include <arch-v32/hwregs/intr_vect_defs.h>
#define HOOK_TRIG_ADDR 0xb7000000 /* hook cvlog model reg address */
#define HOOK_MEM_BASE_ADDR 0xa0000000 /* csp4 (shared mem) base addr */
#define HOOK_DATA(offset) ((unsigned *)HOOK_MEM_BASE_ADDR)[offset]
#define VHOOK_DATA(offset) ((volatile unsigned *)HOOK_MEM_BASE_ADDR)[offset]
#define HOOK_TRIG(funcid) \
do { \
*((unsigned *) HOOK_TRIG_ADDR) = funcid; \
} while (0)
#define HOOK_DATA_BYTE(offset) ((unsigned char *)HOOK_MEM_BASE_ADDR)[offset]
int hook_call(unsigned id, unsigned pcnt, ...)
{
va_list ap;
unsigned i;
unsigned ret;
#ifdef USING_SOS
PREEMPT_OFF_SAVE();
#endif
/* pass parameters */
HOOK_DATA(0) = id;
/* Have to make hook_print_str a special case since we call with a
* parameter of byte type. Should perhaps be a separate
* hook_call. */
if (id == hook_print_str) {
int i;
char *str;
HOOK_DATA(1) = pcnt;
va_start(ap, pcnt);
str = (char *)va_arg(ap, unsigned);
for (i = 0; i != pcnt; i++)
HOOK_DATA_BYTE(8 + i) = str[i];
HOOK_DATA_BYTE(8 + i) = 0; /* null byte */
} else {
va_start(ap, pcnt);
for (i = 1; i <= pcnt; i++)
HOOK_DATA(i) = va_arg(ap, unsigned);
va_end(ap);
}
/* read from mem to make sure data has propagated to memory before
* triggering */
ret = *((volatile unsigned *)HOOK_MEM_BASE_ADDR);
/* trigger hook */
HOOK_TRIG(id);
/* wait for call to finish */
while (VHOOK_DATA(0) > 0) ;
/* extract return value */
ret = VHOOK_DATA(1);
#ifdef USING_SOS
PREEMPT_RESTORE();
#endif
return ret;
}
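/*
* Usage sketch (editor's note): the id goes into HOOK_DATA(0), the
* arguments into HOOK_DATA(1..pcnt), and after the busy-wait the result
* is read back from HOOK_DATA(1). A one-parameter call looks like
*
* unsigned ret = hook_call(hook_dog_timeout, 1, 60);
*/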
unsigned hook_buf(unsigned i)
{
return (HOOK_DATA(i));
}
void print_str(const char *str)
{
int i;
/* find null at end of string */
for (i = 1; str[i]; i++) ;
hook_call(hook_print_str, i, str);
}
void CPU_KICK_DOG(void)
{
(void)hook_call(hook_kick_dog, 0);
}
void CPU_WATCHDOG_TIMEOUT(unsigned t)
{
(void)hook_call(hook_dog_timeout, 1, t);
}
| gpl-2.0 |
Loller79/Solid_Kernel-STOCK-KK | arch/arm/mach-shmobile/console.c | 11855 | 1043 | /*
* SH-Mobile Console
*
* Copyright (C) 2010 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <mach/common.h>
#include <asm/mach/map.h>
void __init shmobile_setup_console(void)
{
parse_early_param();
/* Let earlyprintk output early console messages */
early_platform_driver_probe("earlyprintk", 1, 1);
}
| gpl-2.0 |
TheBr0ken/vigor_aosp_kernel | lib/xz/xz_dec_test.c | 13903 | 5278 | /*
* XZ decoder tester
*
* Author: Lasse Collin <lasse.collin@tukaani.org>
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/crc32.h>
#include <linux/xz.h>
/* Maximum supported dictionary size */
#define DICT_MAX (1 << 20)
/* Device name to pass to register_chrdev(). */
#define DEVICE_NAME "xz_dec_test"
/* Dynamically allocated device major number */
static int device_major;
/*
* We reuse the same decoder state, and thus can decode only one
* file at a time.
*/
static bool device_is_open;
/* XZ decoder state */
static struct xz_dec *state;
/*
* Return value of xz_dec_run(). We need to avoid calling xz_dec_run() after
* it has returned XZ_STREAM_END, so we make this static.
*/
static enum xz_ret ret;
/*
* Input and output buffers. The input buffer is used as a temporary safe
* place for the data coming from the userspace.
*/
static uint8_t buffer_in[1024];
static uint8_t buffer_out[1024];
/*
* Structure to pass the input and output buffers to the XZ decoder.
* A few of the fields are never modified so we initialize them here.
*/
static struct xz_buf buffers = {
.in = buffer_in,
.out = buffer_out,
.out_size = sizeof(buffer_out)
};
/*
* CRC32 of uncompressed data. This is used to give the user a simple way
* to check that the decoder produces correct output.
*/
static uint32_t crc;
static int xz_dec_test_open(struct inode *i, struct file *f)
{
if (device_is_open)
return -EBUSY;
device_is_open = true;
xz_dec_reset(state);
ret = XZ_OK;
crc = 0xFFFFFFFF;
buffers.in_pos = 0;
buffers.in_size = 0;
buffers.out_pos = 0;
printk(KERN_INFO DEVICE_NAME ": opened\n");
return 0;
}
static int xz_dec_test_release(struct inode *i, struct file *f)
{
device_is_open = false;
if (ret == XZ_OK)
printk(KERN_INFO DEVICE_NAME ": input was truncated\n");
printk(KERN_INFO DEVICE_NAME ": closed\n");
return 0;
}
/*
* Decode the data given to us from the userspace. CRC32 of the uncompressed
* data is calculated and is printed at the end of successful decoding. The
* uncompressed data isn't stored anywhere for further use.
*
* The .xz file must have exactly one Stream and no Stream Padding. The data
* after the first Stream is considered to be garbage.
*/
static ssize_t xz_dec_test_write(struct file *file, const char __user *buf,
size_t size, loff_t *pos)
{
size_t remaining;
if (ret != XZ_OK) {
if (size > 0)
printk(KERN_INFO DEVICE_NAME ": %zu bytes of "
"garbage at the end of the file\n",
size);
return -ENOSPC;
}
printk(KERN_INFO DEVICE_NAME ": decoding %zu bytes of input\n",
size);
remaining = size;
while ((remaining > 0 || buffers.out_pos == buffers.out_size)
&& ret == XZ_OK) {
if (buffers.in_pos == buffers.in_size) {
buffers.in_pos = 0;
buffers.in_size = min(remaining, sizeof(buffer_in));
if (copy_from_user(buffer_in, buf, buffers.in_size))
return -EFAULT;
buf += buffers.in_size;
remaining -= buffers.in_size;
}
buffers.out_pos = 0;
ret = xz_dec_run(state, &buffers);
crc = crc32(crc, buffer_out, buffers.out_pos);
}
switch (ret) {
case XZ_OK:
printk(KERN_INFO DEVICE_NAME ": XZ_OK\n");
return size;
case XZ_STREAM_END:
printk(KERN_INFO DEVICE_NAME ": XZ_STREAM_END, "
"CRC32 = 0x%08X\n", ~crc);
return size - remaining - (buffers.in_size - buffers.in_pos);
case XZ_MEMLIMIT_ERROR:
printk(KERN_INFO DEVICE_NAME ": XZ_MEMLIMIT_ERROR\n");
break;
case XZ_FORMAT_ERROR:
printk(KERN_INFO DEVICE_NAME ": XZ_FORMAT_ERROR\n");
break;
case XZ_OPTIONS_ERROR:
printk(KERN_INFO DEVICE_NAME ": XZ_OPTIONS_ERROR\n");
break;
case XZ_DATA_ERROR:
printk(KERN_INFO DEVICE_NAME ": XZ_DATA_ERROR\n");
break;
case XZ_BUF_ERROR:
printk(KERN_INFO DEVICE_NAME ": XZ_BUF_ERROR\n");
break;
default:
printk(KERN_INFO DEVICE_NAME ": Bug detected!\n");
break;
}
return -EIO;
}
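/*
* User-space usage sketch (editor's note; assumes the device node was
* created as /dev/xz_dec_test per the init message below - this program is
* not part of the module):
*
* #include <fcntl.h>
* #include <unistd.h>
*
* int main(int argc, char **argv)
* {
* char buf[1024];
* ssize_t n;
* int in, dev;
* if (argc < 2)
* return 1;
* in = open(argv[1], O_RDONLY);
* dev = open("/dev/xz_dec_test", O_WRONLY);
* if (in < 0 || dev < 0)
* return 1;
* while ((n = read(in, buf, sizeof(buf))) > 0)
* if (write(dev, buf, n) != n)
* break; // stream ended or the decoder failed
* close(dev); // the module logs XZ_STREAM_END and the CRC32
* close(in);
* return 0;
* }
*/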
/* Allocate the XZ decoder state and register the character device. */
static int __init xz_dec_test_init(void)
{
static const struct file_operations fileops = {
.owner = THIS_MODULE,
.open = &xz_dec_test_open,
.release = &xz_dec_test_release,
.write = &xz_dec_test_write
};
state = xz_dec_init(XZ_PREALLOC, DICT_MAX);
if (state == NULL)
return -ENOMEM;
device_major = register_chrdev(0, DEVICE_NAME, &fileops);
if (device_major < 0) {
xz_dec_end(state);
return device_major;
}
printk(KERN_INFO DEVICE_NAME ": module loaded\n");
printk(KERN_INFO DEVICE_NAME ": Create a device node with "
"'mknod " DEVICE_NAME " c %d 0' and write .xz files "
"to it.\n", device_major);
return 0;
}
static void __exit xz_dec_test_exit(void)
{
unregister_chrdev(device_major, DEVICE_NAME);
xz_dec_end(state);
printk(KERN_INFO DEVICE_NAME ": module unloaded\n");
}
module_init(xz_dec_test_init);
module_exit(xz_dec_test_exit);
MODULE_DESCRIPTION("XZ decompressor tester");
MODULE_VERSION("1.0");
MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org>");
/*
* This code is in the public domain, but in Linux it's simplest to just
* say it's GPL and consider the authors as the copyright holders.
*/
MODULE_LICENSE("GPL");
| gpl-2.0 |
binsys/qemu-linaro | backends/rng.c | 80 | 2578 | /*
* QEMU Random Number Generator Backend
*
* Copyright IBM, Corp. 2012
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "sysemu/rng.h"
#include "qapi/qmp/qerror.h"
#include "qom/object_interfaces.h"
void rng_backend_request_entropy(RngBackend *s, size_t size,
EntropyReceiveFunc *receive_entropy,
void *opaque)
{
RngBackendClass *k = RNG_BACKEND_GET_CLASS(s);
if (k->request_entropy) {
k->request_entropy(s, size, receive_entropy, opaque);
}
}
void rng_backend_cancel_requests(RngBackend *s)
{
RngBackendClass *k = RNG_BACKEND_GET_CLASS(s);
if (k->cancel_requests) {
k->cancel_requests(s);
}
}
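/*
* Consumer sketch (editor's note; my_receive and my_opaque are hypothetical
* names, and the callback signature is assumed to match EntropyReceiveFunc
* from sysemu/rng.h): a front end such as virtio-rng asks the backend for
* bytes and is handed them asynchronously:
*
* static void my_receive(void *opaque, const void *data, size_t size)
* {
* // copy up to size bytes of entropy from data toward the guest
* }
*
* rng_backend_request_entropy(backend, 64, my_receive, my_opaque);
*/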
static bool rng_backend_prop_get_opened(Object *obj, Error **errp)
{
RngBackend *s = RNG_BACKEND(obj);
return s->opened;
}
static void rng_backend_complete(UserCreatable *uc, Error **errp)
{
object_property_set_bool(OBJECT(uc), true, "opened", errp);
}
static void rng_backend_prop_set_opened(Object *obj, bool value, Error **errp)
{
RngBackend *s = RNG_BACKEND(obj);
RngBackendClass *k = RNG_BACKEND_GET_CLASS(s);
Error *local_err = NULL;
if (value == s->opened) {
return;
}
if (!value && s->opened) {
error_set(errp, QERR_PERMISSION_DENIED);
return;
}
if (k->opened) {
k->opened(s, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
}
s->opened = true;
}
static void rng_backend_init(Object *obj)
{
object_property_add_bool(obj, "opened",
rng_backend_prop_get_opened,
rng_backend_prop_set_opened,
NULL);
}
static void rng_backend_class_init(ObjectClass *oc, void *data)
{
UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
ucc->complete = rng_backend_complete;
}
static const TypeInfo rng_backend_info = {
.name = TYPE_RNG_BACKEND,
.parent = TYPE_OBJECT,
.instance_size = sizeof(RngBackend),
.instance_init = rng_backend_init,
.class_size = sizeof(RngBackendClass),
.class_init = rng_backend_class_init,
.abstract = true,
.interfaces = (InterfaceInfo[]) {
{ TYPE_USER_CREATABLE },
{ }
}
};
static void register_types(void)
{
type_register_static(&rng_backend_info);
}
type_init(register_types);
| gpl-2.0 |
atondwal/linux-2 | kernel/trace/trace.c | 80 | 120312 | /*
* ring buffer based function tracer
*
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
* Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
*
* Originally taken from the RT patch by:
* Arnaldo Carvalho de Melo <acme@redhat.com>
*
* Based on code from the latency_tracer, that is:
* Copyright (C) 2004-2006 Ingo Molnar
* Copyright (C) 2004 William Lee Irwin III
*/
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include "trace.h"
#include "trace_output.h"
/*
* On boot up, the ring buffer is set to the minimum size, so that
* we do not waste memory on systems that are not using tracing.
*/
int ring_buffer_expanded;
/*
* We need to change this state when a selftest is running.
* A selftest will lurk into the ring-buffer to count the
* entries inserted during the selftest although some concurrent
* insertions into the ring-buffer such as trace_printk could occur
* at the same time, giving false positive or negative results.
*/
static bool __read_mostly tracing_selftest_running;
/*
* If a tracer is running, we do not want to run SELFTEST.
*/
bool __read_mostly tracing_selftest_disabled;
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
{ }
};
static struct tracer_flags dummy_tracer_flags = {
.val = 0,
.opts = dummy_tracer_opt
};
static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
return 0;
}
/*
* Kill all tracing for good (never come back).
* It is initialized to 1 but will turn to zero if the initialization
* of the tracer is successful. But that is the only place that sets
* this back to zero.
*/
static int tracing_disabled = 1;
DEFINE_PER_CPU(int, ftrace_cpu_disabled);
cpumask_var_t __read_mostly tracing_buffer_mask;
/*
* ftrace_dump_on_oops - variable to dump ftrace buffer on oops
*
* If there is an oops (or kernel panic) and the ftrace_dump_on_oops
* is set, then ftrace_dump is called. This will output the contents
* of the ftrace buffers to the console. This is very useful for
* capturing traces that lead to crashes and outputting them to a
* serial console.
*
* It is default off, but you can enable it with either specifying
* "ftrace_dump_on_oops" in the kernel command line, or setting
* /proc/sys/kernel/ftrace_dump_on_oops
* Set 1 if you want to dump buffers of all CPUs
* Set 2 if you want to dump the buffer of the CPU that triggered oops
*/
enum ftrace_dump_mode ftrace_dump_on_oops;
static int tracing_set_tracer(const char *buf);
#define MAX_TRACER_SIZE 100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;
static int __init set_cmdline_ftrace(char *str)
{
strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
default_bootup_tracer = bootup_tracer_buf;
/* We are using ftrace early, expand it */
ring_buffer_expanded = 1;
return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
if (*str++ != '=' || !*str) {
ftrace_dump_on_oops = DUMP_ALL;
return 1;
}
if (!strcmp("orig_cpu", str)) {
ftrace_dump_on_oops = DUMP_ORIG;
return 1;
}
return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
unsigned long long ns2usecs(cycle_t nsec)
{
nsec += 500;
do_div(nsec, 1000);
return nsec;
}
/*
* The global_trace is the descriptor that holds the tracing
* buffers for the live tracing. For each CPU, it contains
* a linked list of pages that will store trace entries. The
* page descriptor of the pages in the memory is used to hold
* the linked list by linking the lru item in the page descriptor
* to each of the pages in the buffer per CPU.
*
* For each active CPU there is a data field that holds the
* pages for the buffer for that CPU. Each CPU has the same number
* of pages allocated for its buffer.
*/
static struct trace_array global_trace;
static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
int filter_current_check_discard(struct ring_buffer *buffer,
struct ftrace_event_call *call, void *rec,
struct ring_buffer_event *event)
{
return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);
cycle_t ftrace_now(int cpu)
{
u64 ts;
/* Early boot up does not have a buffer yet */
if (!global_trace.buffer)
return trace_clock_local();
ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);
return ts;
}
/*
* The max_tr is used to snapshot the global_trace when a maximum
* latency is reached. Some tracers will use this to store a maximum
* trace while it continues examining live traces.
*
* The buffers for the max_tr are set up the same as the global_trace.
* When a snapshot is taken, the linked list of the max_tr is swapped
* with the linked list of the global_trace and the buffers are reset for
* the global_trace so the tracing can continue.
*/
static struct trace_array max_tr;
static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
/* tracer_enabled is used to toggle activation of a tracer */
static int tracer_enabled = 1;
/**
* tracing_is_enabled - return tracer_enabled status
*
* This function is used by other tracers to know the status
* of the tracer_enabled flag. Tracers may use this function
* to know if it should enable their features when starting
* up. See irqsoff tracer for an example (start_irqsoff_tracer).
*/
int tracing_is_enabled(void)
{
return tracer_enabled;
}
/*
* trace_buf_size is the size in bytes that is allocated
* for a buffer. Note, the number of bytes is always rounded
* to page size.
*
* This number is purposely set to a low number of 16384.
* If a dump on oops happens, it is much appreciated not to have
* to wait for all that output. Anyway, this is configurable at
* both boot time and run time.
*/
#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;
/* current_trace points to the tracer that is currently active */
static struct tracer *current_trace __read_mostly;
/*
* trace_types_lock is used to protect the trace_types list.
*/
static DEFINE_MUTEX(trace_types_lock);
/*
* serialize the access of the ring buffer
*
* The ring buffer serializes readers, but that is only low-level protection.
* The validity of the events (which are returned by ring_buffer_peek(), etc.)
* is not protected by the ring buffer.
*
* The content of events may become garbage if we allow other processes to
* consume these events concurrently:
* A) the page of the consumed events may become a normal page
* (not a reader page) in the ring buffer, and this page will be rewritten
* by the events producer.
* B) The page of the consumed events may become a page for splice_read,
* and this page will be returned to the system.
*
* These primitives allow multiple processes to access different cpu ring
* buffers concurrently.
*
* These primitives don't distinguish read-only and read-consume access.
* Multiple read-only accesses are also serialized.
*/
#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
static inline void trace_access_lock(int cpu)
{
if (cpu == TRACE_PIPE_ALL_CPU) {
/* gain it for accessing the whole ring buffer. */
down_write(&all_cpu_access_lock);
} else {
/* gain it for accessing a cpu ring buffer. */
/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
down_read(&all_cpu_access_lock);
/* Secondly block other access to this @cpu ring buffer. */
mutex_lock(&per_cpu(cpu_access_lock, cpu));
}
}
static inline void trace_access_unlock(int cpu)
{
if (cpu == TRACE_PIPE_ALL_CPU) {
up_write(&all_cpu_access_lock);
} else {
mutex_unlock(&per_cpu(cpu_access_lock, cpu));
up_read(&all_cpu_access_lock);
}
}
static inline void trace_access_lock_init(void)
{
int cpu;
for_each_possible_cpu(cpu)
mutex_init(&per_cpu(cpu_access_lock, cpu));
}
#else
static DEFINE_MUTEX(access_lock);
static inline void trace_access_lock(int cpu)
{
(void)cpu;
mutex_lock(&access_lock);
}
static inline void trace_access_unlock(int cpu)
{
(void)cpu;
mutex_unlock(&access_lock);
}
static inline void trace_access_lock_init(void)
{
}
#endif
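/*
* Reader-side usage pattern (editor's sketch; the pipe readers elsewhere in
* this file follow it):
*
* trace_access_lock(iter->cpu_file);
* ... peek at or consume events of that cpu buffer ...
* trace_access_unlock(iter->cpu_file);
*
* Passing TRACE_PIPE_ALL_CPU takes the lock exclusively against all
* per-cpu readers.
*/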
/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
TRACE_ITER_IRQ_INFO;
static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);
static void wakeup_work_handler(struct work_struct *work)
{
wake_up(&trace_wait);
}
static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
/**
* tracing_on - enable tracing buffers
*
* This function enables tracing buffers that may have been
* disabled with tracing_off.
*/
void tracing_on(void)
{
if (global_trace.buffer)
ring_buffer_record_on(global_trace.buffer);
/*
* This flag is only looked at when buffers haven't been
* allocated yet. We don't really care about the race
* between setting this flag and actually turning
* on the buffer.
*/
global_trace.buffer_disabled = 0;
}
EXPORT_SYMBOL_GPL(tracing_on);
/**
* tracing_off - turn off tracing buffers
*
* This function stops the tracing buffers from recording data.
* It does not disable any overhead the tracers themselves may
* be causing. This function simply causes all recording to
* the ring buffers to fail.
*/
void tracing_off(void)
{
if (global_trace.buffer)
ring_buffer_record_off(global_trace.buffer);
/*
* This flag is only looked at when buffers haven't been
* allocated yet. We don't really care about the race
* between setting this flag and actually turning
* on the buffer.
*/
global_trace.buffer_disabled = 1;
}
EXPORT_SYMBOL_GPL(tracing_off);
/**
* tracing_is_on - show state of ring buffers enabled
*/
int tracing_is_on(void)
{
if (global_trace.buffer)
return ring_buffer_record_is_on(global_trace.buffer);
return !global_trace.buffer_disabled;
}
EXPORT_SYMBOL_GPL(tracing_is_on);
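/*
* Typical use from other kernel code (editor's sketch; the condition is
* hypothetical): stop recording right after the event of interest so the
* trace tail survives for inspection.
*
* if (suspicious_condition())
* tracing_off();
*/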
/**
* trace_wake_up - wake up tasks waiting for trace input
*
* Schedules a delayed work to wake up any task that is blocked on the
* trace_wait queue. This is used with trace_poll for tasks polling the
* trace.
*/
void trace_wake_up(void)
{
const unsigned long delay = msecs_to_jiffies(2);
if (trace_flags & TRACE_ITER_BLOCK)
return;
schedule_delayed_work(&wakeup_work, delay);
}
static int __init set_buf_size(char *str)
{
unsigned long buf_size;
if (!str)
return 0;
buf_size = memparse(str, &str);
/* nr_entries can not be zero */
if (buf_size == 0)
return 0;
trace_buf_size = buf_size;
return 1;
}
__setup("trace_buf_size=", set_buf_size);
static int __init set_tracing_thresh(char *str)
{
unsigned long threshold;
int ret;
if (!str)
return 0;
ret = strict_strtoul(str, 0, &threshold);
if (ret < 0)
return 0;
tracing_thresh = threshold * 1000;
return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
return nsecs / 1000;
}
/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
"print-parent",
"sym-offset",
"sym-addr",
"verbose",
"raw",
"hex",
"bin",
"block",
"stacktrace",
"trace_printk",
"ftrace_preempt",
"branch",
"annotate",
"userstacktrace",
"sym-userobj",
"printk-msg-only",
"context-info",
"latency-format",
"sleep-time",
"graph-time",
"record-cmd",
"overwrite",
"disable_on_free",
"irq-info",
NULL
};
static struct {
u64 (*func)(void);
const char *name;
} trace_clocks[] = {
{ trace_clock_local, "local" },
{ trace_clock_global, "global" },
{ trace_clock_counter, "counter" },
};
int trace_clock_id;
/*
* trace_parser_get_init - gets the buffer for trace parser
*/
int trace_parser_get_init(struct trace_parser *parser, int size)
{
memset(parser, 0, sizeof(*parser));
parser->buffer = kmalloc(size, GFP_KERNEL);
if (!parser->buffer)
return 1;
parser->size = size;
return 0;
}
/*
* trace_parser_put - frees the buffer for trace parser
*/
void trace_parser_put(struct trace_parser *parser)
{
kfree(parser->buffer);
}
/*
* trace_get_user - reads the user input string separated by space
* (matched by isspace(ch))
*
* For each string found the 'struct trace_parser' is updated,
* and the function returns.
*
* Returns number of bytes read.
*
* See kernel/trace/trace.h for 'struct trace_parser' details.
*/
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char ch;
size_t read = 0;
ssize_t ret;
if (!*ppos)
trace_parser_clear(parser);
ret = get_user(ch, ubuf++);
if (ret)
goto out;
read++;
cnt--;
/*
* The parser is not finished with the last write;
* continue reading the user input without skipping spaces.
*/
if (!parser->cont) {
/* skip white space */
while (cnt && isspace(ch)) {
ret = get_user(ch, ubuf++);
if (ret)
goto out;
read++;
cnt--;
}
/* only spaces were written */
if (isspace(ch)) {
*ppos += read;
ret = read;
goto out;
}
parser->idx = 0;
}
/* read the non-space input */
while (cnt && !isspace(ch)) {
if (parser->idx < parser->size - 1)
parser->buffer[parser->idx++] = ch;
else {
ret = -EINVAL;
goto out;
}
ret = get_user(ch, ubuf++);
if (ret)
goto out;
read++;
cnt--;
}
/* We either got finished input or we have to wait for another call. */
if (isspace(ch)) {
parser->buffer[parser->idx] = 0;
parser->cont = false;
} else {
parser->cont = true;
parser->buffer[parser->idx++] = ch;
}
*ppos += read;
ret = read;
out:
return ret;
}
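/*
* Worked example (editor's note): a user write of "foo bar\n" is consumed
* token by token. The first call copies 'f','o','o', sees the space,
* NUL-terminates the buffer and returns 4 with parser->cont == false; the
* caller passes that count back, so "bar\n" arrives on the next call,
* which skips nothing and yields "bar". If the input ends mid-token,
* parser->cont is set and the next call appends to the same buffer instead
* of skipping leading spaces.
*/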
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
int len;
int ret;
if (!cnt)
return 0;
if (s->len <= s->readpos)
return -EBUSY;
len = s->len - s->readpos;
if (cnt > len)
cnt = len;
ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
if (ret == cnt)
return -EFAULT;
cnt -= ret;
s->readpos += cnt;
return cnt;
}
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
int len;
if (s->len <= s->readpos)
return -EBUSY;
len = s->len - s->readpos;
if (cnt > len)
cnt = len;
memcpy(buf, s->buffer + s->readpos, cnt);
s->readpos += cnt;
return cnt;
}
/*
* ftrace_max_lock is used to protect the swapping of buffers
* when taking a max snapshot. The buffers themselves are
* protected by per_cpu spinlocks. But the action of the swap
* needs its own lock.
*
* This is defined as a arch_spinlock_t in order to help
* with performance when lockdep debugging is enabled.
*
* It is also used in other places outside the update_max_tr
* so it needs to be defined outside of the
* CONFIG_TRACER_MAX_TRACE.
*/
static arch_spinlock_t ftrace_max_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
unsigned long __read_mostly tracing_thresh;
#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly tracing_max_latency;
/*
* Copy the new maximum trace into the separate maximum-trace
* structure. (this way the maximum trace is permanently saved,
* for later retrieval via /sys/kernel/debug/tracing/latency_trace)
*/
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
struct trace_array_cpu *data = tr->data[cpu];
struct trace_array_cpu *max_data;
max_tr.cpu = cpu;
max_tr.time_start = data->preempt_timestamp;
max_data = max_tr.data[cpu];
max_data->saved_latency = tracing_max_latency;
max_data->critical_start = data->critical_start;
max_data->critical_end = data->critical_end;
memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
max_data->pid = tsk->pid;
max_data->uid = task_uid(tsk);
max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
max_data->policy = tsk->policy;
max_data->rt_priority = tsk->rt_priority;
/* record this tasks comm */
tracing_record_cmdline(tsk);
}
/**
* update_max_tr - snapshot all trace buffers from global_trace to max_tr
* @tr: tracer
* @tsk: the task with the latency
* @cpu: The cpu that initiated the trace.
*
* Flip the buffers between the @tr and the max_tr and record information
* about which task was the cause of this latency.
*/
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
struct ring_buffer *buf = tr->buffer;
if (trace_stop_count)
return;
WARN_ON_ONCE(!irqs_disabled());
if (!current_trace->use_max_tr) {
WARN_ON_ONCE(1);
return;
}
arch_spin_lock(&ftrace_max_lock);
tr->buffer = max_tr.buffer;
max_tr.buffer = buf;
__update_max_tr(tr, tsk, cpu);
arch_spin_unlock(&ftrace_max_lock);
}
/**
* update_max_tr_single - only copy one trace over, and reset the rest
* @tr: tracer
* @tsk: task with the latency
* @cpu: the cpu of the buffer to copy.
*
* Flip the trace of a single CPU buffer between the @tr and the max_tr.
*/
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
int ret;
if (trace_stop_count)
return;
WARN_ON_ONCE(!irqs_disabled());
if (!current_trace->use_max_tr) {
WARN_ON_ONCE(1);
return;
}
arch_spin_lock(&ftrace_max_lock);
ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
if (ret == -EBUSY) {
/*
* We failed to swap the buffer due to a commit taking
* place on this CPU. We fail to record, but we reset
* the max trace buffer (no one writes directly to it)
* and flag that it failed.
*/
trace_array_printk(&max_tr, _THIS_IP_,
"Failed to swap buffers due to commit in progress\n");
}
WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
__update_max_tr(tr, tsk, cpu);
arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
/**
* register_tracer - register a tracer with the ftrace system.
* @type - the plugin for the tracer
*
* Register a new plugin tracer.
*/
int register_tracer(struct tracer *type)
{
struct tracer *t;
int ret = 0;
if (!type->name) {
pr_info("Tracer must have a name\n");
return -1;
}
if (strlen(type->name) >= MAX_TRACER_SIZE) {
pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
return -1;
}
mutex_lock(&trace_types_lock);
tracing_selftest_running = true;
for (t = trace_types; t; t = t->next) {
if (strcmp(type->name, t->name) == 0) {
/* already found */
pr_info("Tracer %s already registered\n",
type->name);
ret = -1;
goto out;
}
}
if (!type->set_flag)
type->set_flag = &dummy_set_flag;
if (!type->flags)
type->flags = &dummy_tracer_flags;
else
if (!type->flags->opts)
type->flags->opts = dummy_tracer_opt;
if (!type->wait_pipe)
type->wait_pipe = default_wait_pipe;
#ifdef CONFIG_FTRACE_STARTUP_TEST
if (type->selftest && !tracing_selftest_disabled) {
struct tracer *saved_tracer = current_trace;
struct trace_array *tr = &global_trace;
/*
* Run a selftest on this tracer.
* Here we reset the trace buffer, and set the current
* tracer to be this tracer. The tracer can then run some
* internal tracing to verify that everything is in order.
* If we fail, we do not register this tracer.
*/
tracing_reset_online_cpus(tr);
current_trace = type;
/* If we expanded the buffers, make sure the max is expanded too */
if (ring_buffer_expanded && type->use_max_tr)
ring_buffer_resize(max_tr.buffer, trace_buf_size,
RING_BUFFER_ALL_CPUS);
/* the test is responsible for initializing and enabling */
pr_info("Testing tracer %s: ", type->name);
ret = type->selftest(type, tr);
/* the test is responsible for resetting too */
current_trace = saved_tracer;
if (ret) {
printk(KERN_CONT "FAILED!\n");
/* Add the warning after printing 'FAILED' */
WARN_ON(1);
goto out;
}
/* Only reset on passing, to avoid touching corrupted buffers */
tracing_reset_online_cpus(tr);
/* Shrink the max buffer again */
if (ring_buffer_expanded && type->use_max_tr)
ring_buffer_resize(max_tr.buffer, 1,
RING_BUFFER_ALL_CPUS);
printk(KERN_CONT "PASSED\n");
}
#endif
type->next = trace_types;
trace_types = type;
out:
tracing_selftest_running = false;
mutex_unlock(&trace_types_lock);
if (ret || !default_bootup_tracer)
goto out_unlock;
if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
goto out_unlock;
printk(KERN_INFO "Starting tracer '%s'\n", type->name);
/* Do we want this tracer to start on bootup? */
tracing_set_tracer(type->name);
default_bootup_tracer = NULL;
/* disable other selftests, since this will break it. */
tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
type->name);
#endif
out_unlock:
return ret;
}
void unregister_tracer(struct tracer *type)
{
struct tracer **t;
mutex_lock(&trace_types_lock);
for (t = &trace_types; *t; t = &(*t)->next) {
if (*t == type)
goto found;
}
pr_info("Tracer %s not registered\n", type->name);
goto out;
found:
*t = (*t)->next;
if (type == current_trace && tracer_enabled) {
tracer_enabled = 0;
tracing_stop();
if (current_trace->stop)
current_trace->stop(&global_trace);
current_trace = &nop_trace;
}
out:
mutex_unlock(&trace_types_lock);
}
void tracing_reset(struct trace_array *tr, int cpu)
{
struct ring_buffer *buffer = tr->buffer;
ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */
synchronize_sched();
ring_buffer_reset_cpu(buffer, cpu);
ring_buffer_record_enable(buffer);
}
void tracing_reset_online_cpus(struct trace_array *tr)
{
struct ring_buffer *buffer = tr->buffer;
int cpu;
ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */
synchronize_sched();
tr->time_start = ftrace_now(tr->cpu);
for_each_online_cpu(cpu)
ring_buffer_reset_cpu(buffer, cpu);
ring_buffer_record_enable(buffer);
}
void tracing_reset_current(int cpu)
{
tracing_reset(&global_trace, cpu);
}
void tracing_reset_current_online_cpus(void)
{
tracing_reset_online_cpus(&global_trace);
}
#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
static void trace_init_cmdlines(void)
{
memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
cmdline_idx = 0;
}
int is_tracing_stopped(void)
{
return trace_stop_count;
}
/**
* ftrace_off_permanent - disable all ftrace code permanently
*
* This should only be called when a serious anomaly has
* been detected. This will turn off the function tracing,
* ring buffers, and other tracing utilities. It takes no
* locks and can be called from any context.
*/
void ftrace_off_permanent(void)
{
tracing_disabled = 1;
ftrace_stop();
tracing_off_permanent();
}
/**
* tracing_start - quick start of the tracer
*
* If tracing is enabled but was stopped by tracing_stop,
* this will start the tracer back up.
*/
void tracing_start(void)
{
struct ring_buffer *buffer;
unsigned long flags;
if (tracing_disabled)
return;
raw_spin_lock_irqsave(&tracing_start_lock, flags);
if (--trace_stop_count) {
if (trace_stop_count < 0) {
/* Someone screwed up their debugging */
WARN_ON_ONCE(1);
trace_stop_count = 0;
}
goto out;
}
/* Prevent the buffers from switching */
arch_spin_lock(&ftrace_max_lock);
buffer = global_trace.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
buffer = max_tr.buffer;
if (buffer)
ring_buffer_record_enable(buffer);
arch_spin_unlock(&ftrace_max_lock);
ftrace_start();
out:
raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}
/**
* tracing_stop - quick stop of the tracer
*
* Light weight way to stop tracing. Use in conjunction with
* tracing_start.
*/
void tracing_stop(void)
{
struct ring_buffer *buffer;
unsigned long flags;
ftrace_stop();
raw_spin_lock_irqsave(&tracing_start_lock, flags);
if (trace_stop_count++)
goto out;
/* Prevent the buffers from switching */
arch_spin_lock(&ftrace_max_lock);
buffer = global_trace.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
buffer = max_tr.buffer;
if (buffer)
ring_buffer_record_disable(buffer);
arch_spin_unlock(&ftrace_max_lock);
out:
raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}
void trace_stop_cmdline_recording(void);
static void trace_save_cmdline(struct task_struct *tsk)
{
unsigned pid, idx;
if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
return;
/*
* It's not the end of the world if we don't get
* the lock, but we also don't want to spin
* nor do we want to disable interrupts,
* so if we miss here, then better luck next time.
*/
if (!arch_spin_trylock(&trace_cmdline_lock))
return;
idx = map_pid_to_cmdline[tsk->pid];
if (idx == NO_CMDLINE_MAP) {
idx = (cmdline_idx + 1) % SAVED_CMDLINES;
/*
* Check whether the cmdline buffer at idx has a pid
* mapped. We are going to overwrite that entry so we
* need to clear the map_pid_to_cmdline. Otherwise we
* would read the new comm for the old pid.
*/
pid = map_cmdline_to_pid[idx];
if (pid != NO_CMDLINE_MAP)
map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
map_cmdline_to_pid[idx] = tsk->pid;
map_pid_to_cmdline[tsk->pid] = idx;
cmdline_idx = idx;
}
memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
arch_spin_unlock(&trace_cmdline_lock);
}
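/*
 * trace_find_cmdline - find the saved comm for a pid
 *
 * Copies the comm last recorded for @pid into @comm, or a placeholder
 * ("<idle>" for pid 0, "<...>" when no mapping was recorded or the
 * pid is out of range).
 */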
void trace_find_cmdline(int pid, char comm[])
{
unsigned map;
if (!pid) {
strcpy(comm, "<idle>");
return;
}
if (WARN_ON_ONCE(pid < 0)) {
strcpy(comm, "<XXX>");
return;
}
if (pid > PID_MAX_DEFAULT) {
strcpy(comm, "<...>");
return;
}
preempt_disable();
arch_spin_lock(&trace_cmdline_lock);
map = map_pid_to_cmdline[pid];
if (map != NO_CMDLINE_MAP)
strcpy(comm, saved_cmdlines[map]);
else
strcpy(comm, "<...>");
arch_spin_unlock(&trace_cmdline_lock);
preempt_enable();
}
void tracing_record_cmdline(struct task_struct *tsk)
{
if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
!tracing_is_on())
return;
trace_save_cmdline(tsk);
}
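/*
 * Fill in the fields common to all trace entries: the pid of the
 * current task, the preemption count, and flags describing the
 * context (irqs off, hardirq/softirq, need-resched) the event
 * happened in.
 */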
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
int pc)
{
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
entry->pid = (tsk) ? tsk->pid : 0;
entry->padding = 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
TRACE_FLAG_IRQS_NOSUPPORT |
#endif
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
int type,
unsigned long len,
unsigned long flags, int pc)
{
struct ring_buffer_event *event;
event = ring_buffer_lock_reserve(buffer, len);
if (event != NULL) {
struct trace_entry *ent = ring_buffer_event_data(event);
tracing_generic_entry_update(ent, flags, pc);
ent->type = type;
}
return event;
}
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
int wake)
{
ring_buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
ftrace_trace_userstack(buffer, flags, pc);
if (wake)
trace_wake_up();
}
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
int type, unsigned long len,
unsigned long flags, int pc)
{
*current_rb = global_trace.buffer;
return trace_buffer_lock_reserve(*current_rb,
type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc)
{
__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
struct ring_buffer_event *event,
unsigned long flags, int pc,
struct pt_regs *regs)
{
ring_buffer_unlock_commit(buffer, event);
ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
struct ring_buffer_event *event)
{
ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
void
trace_function(struct trace_array *tr,
unsigned long ip, unsigned long parent_ip, unsigned long flags,
int pc)
{
struct ftrace_event_call *call = &event_function;
struct ring_buffer *buffer = tr->buffer;
struct ring_buffer_event *event;
struct ftrace_entry *entry;
/* If we are reading the ring buffer, don't trace */
if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
return;
event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
entry->ip = ip;
entry->parent_ip = parent_ip;
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
}
void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
unsigned long ip, unsigned long parent_ip, unsigned long flags,
int pc)
{
if (likely(!atomic_read(&data->disabled)))
trace_function(tr, ip, parent_ip, flags, pc);
}
#ifdef CONFIG_STACKTRACE
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};
static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
static void __ftrace_trace_stack(struct ring_buffer *buffer,
unsigned long flags,
int skip, int pc, struct pt_regs *regs)
{
struct ftrace_event_call *call = &event_kernel_stack;
struct ring_buffer_event *event;
struct stack_entry *entry;
struct stack_trace trace;
int use_stack;
int size = FTRACE_STACK_ENTRIES;
trace.nr_entries = 0;
trace.skip = skip;
/*
* Since events can happen in NMIs there's no safe way to
* use the per cpu ftrace_stacks. We reserve it and if an interrupt
* or NMI comes in, it will just have to use the default
* FTRACE_STACK_ENTRIES.
*/
preempt_disable_notrace();
use_stack = ++__get_cpu_var(ftrace_stack_reserve);
/*
* We don't need any atomic variables, just a barrier.
* If an interrupt comes in, we don't care, because it would
* have exited and put the counter back to what we want.
* We just need a barrier to keep gcc from moving things
* around.
*/
barrier();
if (use_stack == 1) {
trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
if (regs)
save_stack_trace_regs(regs, &trace);
else
save_stack_trace(&trace);
if (trace.nr_entries > size)
size = trace.nr_entries;
} else
/* From now on, use_stack is a boolean */
use_stack = 0;
size *= sizeof(unsigned long);
event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
sizeof(*entry) + size, flags, pc);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
memset(&entry->caller, 0, size);
if (use_stack)
memcpy(&entry->caller, trace.entries,
trace.nr_entries * sizeof(unsigned long));
else {
trace.max_entries = FTRACE_STACK_ENTRIES;
trace.entries = entry->caller;
if (regs)
save_stack_trace_regs(regs, &trace);
else
save_stack_trace(&trace);
}
entry->size = trace.nr_entries;
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
out:
/* Again, don't let gcc optimize things here */
barrier();
__get_cpu_var(ftrace_stack_reserve)--;
preempt_enable_notrace();
}
void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
int skip, int pc, struct pt_regs *regs)
{
if (!(trace_flags & TRACE_ITER_STACKTRACE))
return;
__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}
void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
int skip, int pc)
{
if (!(trace_flags & TRACE_ITER_STACKTRACE))
return;
__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
int pc)
{
__ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
}
/**
* trace_dump_stack - record a stack back trace in the trace buffer
*/
void trace_dump_stack(void)
{
unsigned long flags;
if (tracing_disabled || tracing_selftest_running)
return;
local_save_flags(flags);
/* skipping 3 frames seems to get us to the caller of this function */
__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
}
static DEFINE_PER_CPU(int, user_stack_count);
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
struct ftrace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
struct userstack_entry *entry;
struct stack_trace trace;
if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
return;
/*
* NMIs cannot handle page faults, even with fixups.
* Saving the user stack can (and often does) fault.
*/
if (unlikely(in_nmi()))
return;
/*
* prevent recursion, since the user stack tracing may
* trigger other kernel events.
*/
preempt_disable();
if (__this_cpu_read(user_stack_count))
goto out;
__this_cpu_inc(user_stack_count);
event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
sizeof(*entry), flags, pc);
if (!event)
goto out_drop_count;
entry = ring_buffer_event_data(event);
entry->tgid = current->tgid;
memset(&entry->caller, 0, sizeof(entry->caller));
trace.nr_entries = 0;
trace.max_entries = FTRACE_STACK_ENTRIES;
trace.skip = 0;
trace.entries = entry->caller;
save_stack_trace_user(&trace);
if (!filter_check_discard(call, entry, buffer, event))
ring_buffer_unlock_commit(buffer, event);
out_drop_count:
__this_cpu_dec(user_stack_count);
out:
preempt_enable();
}
#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
ftrace_trace_userstack(tr->buffer, flags, preempt_count());
}
#endif /* UNUSED */
#endif /* CONFIG_STACKTRACE */
/* created for use with alloc_percpu */
struct trace_buffer_struct {
char buffer[TRACE_BUF_SIZE];
};
static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;
/*
* The buffer used is dependent on the context. There is a per cpu
* buffer for normal context, softirq context, hard irq context and
* for NMI context. This allows for lockless recording.
*
* Note, if the buffers failed to be allocated, then this returns NULL
*/
static char *get_trace_buf(void)
{
struct trace_buffer_struct *percpu_buffer;
struct trace_buffer_struct *buffer;
/*
* If we have allocated per cpu buffers, then we do not
* need to do any locking.
*/
if (in_nmi())
percpu_buffer = trace_percpu_nmi_buffer;
else if (in_irq())
percpu_buffer = trace_percpu_irq_buffer;
else if (in_softirq())
percpu_buffer = trace_percpu_sirq_buffer;
else
percpu_buffer = trace_percpu_buffer;
if (!percpu_buffer)
return NULL;
buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());
return buffer->buffer;
}
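/*
 * Allocate the four per cpu trace_printk buffers (normal, softirq,
 * irq and NMI context). Either all four are allocated or none are;
 * on failure anything allocated so far is freed again.
 */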
static int alloc_percpu_trace_buffer(void)
{
struct trace_buffer_struct *buffers;
struct trace_buffer_struct *sirq_buffers;
struct trace_buffer_struct *irq_buffers;
struct trace_buffer_struct *nmi_buffers;
buffers = alloc_percpu(struct trace_buffer_struct);
if (!buffers)
goto err_warn;
sirq_buffers = alloc_percpu(struct trace_buffer_struct);
if (!sirq_buffers)
goto err_sirq;
irq_buffers = alloc_percpu(struct trace_buffer_struct);
if (!irq_buffers)
goto err_irq;
nmi_buffers = alloc_percpu(struct trace_buffer_struct);
if (!nmi_buffers)
goto err_nmi;
trace_percpu_buffer = buffers;
trace_percpu_sirq_buffer = sirq_buffers;
trace_percpu_irq_buffer = irq_buffers;
trace_percpu_nmi_buffer = nmi_buffers;
return 0;
err_nmi:
free_percpu(irq_buffers);
err_irq:
free_percpu(sirq_buffers);
err_sirq:
free_percpu(buffers);
err_warn:
WARN(1, "Could not allocate percpu trace_printk buffer");
return -ENOMEM;
}
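/*
 * Allocate the trace_printk buffers on first use; subsequent calls
 * are no-ops.
 */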
void trace_printk_init_buffers(void)
{
static int buffers_allocated;
if (buffers_allocated)
return;
if (alloc_percpu_trace_buffer())
return;
pr_info("ftrace: Allocated trace_printk buffers\n");
buffers_allocated = 1;
}
/**
* trace_vbprintk - write a binary message to the tracing buffer
*/
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
struct ftrace_event_call *call = &event_bprint;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
struct trace_array *tr = &global_trace;
struct bprint_entry *entry;
unsigned long flags;
char *tbuffer;
int len = 0, size, pc;
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
pc = preempt_count();
preempt_disable_notrace();
tbuffer = get_trace_buf();
if (!tbuffer) {
len = 0;
goto out;
}
len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
goto out;
local_save_flags(flags);
size = sizeof(*entry) + sizeof(u32) * len;
buffer = tr->buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
flags, pc);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
entry->ip = ip;
entry->fmt = fmt;
memcpy(entry->buf, tbuffer, sizeof(u32) * len);
if (!filter_check_discard(call, entry, buffer, event)) {
ring_buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
}
out:
preempt_enable_notrace();
unpause_graph_tracing();
return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
int trace_array_printk(struct trace_array *tr,
unsigned long ip, const char *fmt, ...)
{
int ret;
va_list ap;
if (!(trace_flags & TRACE_ITER_PRINTK))
return 0;
va_start(ap, fmt);
ret = trace_array_vprintk(tr, ip, fmt, ap);
va_end(ap);
return ret;
}
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
struct ftrace_event_call *call = &event_print;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
int len = 0, size, pc;
struct print_entry *entry;
unsigned long flags;
char *tbuffer;
if (tracing_disabled || tracing_selftest_running)
return 0;
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
pc = preempt_count();
preempt_disable_notrace();
tbuffer = get_trace_buf();
if (!tbuffer) {
len = 0;
goto out;
}
len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
if (len > TRACE_BUF_SIZE)
goto out;
local_save_flags(flags);
size = sizeof(*entry) + len + 1;
buffer = tr->buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
flags, pc);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
entry->ip = ip;
memcpy(&entry->buf, tbuffer, len);
entry->buf[len] = '\0';
if (!filter_check_discard(call, entry, buffer, event)) {
ring_buffer_unlock_commit(buffer, event);
ftrace_trace_stack(buffer, flags, 6, pc);
}
out:
preempt_enable_notrace();
unpause_graph_tracing();
return len;
}
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);
static void trace_iterator_increment(struct trace_iterator *iter)
{
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
iter->idx++;
if (buf_iter)
ring_buffer_read(buf_iter, NULL);
}
static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_event *event;
struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
if (buf_iter)
event = ring_buffer_iter_peek(buf_iter, ts);
else
event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
lost_events);
if (event) {
iter->ent_size = ring_buffer_event_length(event);
return ring_buffer_event_data(event);
}
iter->ent_size = 0;
return NULL;
}
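/*
 * Find the next entry to display: peek at each tracing cpu (or only
 * at the cpu of a per_cpu trace file) and pick the entry with the
 * smallest timestamp, returning its cpu, timestamp and lost event
 * count through the out parameters.
 */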
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
unsigned long *missing_events, u64 *ent_ts)
{
struct ring_buffer *buffer = iter->tr->buffer;
struct trace_entry *ent, *next = NULL;
unsigned long lost_events = 0, next_lost = 0;
int cpu_file = iter->cpu_file;
u64 next_ts = 0, ts;
int next_cpu = -1;
int next_size = 0;
int cpu;
/*
* If we are in a per_cpu trace file, don't bother iterating over
* all cpus; peek directly at the selected one.
*/
if (cpu_file > TRACE_PIPE_ALL_CPU) {
if (ring_buffer_empty_cpu(buffer, cpu_file))
return NULL;
ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
if (ent_cpu)
*ent_cpu = cpu_file;
return ent;
}
for_each_tracing_cpu(cpu) {
if (ring_buffer_empty_cpu(buffer, cpu))
continue;
ent = peek_next_entry(iter, cpu, &ts, &lost_events);
/*
* Pick the entry with the smallest timestamp:
*/
if (ent && (!next || ts < next_ts)) {
next = ent;
next_cpu = cpu;
next_ts = ts;
next_lost = lost_events;
next_size = iter->ent_size;
}
}
iter->ent_size = next_size;
if (ent_cpu)
*ent_cpu = next_cpu;
if (ent_ts)
*ent_ts = next_ts;
if (missing_events)
*missing_events = next_lost;
return next;
}
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts)
{
return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}
/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
iter->ent = __find_next_entry(iter, &iter->cpu,
&iter->lost_events, &iter->ts);
if (iter->ent)
trace_iterator_increment(iter);
return iter->ent ? iter : NULL;
}
static void trace_consume(struct trace_iterator *iter)
{
ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
&iter->lost_events);
}
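/*
 * seq_file iterator: advance to the entry at position *pos, walking
 * the merged per-cpu streams in timestamp order. The position may
 * only move forward.
 */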
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
struct trace_iterator *iter = m->private;
int i = (int)*pos;
void *ent;
WARN_ON_ONCE(iter->leftover);
(*pos)++;
/* can't go backwards */
if (iter->idx > i)
return NULL;
if (iter->idx < 0)
ent = trace_find_next_entry_inc(iter);
else
ent = iter;
while (ent && iter->idx < i)
ent = trace_find_next_entry_inc(iter);
iter->pos = *pos;
return ent;
}
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
struct trace_array *tr = iter->tr;
struct ring_buffer_event *event;
struct ring_buffer_iter *buf_iter;
unsigned long entries = 0;
u64 ts;
tr->data[cpu]->skipped_entries = 0;
buf_iter = trace_buffer_iter(iter, cpu);
if (!buf_iter)
return;
ring_buffer_iter_reset(buf_iter);
/*
* We could have the case with the max latency tracers
* that a reset never took place on a cpu. This is evident
* by the timestamp being before the start of the buffer.
*/
while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
if (ts >= iter->tr->time_start)
break;
entries++;
ring_buffer_read(buf_iter, NULL);
}
tr->data[cpu]->skipped_entries = entries;
}
/*
* The current tracer is copied to avoid taking a global lock
* all around.
*/
static void *s_start(struct seq_file *m, loff_t *pos)
{
struct trace_iterator *iter = m->private;
static struct tracer *old_tracer;
int cpu_file = iter->cpu_file;
void *p = NULL;
loff_t l = 0;
int cpu;
/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
if (unlikely(old_tracer != current_trace && current_trace)) {
old_tracer = current_trace;
*iter->trace = *current_trace;
}
mutex_unlock(&trace_types_lock);
atomic_inc(&trace_record_cmdline_disabled);
if (*pos != iter->pos) {
iter->ent = NULL;
iter->cpu = 0;
iter->idx = -1;
if (cpu_file == TRACE_PIPE_ALL_CPU) {
for_each_tracing_cpu(cpu)
tracing_iter_reset(iter, cpu);
} else
tracing_iter_reset(iter, cpu_file);
iter->leftover = 0;
for (p = iter; p && l < *pos; p = s_next(m, p, &l))
;
} else {
/*
* If we overflowed the seq_file before, then we want
* to just reuse the trace_seq buffer again.
*/
if (iter->leftover)
p = iter;
else {
l = *pos - 1;
p = s_next(m, p, &l);
}
}
trace_event_read_lock();
trace_access_lock(cpu_file);
return p;
}
static void s_stop(struct seq_file *m, void *p)
{
struct trace_iterator *iter = m->private;
atomic_dec(&trace_record_cmdline_disabled);
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
}
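/*
 * Count the entries currently in the buffer across all tracing cpus.
 * *entries is the number of displayable entries; *total additionally
 * counts events lost to ring buffer overruns on cpus that had no
 * skipped entries.
 */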
static void
get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
{
unsigned long count;
int cpu;
*total = 0;
*entries = 0;
for_each_tracing_cpu(cpu) {
count = ring_buffer_entries_cpu(tr->buffer, cpu);
/*
* If this buffer has skipped entries, then we hold all
* entries for the trace and we need to ignore the
* ones before the time stamp.
*/
if (tr->data[cpu]->skipped_entries) {
count -= tr->data[cpu]->skipped_entries;
/* total is the same as the entries */
*total += count;
} else
*total += count +
ring_buffer_overrun_cpu(tr->buffer, cpu);
*entries += count;
}
}
static void print_lat_help_header(struct seq_file *m)
{
seq_puts(m, "# _------=> CPU# \n");
seq_puts(m, "# / _-----=> irqs-off \n");
seq_puts(m, "# | / _----=> need-resched \n");
seq_puts(m, "# || / _---=> hardirq/softirq \n");
seq_puts(m, "# ||| / _--=> preempt-depth \n");
seq_puts(m, "# |||| / delay \n");
seq_puts(m, "# cmd pid ||||| time | caller \n");
seq_puts(m, "# \\ / ||||| \\ | / \n");
}
static void print_event_info(struct trace_array *tr, struct seq_file *m)
{
unsigned long total;
unsigned long entries;
get_total_entries(tr, &total, &entries);
seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
entries, total, num_online_cpus());
seq_puts(m, "#\n");
}
static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
{
print_event_info(tr, m);
seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
seq_puts(m, "# | | | | |\n");
}
static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
{
print_event_info(tr, m);
seq_puts(m, "# _-----=> irqs-off\n");
seq_puts(m, "# / _----=> need-resched\n");
seq_puts(m, "# | / _---=> hardirq/softirq\n");
seq_puts(m, "# || / _--=> preempt-depth\n");
seq_puts(m, "# ||| / delay\n");
seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
seq_puts(m, "# | | | |||| | |\n");
}
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
struct trace_array *tr = iter->tr;
struct trace_array_cpu *data = tr->data[tr->cpu];
struct tracer *type = current_trace;
unsigned long entries;
unsigned long total;
const char *name = "preemption";
if (type)
name = type->name;
get_total_entries(tr, &total, &entries);
seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
name, UTS_RELEASE);
seq_puts(m, "# -----------------------------------"
"---------------------------------\n");
seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
" (M:%s VP:%d, KP:%d, SP:%d HP:%d",
nsecs_to_usecs(data->saved_latency),
entries,
total,
tr->cpu,
#if defined(CONFIG_PREEMPT_NONE)
"server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
"desktop",
#elif defined(CONFIG_PREEMPT)
"preempt",
#else
"unknown",
#endif
/* These are reserved for later use */
0, 0, 0, 0);
#ifdef CONFIG_SMP
seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
seq_puts(m, ")\n");
#endif
seq_puts(m, "# -----------------\n");
seq_printf(m, "# | task: %.16s-%d "
"(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
data->comm, data->pid, data->uid, data->nice,
data->policy, data->rt_priority);
seq_puts(m, "# -----------------\n");
if (data->critical_start) {
seq_puts(m, "# => started at: ");
seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
trace_print_seq(m, &iter->seq);
seq_puts(m, "\n# => ended at: ");
seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
trace_print_seq(m, &iter->seq);
seq_puts(m, "\n#\n");
}
seq_puts(m, "#\n");
}
static void test_cpu_buff_start(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
if (!(trace_flags & TRACE_ITER_ANNOTATE))
return;
if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
return;
if (cpumask_test_cpu(iter->cpu, iter->started))
return;
if (iter->tr->data[iter->cpu]->skipped_entries)
return;
cpumask_set_cpu(iter->cpu, iter->started);
/* Don't print started cpu buffer for the first entry of the trace */
if (iter->idx > 1)
trace_seq_printf(s, "##### CPU %u buffer started ####\n",
iter->cpu);
}
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
struct trace_entry *entry;
struct trace_event *event;
entry = iter->ent;
test_cpu_buff_start(iter);
event = ftrace_find_event(entry->type);
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
if (!trace_print_lat_context(iter))
goto partial;
} else {
if (!trace_print_context(iter))
goto partial;
}
}
if (event)
return event->funcs->trace(iter, sym_flags, event);
if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
goto partial;
return TRACE_TYPE_HANDLED;
partial:
return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
struct trace_event *event;
entry = iter->ent;
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
if (!trace_seq_printf(s, "%d %d %llu ",
entry->pid, iter->cpu, iter->ts))
goto partial;
}
event = ftrace_find_event(entry->type);
if (event)
return event->funcs->raw(iter, 0, event);
if (!trace_seq_printf(s, "%d ?\n", entry->type))
goto partial;
return TRACE_TYPE_HANDLED;
partial:
return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
unsigned char newline = '\n';
struct trace_entry *entry;
struct trace_event *event;
entry = iter->ent;
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
}
event = ftrace_find_event(entry->type);
if (event) {
enum print_line_t ret = event->funcs->hex(iter, 0, event);
if (ret != TRACE_TYPE_HANDLED)
return ret;
}
SEQ_PUT_FIELD_RET(s, newline);
return TRACE_TYPE_HANDLED;
}
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
struct trace_event *event;
entry = iter->ent;
if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
SEQ_PUT_FIELD_RET(s, entry->pid);
SEQ_PUT_FIELD_RET(s, iter->cpu);
SEQ_PUT_FIELD_RET(s, iter->ts);
}
event = ftrace_find_event(entry->type);
return event ? event->funcs->binary(iter, 0, event) :
TRACE_TYPE_HANDLED;
}
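/*
 * Return 1 if there is nothing left to read, checking either the one
 * cpu of a per_cpu trace file or every tracing cpu.
 */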
int trace_empty(struct trace_iterator *iter)
{
struct ring_buffer_iter *buf_iter;
int cpu;
/* If we are looking at one CPU buffer, only check that one */
if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
cpu = iter->cpu_file;
buf_iter = trace_buffer_iter(iter, cpu);
if (buf_iter) {
if (!ring_buffer_iter_empty(buf_iter))
return 0;
} else {
if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
return 0;
}
return 1;
}
for_each_tracing_cpu(cpu) {
buf_iter = trace_buffer_iter(iter, cpu);
if (buf_iter) {
if (!ring_buffer_iter_empty(buf_iter))
return 0;
} else {
if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
return 0;
}
}
return 1;
}
/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
enum print_line_t ret;
if (iter->lost_events &&
!trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
iter->cpu, iter->lost_events))
return TRACE_TYPE_PARTIAL_LINE;
if (iter->trace && iter->trace->print_line) {
ret = iter->trace->print_line(iter);
if (ret != TRACE_TYPE_UNHANDLED)
return ret;
}
if (iter->ent->type == TRACE_BPRINT &&
trace_flags & TRACE_ITER_PRINTK &&
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
return trace_print_bprintk_msg_only(iter);
if (iter->ent->type == TRACE_PRINT &&
trace_flags & TRACE_ITER_PRINTK &&
trace_flags & TRACE_ITER_PRINTK_MSGONLY)
return trace_print_printk_msg_only(iter);
if (trace_flags & TRACE_ITER_BIN)
return print_bin_fmt(iter);
if (trace_flags & TRACE_ITER_HEX)
return print_hex_fmt(iter);
if (trace_flags & TRACE_ITER_RAW)
return print_raw_fmt(iter);
return print_trace_fmt(iter);
}
void trace_latency_header(struct seq_file *m)
{
struct trace_iterator *iter = m->private;
/* print nothing if the buffers are empty */
if (trace_empty(iter))
return;
if (iter->iter_flags & TRACE_FILE_LAT_FMT)
print_trace_header(m, iter);
if (!(trace_flags & TRACE_ITER_VERBOSE))
print_lat_help_header(m);
}
void trace_default_header(struct seq_file *m)
{
struct trace_iterator *iter = m->private;
if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
return;
if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
/* print nothing if the buffers are empty */
if (trace_empty(iter))
return;
print_trace_header(m, iter);
if (!(trace_flags & TRACE_ITER_VERBOSE))
print_lat_help_header(m);
} else {
if (!(trace_flags & TRACE_ITER_VERBOSE)) {
if (trace_flags & TRACE_ITER_IRQ_INFO)
print_func_help_header_irq(iter->tr, m);
else
print_func_help_header(iter->tr, m);
}
}
}
static void test_ftrace_alive(struct seq_file *m)
{
if (!ftrace_is_dead())
return;
seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
}
static int s_show(struct seq_file *m, void *v)
{
struct trace_iterator *iter = v;
int ret;
if (iter->ent == NULL) {
if (iter->tr) {
seq_printf(m, "# tracer: %s\n", iter->trace->name);
seq_puts(m, "#\n");
test_ftrace_alive(m);
}
if (iter->trace && iter->trace->print_header)
iter->trace->print_header(m);
else
trace_default_header(m);
} else if (iter->leftover) {
/*
* If we filled the seq_file buffer earlier, we
* want to just show it now.
*/
ret = trace_print_seq(m, &iter->seq);
/* ret should this time be zero, but you never know */
iter->leftover = ret;
} else {
print_trace_line(iter);
ret = trace_print_seq(m, &iter->seq);
/*
* If we overflow the seq_file buffer, then it will
* ask us for this data again at start up.
* Use that instead.
* ret is 0 if seq_file write succeeded.
* -1 otherwise.
*/
iter->leftover = ret;
}
return 0;
}
static const struct seq_operations tracer_seq_ops = {
.start = s_start,
.next = s_next,
.stop = s_stop,
.show = s_show,
};
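/*
 * Set up a trace_iterator for reading the trace file: allocate a ring
 * buffer iterator per cpu, copy the current tracer so it cannot change
 * underneath us, and stop tracing while the buffer is being dumped
 * (tracing_release() starts it again).
 */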
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file)
{
long cpu_file = (long) inode->i_private;
struct trace_iterator *iter;
int cpu;
if (tracing_disabled)
return ERR_PTR(-ENODEV);
iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
if (!iter)
return ERR_PTR(-ENOMEM);
iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
GFP_KERNEL);
if (!iter->buffer_iter)
goto release;
/*
* We make a copy of the current tracer to avoid concurrent
* changes to it while we are reading.
*/
mutex_lock(&trace_types_lock);
iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
if (!iter->trace)
goto fail;
if (current_trace)
*iter->trace = *current_trace;
if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
goto fail;
if (current_trace && current_trace->print_max)
iter->tr = &max_tr;
else
iter->tr = &global_trace;
iter->pos = -1;
mutex_init(&iter->mutex);
iter->cpu_file = cpu_file;
/* Notify the tracer early; before we stop tracing. */
if (iter->trace && iter->trace->open)
iter->trace->open(iter);
/* Annotate start of buffers if we had overruns */
if (ring_buffer_overruns(iter->tr->buffer))
iter->iter_flags |= TRACE_FILE_ANNOTATE;
/* stop the trace while dumping */
tracing_stop();
if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
for_each_tracing_cpu(cpu) {
iter->buffer_iter[cpu] =
ring_buffer_read_prepare(iter->tr->buffer, cpu);
}
ring_buffer_read_prepare_sync();
for_each_tracing_cpu(cpu) {
ring_buffer_read_start(iter->buffer_iter[cpu]);
tracing_iter_reset(iter, cpu);
}
} else {
cpu = iter->cpu_file;
iter->buffer_iter[cpu] =
ring_buffer_read_prepare(iter->tr->buffer, cpu);
ring_buffer_read_prepare_sync();
ring_buffer_read_start(iter->buffer_iter[cpu]);
tracing_iter_reset(iter, cpu);
}
mutex_unlock(&trace_types_lock);
return iter;
fail:
mutex_unlock(&trace_types_lock);
kfree(iter->trace);
kfree(iter->buffer_iter);
release:
seq_release_private(inode, file);
return ERR_PTR(-ENOMEM);
}
int tracing_open_generic(struct inode *inode, struct file *filp)
{
if (tracing_disabled)
return -ENODEV;
filp->private_data = inode->i_private;
return 0;
}
static int tracing_release(struct inode *inode, struct file *file)
{
struct seq_file *m = file->private_data;
struct trace_iterator *iter;
int cpu;
if (!(file->f_mode & FMODE_READ))
return 0;
iter = m->private;
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
if (iter->buffer_iter[cpu])
ring_buffer_read_finish(iter->buffer_iter[cpu]);
}
if (iter->trace && iter->trace->close)
iter->trace->close(iter);
/* reenable tracing if it was previously enabled */
tracing_start();
mutex_unlock(&trace_types_lock);
mutex_destroy(&iter->mutex);
free_cpumask_var(iter->started);
kfree(iter->trace);
kfree(iter->buffer_iter);
seq_release_private(inode, file);
return 0;
}
static int tracing_open(struct inode *inode, struct file *file)
{
struct trace_iterator *iter;
int ret = 0;
/* If this file was open for write, then erase contents */
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC)) {
long cpu = (long) inode->i_private;
if (cpu == TRACE_PIPE_ALL_CPU)
tracing_reset_online_cpus(&global_trace);
else
tracing_reset(&global_trace, cpu);
}
if (file->f_mode & FMODE_READ) {
iter = __tracing_open(inode, file);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
else if (trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
}
return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
struct tracer *t = v;
(*pos)++;
if (t)
t = t->next;
return t;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
struct tracer *t;
loff_t l = 0;
mutex_lock(&trace_types_lock);
for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
;
return t;
}
static void t_stop(struct seq_file *m, void *p)
{
mutex_unlock(&trace_types_lock);
}
static int t_show(struct seq_file *m, void *v)
{
struct tracer *t = v;
if (!t)
return 0;
seq_printf(m, "%s", t->name);
if (t->next)
seq_putc(m, ' ');
else
seq_putc(m, '\n');
return 0;
}
static const struct seq_operations show_traces_seq_ops = {
.start = t_start,
.next = t_next,
.stop = t_stop,
.show = t_show,
};
static int show_traces_open(struct inode *inode, struct file *file)
{
if (tracing_disabled)
return -ENODEV;
return seq_open(file, &show_traces_seq_ops);
}
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
{
return count;
}
static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
{
if (file->f_mode & FMODE_READ)
return seq_lseek(file, offset, origin);
else
return 0;
}
static const struct file_operations tracing_fops = {
.open = tracing_open,
.read = seq_read,
.write = tracing_write_stub,
.llseek = tracing_seek,
.release = tracing_release,
};
static const struct file_operations show_traces_fops = {
.open = show_traces_open,
.read = seq_read,
.release = seq_release,
.llseek = seq_lseek,
};
/*
* Only trace on a CPU if the bitmask is set:
*/
static cpumask_var_t tracing_cpumask;
/*
* The tracer itself will not take this lock, but still we want
* to provide a consistent cpumask to user-space:
*/
static DEFINE_MUTEX(tracing_cpumask_update_lock);
/*
* Temporary storage for the character representation of the
* CPU bitmask (and one more byte for the newline):
*/
static char mask_str[NR_CPUS + 1];
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
int len;
mutex_lock(&tracing_cpumask_update_lock);
len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
if (count - len < 2) {
count = -EINVAL;
goto out_err;
}
len += sprintf(mask_str + len, "\n");
count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
out_err:
mutex_unlock(&tracing_cpumask_update_lock);
return count;
}
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
{
int err, cpu;
cpumask_var_t tracing_cpumask_new;
if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
return -ENOMEM;
err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
if (err)
goto err_unlock;
mutex_lock(&tracing_cpumask_update_lock);
local_irq_disable();
arch_spin_lock(&ftrace_max_lock);
for_each_tracing_cpu(cpu) {
/*
* Increase/decrease the disabled counter if we are
* about to flip a bit in the cpumask:
*/
if (cpumask_test_cpu(cpu, tracing_cpumask) &&
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_inc(&global_trace.data[cpu]->disabled);
ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
}
if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_dec(&global_trace.data[cpu]->disabled);
ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
}
}
arch_spin_unlock(&ftrace_max_lock);
local_irq_enable();
cpumask_copy(tracing_cpumask, tracing_cpumask_new);
mutex_unlock(&tracing_cpumask_update_lock);
free_cpumask_var(tracing_cpumask_new);
return count;
err_unlock:
free_cpumask_var(tracing_cpumask_new);
return err;
}
static const struct file_operations tracing_cpumask_fops = {
.open = tracing_open_generic,
.read = tracing_cpumask_read,
.write = tracing_cpumask_write,
.llseek = generic_file_llseek,
};
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
struct tracer_opt *trace_opts;
u32 tracer_flags;
int i;
mutex_lock(&trace_types_lock);
tracer_flags = current_trace->flags->val;
trace_opts = current_trace->flags->opts;
for (i = 0; trace_options[i]; i++) {
if (trace_flags & (1 << i))
seq_printf(m, "%s\n", trace_options[i]);
else
seq_printf(m, "no%s\n", trace_options[i]);
}
for (i = 0; trace_opts[i].name; i++) {
if (tracer_flags & trace_opts[i].bit)
seq_printf(m, "%s\n", trace_opts[i].name);
else
seq_printf(m, "no%s\n", trace_opts[i].name);
}
mutex_unlock(&trace_types_lock);
return 0;
}
static int __set_tracer_option(struct tracer *trace,
struct tracer_flags *tracer_flags,
struct tracer_opt *opts, int neg)
{
int ret;
ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
if (ret)
return ret;
if (neg)
tracer_flags->val &= ~opts->bit;
else
tracer_flags->val |= opts->bit;
return 0;
}
/* Try to assign a tracer specific option */
static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
{
struct tracer_flags *tracer_flags = trace->flags;
struct tracer_opt *opts = NULL;
int i;
for (i = 0; tracer_flags->opts[i].name; i++) {
opts = &tracer_flags->opts[i];
if (strcmp(cmp, opts->name) == 0)
return __set_tracer_option(trace, trace->flags,
opts, neg);
}
return -EINVAL;
}
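/*
 * Set or clear one of the generic trace_flags bits and notify the
 * facilities that need to react to the change (cmdline recording,
 * ring buffer overwrite mode).
 */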
static void set_tracer_flags(unsigned int mask, int enabled)
{
/* do nothing if flag is already set */
if (!!(trace_flags & mask) == !!enabled)
return;
if (enabled)
trace_flags |= mask;
else
trace_flags &= ~mask;
if (mask == TRACE_ITER_RECORD_CMD)
trace_event_enable_cmd_record(enabled);
if (mask == TRACE_ITER_OVERWRITE)
ring_buffer_change_overwrite(global_trace.buffer, enabled);
}
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
char *cmp;
int neg = 0;
int ret;
int i;
if (cnt >= sizeof(buf))
return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
cmp = strstrip(buf);
if (strncmp(cmp, "no", 2) == 0) {
neg = 1;
cmp += 2;
}
for (i = 0; trace_options[i]; i++) {
if (strcmp(cmp, trace_options[i]) == 0) {
set_tracer_flags(1 << i, !neg);
break;
}
}
/* If no option could be set, test the specific tracer options */
if (!trace_options[i]) {
mutex_lock(&trace_types_lock);
ret = set_tracer_option(current_trace, cmp, neg);
mutex_unlock(&trace_types_lock);
if (ret)
return ret;
}
*ppos += cnt;
return cnt;
}
static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
if (tracing_disabled)
return -ENODEV;
return single_open(file, tracing_trace_options_show, NULL);
}
static const struct file_operations tracing_iter_fops = {
.open = tracing_trace_options_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = tracing_trace_options_write,
};
static const char readme_msg[] =
"tracing mini-HOWTO:\n\n"
"# mount -t debugfs nodev /sys/kernel/debug\n\n"
"# cat /sys/kernel/debug/tracing/available_tracers\n"
"wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
"# cat /sys/kernel/debug/tracing/current_tracer\n"
"nop\n"
"# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
"# cat /sys/kernel/debug/tracing/current_tracer\n"
"wakeup\n"
"# cat /sys/kernel/debug/tracing/trace_options\n"
"noprint-parent nosym-offset nosym-addr noverbose\n"
"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
"# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
"# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
;
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
return simple_read_from_buffer(ubuf, cnt, ppos,
readme_msg, strlen(readme_msg));
}
static const struct file_operations tracing_readme_fops = {
.open = tracing_open_generic,
.read = tracing_readme_read,
.llseek = generic_file_llseek,
};
static ssize_t
tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char *buf_comm;
char *file_buf;
char *buf;
int len = 0;
int pid;
int i;
file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
if (!file_buf)
return -ENOMEM;
buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
if (!buf_comm) {
kfree(file_buf);
return -ENOMEM;
}
buf = file_buf;
for (i = 0; i < SAVED_CMDLINES; i++) {
int r;
pid = map_cmdline_to_pid[i];
if (pid == -1 || pid == NO_CMDLINE_MAP)
continue;
trace_find_cmdline(pid, buf_comm);
r = sprintf(buf, "%d %s\n", pid, buf_comm);
buf += r;
len += r;
}
len = simple_read_from_buffer(ubuf, cnt, ppos,
file_buf, len);
kfree(file_buf);
kfree(buf_comm);
return len;
}
static const struct file_operations tracing_saved_cmdlines_fops = {
.open = tracing_open_generic,
.read = tracing_saved_cmdlines_read,
.llseek = generic_file_llseek,
};
static ssize_t
tracing_ctrl_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
int r;
r = sprintf(buf, "%u\n", tracer_enabled);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
tracing_ctrl_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
val = !!val;
mutex_lock(&trace_types_lock);
if (tracer_enabled ^ val) {
/* Only need to warn if this is used to change the state */
WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
if (val) {
tracer_enabled = 1;
if (current_trace->start)
current_trace->start(tr);
tracing_start();
} else {
tracer_enabled = 0;
tracing_stop();
if (current_trace->stop)
current_trace->stop(tr);
}
}
mutex_unlock(&trace_types_lock);
*ppos += cnt;
return cnt;
}
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[MAX_TRACER_SIZE+2];
int r;
mutex_lock(&trace_types_lock);
if (current_trace)
r = sprintf(buf, "%s\n", current_trace->name);
else
r = sprintf(buf, "\n");
mutex_unlock(&trace_types_lock);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
int tracer_init(struct tracer *t, struct trace_array *tr)
{
tracing_reset_online_cpus(tr);
return t->init(tr);
}
static void set_buffer_entries(struct trace_array *tr, unsigned long val)
{
int cpu;
for_each_tracing_cpu(cpu)
tr->data[cpu]->entries = val;
}
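/*
 * Resize the global trace ring buffer and, when the current tracer
 * uses it, the max_tr "snapshot" buffer as well. If resizing max_tr
 * fails, the global buffer is shrunk back so the two stay the same
 * size.
 */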
static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
{
int ret;
/*
* If the kernel or user changes the size of the ring buffer,
* we use the size that was given, and we can forget about
* expanding it later.
*/
ring_buffer_expanded = 1;
ret = ring_buffer_resize(global_trace.buffer, size, cpu);
if (ret < 0)
return ret;
if (!current_trace->use_max_tr)
goto out;
ret = ring_buffer_resize(max_tr.buffer, size, cpu);
if (ret < 0) {
int r = 0;
if (cpu == RING_BUFFER_ALL_CPUS) {
int i;
for_each_tracing_cpu(i) {
r = ring_buffer_resize(global_trace.buffer,
global_trace.data[i]->entries,
i);
if (r < 0)
break;
}
} else {
r = ring_buffer_resize(global_trace.buffer,
global_trace.data[cpu]->entries,
cpu);
}
if (r < 0) {
/*
* AARGH! We are left with a different
* size max buffer!!!!
* The max buffer is our "snapshot" buffer.
* When a tracer needs a snapshot (one of the
* latency tracers), it swaps the max buffer
* with the saved snapshot. We succeeded in
* updating the size of the main buffer, but failed to
* update the size of the max buffer. And when we tried
* to reset the main buffer to the original size, we
* failed there too. This is very unlikely to
* happen, but if it does, warn and kill all
* tracing.
*/
WARN_ON(1);
tracing_disabled = 1;
}
return ret;
}
if (cpu == RING_BUFFER_ALL_CPUS)
set_buffer_entries(&max_tr, size);
else
max_tr.data[cpu]->entries = size;
out:
if (cpu == RING_BUFFER_ALL_CPUS)
set_buffer_entries(&global_trace, size);
else
global_trace.data[cpu]->entries = size;
return ret;
}
static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
{
int ret = size;
mutex_lock(&trace_types_lock);
if (cpu_id != RING_BUFFER_ALL_CPUS) {
/* make sure, this cpu is enabled in the mask */
if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
ret = -EINVAL;
goto out;
}
}
ret = __tracing_resize_ring_buffer(size, cpu_id);
if (ret < 0)
ret = -ENOMEM;
out:
mutex_unlock(&trace_types_lock);
return ret;
}
/**
* tracing_update_buffers - used by tracing facility to expand ring buffers
*
* To save memory when tracing is never used on a system with it
* configured in, the ring buffers are set to a minimum size. Once
* a user starts to use the tracing facility, the buffers need to grow
* to their default size.
*
* This function is to be called when a tracer is about to be used.
*/
int tracing_update_buffers(void)
{
int ret = 0;
mutex_lock(&trace_types_lock);
if (!ring_buffer_expanded)
ret = __tracing_resize_ring_buffer(trace_buf_size,
RING_BUFFER_ALL_CPUS);
mutex_unlock(&trace_types_lock);
return ret;
}
struct trace_option_dentry;
static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer);
static void
destroy_trace_option_files(struct trace_option_dentry *topts);
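/*
 * Switch the current tracer to the one named @buf: expand the ring
 * buffer if it is still at its boot-time minimum, shut down the old
 * tracer, rebuild the tracer-specific option files and initialize the
 * new tracer. This is what a write to the current_tracer debugfs file
 * ends up calling, e.g. (assuming debugfs is mounted as in the
 * tracing mini-HOWTO above):
 *
 *   # echo wakeup > /sys/kernel/debug/tracing/current_tracer
 */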
static int tracing_set_tracer(const char *buf)
{
static struct trace_option_dentry *topts;
struct trace_array *tr = &global_trace;
struct tracer *t;
int ret = 0;
mutex_lock(&trace_types_lock);
if (!ring_buffer_expanded) {
ret = __tracing_resize_ring_buffer(trace_buf_size,
RING_BUFFER_ALL_CPUS);
if (ret < 0)
goto out;
ret = 0;
}
for (t = trace_types; t; t = t->next) {
if (strcmp(t->name, buf) == 0)
break;
}
if (!t) {
ret = -EINVAL;
goto out;
}
if (t == current_trace)
goto out;
trace_branch_disable();
if (current_trace && current_trace->reset)
current_trace->reset(tr);
if (current_trace && current_trace->use_max_tr) {
/*
* We don't free the ring buffer; instead, we resize it because
* the max_tr ring buffer has some state (e.g. ring->clock) and
* we want to preserve it.
*/
ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
set_buffer_entries(&max_tr, 1);
}
destroy_trace_option_files(topts);
current_trace = &nop_trace;
topts = create_trace_option_files(t);
if (t->use_max_tr) {
int cpu;
/* we need to make per cpu buffer sizes equivalent */
for_each_tracing_cpu(cpu) {
ret = ring_buffer_resize(max_tr.buffer,
global_trace.data[cpu]->entries,
cpu);
if (ret < 0)
goto out;
max_tr.data[cpu]->entries =
global_trace.data[cpu]->entries;
}
}
if (t->init) {
ret = tracer_init(t, tr);
if (ret)
goto out;
}
current_trace = t;
trace_branch_enable(tr);
out:
mutex_unlock(&trace_types_lock);
return ret;
}
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[MAX_TRACER_SIZE+1];
int i;
size_t ret;
int err;
ret = cnt;
if (cnt > MAX_TRACER_SIZE)
cnt = MAX_TRACER_SIZE;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
/* strip ending whitespace. */
for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
buf[i] = 0;
err = tracing_set_tracer(buf);
if (err)
return err;
*ppos += ret;
return ret;
}
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
unsigned long *ptr = filp->private_data;
char buf[64];
int r;
r = snprintf(buf, sizeof(buf), "%ld\n",
*ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
if (r > sizeof(buf))
r = sizeof(buf);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
unsigned long *ptr = filp->private_data;
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
*ptr = val * 1000;
return cnt;
}
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
long cpu_file = (long) inode->i_private;
struct trace_iterator *iter;
int ret = 0;
if (tracing_disabled)
return -ENODEV;
mutex_lock(&trace_types_lock);
/* create a buffer to store the information to pass to userspace */
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter) {
ret = -ENOMEM;
goto out;
}
/*
* We make a copy of the current tracer to avoid concurrent
* changes to it while we are reading.
*/
iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
if (!iter->trace) {
ret = -ENOMEM;
goto fail;
}
if (current_trace)
*iter->trace = *current_trace;
if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
ret = -ENOMEM;
goto fail;
}
/* trace pipe does not show start of buffer */
cpumask_setall(iter->started);
if (trace_flags & TRACE_ITER_LATENCY_FMT)
iter->iter_flags |= TRACE_FILE_LAT_FMT;
iter->cpu_file = cpu_file;
iter->tr = &global_trace;
mutex_init(&iter->mutex);
filp->private_data = iter;
if (iter->trace->pipe_open)
iter->trace->pipe_open(iter);
nonseekable_open(inode, filp);
out:
mutex_unlock(&trace_types_lock);
return ret;
fail:
kfree(iter->trace);
kfree(iter);
mutex_unlock(&trace_types_lock);
return ret;
}
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
struct trace_iterator *iter = file->private_data;
mutex_lock(&trace_types_lock);
if (iter->trace->pipe_close)
iter->trace->pipe_close(iter);
mutex_unlock(&trace_types_lock);
free_cpumask_var(iter->started);
mutex_destroy(&iter->mutex);
kfree(iter->trace);
kfree(iter);
return 0;
}
static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
struct trace_iterator *iter = filp->private_data;
if (trace_flags & TRACE_ITER_BLOCK) {
/*
* Always select as readable when in blocking mode
*/
return POLLIN | POLLRDNORM;
} else {
if (!trace_empty(iter))
return POLLIN | POLLRDNORM;
poll_wait(filp, &trace_wait, poll_table);
if (!trace_empty(iter))
return POLLIN | POLLRDNORM;
return 0;
}
}
void default_wait_pipe(struct trace_iterator *iter)
{
DEFINE_WAIT(wait);
prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
if (trace_empty(iter))
schedule();
finish_wait(&trace_wait, &wait);
}
/*
* This is a makeshift waitqueue.
* A tracer might use this callback in some rare cases:
*
* 1) the current tracer might hold the runqueue lock when it wakes up
* a reader, hence a deadlock (sched, function, and function graph tracers)
* 2) the function tracers trace all functions, and we don't want
* the overhead of calling wake_up and friends
* (and tracing them too)
*
* Anyway, this is really a very primitive wakeup.
*/
void poll_wait_pipe(struct trace_iterator *iter)
{
set_current_state(TASK_INTERRUPTIBLE);
/* sleep for 100 msecs, and try again. */
schedule_timeout(HZ / 10);
}
/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
struct trace_iterator *iter = filp->private_data;
while (trace_empty(iter)) {
if ((filp->f_flags & O_NONBLOCK)) {
return -EAGAIN;
}
mutex_unlock(&iter->mutex);
iter->trace->wait_pipe(iter);
mutex_lock(&iter->mutex);
if (signal_pending(current))
return -EINTR;
/*
* We block until we read something, or until tracing is disabled
* after we have read something. We still block if tracing is
* disabled but we have never read anything: this allows a user to
* cat this file, and then enable tracing. But after we have read
* something, we give an EOF when tracing is disabled again.
*
* iter->pos will be 0 if we haven't read anything.
*/
if (!tracer_enabled && iter->pos)
break;
}
return 1;
}
/*
* Consumer reader.
*/
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_iterator *iter = filp->private_data;
static struct tracer *old_tracer;
ssize_t sret;
/* return any leftover data */
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
if (sret != -EBUSY)
return sret;
trace_seq_init(&iter->seq);
/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
if (unlikely(old_tracer != current_trace && current_trace)) {
old_tracer = current_trace;
*iter->trace = *current_trace;
}
mutex_unlock(&trace_types_lock);
/*
* Avoid more than one consumer on a single file descriptor.
* This is just a matter of trace coherency; the ring buffer itself
* is protected.
*/
mutex_lock(&iter->mutex);
if (iter->trace->read) {
sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
if (sret)
goto out;
}
waitagain:
sret = tracing_wait_pipe(filp);
if (sret <= 0)
goto out;
/* stop when tracing is finished */
if (trace_empty(iter)) {
sret = 0;
goto out;
}
if (cnt >= PAGE_SIZE)
cnt = PAGE_SIZE - 1;
/* reset all but tr, trace, and overruns */
memset(&iter->seq, 0,
sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq));
iter->pos = -1;
trace_event_read_lock();
trace_access_lock(iter->cpu_file);
while (trace_find_next_entry_inc(iter) != NULL) {
enum print_line_t ret;
int len = iter->seq.len;
ret = print_trace_line(iter);
if (ret == TRACE_TYPE_PARTIAL_LINE) {
/* don't print partial lines */
iter->seq.len = len;
break;
}
if (ret != TRACE_TYPE_NO_CONSUME)
trace_consume(iter);
if (iter->seq.len >= cnt)
break;
/*
* Setting the full flag means we reached the trace_seq buffer
* size and we should have left via the partial-output condition
* above. One of the trace_seq_* functions is not being used properly.
*/
WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
iter->ent->type);
}
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
/* Now copy what we have to the user */
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
if (iter->seq.readpos >= iter->seq.len)
trace_seq_init(&iter->seq);
/*
* If there was nothing to send to user, in spite of consuming trace
* entries, go back to wait for more entries.
*/
if (sret == -EBUSY)
goto waitagain;
out:
mutex_unlock(&iter->mutex);
return sret;
}
static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
__free_page(buf->page);
}
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
unsigned int idx)
{
__free_page(spd->pages[idx]);
}
static const struct pipe_buf_operations tracing_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
.confirm = generic_pipe_buf_confirm,
.release = tracing_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
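/*
 * Fill iter->seq with formatted trace lines until either the
 * page-sized seq buffer or the remaining splice length @rem is
 * exhausted. Returns how much of @rem is left.
 */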
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
size_t count;
int ret;
/* Seq buffer is page-sized, exactly what we need. */
for (;;) {
count = iter->seq.len;
ret = print_trace_line(iter);
count = iter->seq.len - count;
if (rem < count) {
rem = 0;
iter->seq.len -= count;
break;
}
if (ret == TRACE_TYPE_PARTIAL_LINE) {
iter->seq.len -= count;
break;
}
if (ret != TRACE_TYPE_NO_CONSUME)
trace_consume(iter);
rem -= count;
if (!trace_find_next_entry_inc(iter)) {
rem = 0;
iter->ent = NULL;
break;
}
}
return rem;
}
static ssize_t tracing_splice_read_pipe(struct file *filp,
loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len,
unsigned int flags)
{
struct page *pages_def[PIPE_DEF_BUFFERS];
struct partial_page partial_def[PIPE_DEF_BUFFERS];
struct trace_iterator *iter = filp->private_data;
struct splice_pipe_desc spd = {
.pages = pages_def,
.partial = partial_def,
.nr_pages = 0, /* This gets updated below. */
.nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &tracing_pipe_buf_ops,
.spd_release = tracing_spd_release_pipe,
};
static struct tracer *old_tracer;
ssize_t ret;
size_t rem;
unsigned int i;
if (splice_grow_spd(pipe, &spd))
return -ENOMEM;
/* copy the tracer to avoid using a global lock all around */
mutex_lock(&trace_types_lock);
if (unlikely(old_tracer != current_trace && current_trace)) {
old_tracer = current_trace;
*iter->trace = *current_trace;
}
mutex_unlock(&trace_types_lock);
mutex_lock(&iter->mutex);
if (iter->trace->splice_read) {
ret = iter->trace->splice_read(iter, filp,
ppos, pipe, len, flags);
if (ret)
goto out_err;
}
ret = tracing_wait_pipe(filp);
if (ret <= 0)
goto out_err;
if (!iter->ent && !trace_find_next_entry_inc(iter)) {
ret = -EFAULT;
goto out_err;
}
trace_event_read_lock();
trace_access_lock(iter->cpu_file);
/* Fill as many pages as possible. */
for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
spd.pages[i] = alloc_page(GFP_KERNEL);
if (!spd.pages[i])
break;
rem = tracing_fill_pipe_page(rem, iter);
/* Copy the data into the page, so we can start over. */
ret = trace_seq_to_buffer(&iter->seq,
page_address(spd.pages[i]),
iter->seq.len);
if (ret < 0) {
__free_page(spd.pages[i]);
break;
}
spd.partial[i].offset = 0;
spd.partial[i].len = iter->seq.len;
trace_seq_init(&iter->seq);
}
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
mutex_unlock(&iter->mutex);
spd.nr_pages = i;
ret = splice_to_pipe(pipe, &spd);
out:
splice_shrink_spd(&spd);
return ret;
out_err:
mutex_unlock(&iter->mutex);
goto out;
}
struct ftrace_entries_info {
struct trace_array *tr;
int cpu;
};
static int tracing_entries_open(struct inode *inode, struct file *filp)
{
struct ftrace_entries_info *info;
if (tracing_disabled)
return -ENODEV;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->tr = &global_trace;
info->cpu = (unsigned long)inode->i_private;
filp->private_data = info;
return 0;
}
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct ftrace_entries_info *info = filp->private_data;
struct trace_array *tr = info->tr;
char buf[64];
int r = 0;
ssize_t ret;
mutex_lock(&trace_types_lock);
if (info->cpu == RING_BUFFER_ALL_CPUS) {
int cpu, buf_size_same;
unsigned long size;
size = 0;
buf_size_same = 1;
/* check if all cpu buffer sizes are the same */
for_each_tracing_cpu(cpu) {
/* fill in the size from first enabled cpu */
if (size == 0)
size = tr->data[cpu]->entries;
if (size != tr->data[cpu]->entries) {
buf_size_same = 0;
break;
}
}
if (buf_size_same) {
if (!ring_buffer_expanded)
r = sprintf(buf, "%lu (expanded: %lu)\n",
size >> 10,
trace_buf_size >> 10);
else
r = sprintf(buf, "%lu\n", size >> 10);
} else
r = sprintf(buf, "X\n");
} else
r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
mutex_unlock(&trace_types_lock);
ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
return ret;
}
static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct ftrace_entries_info *info = filp->private_data;
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
/* must have at least 1 entry */
if (!val)
return -EINVAL;
/* value is in KB */
val <<= 10;
ret = tracing_resize_ring_buffer(val, info->cpu);
if (ret < 0)
return ret;
*ppos += cnt;
return cnt;
}
static int
tracing_entries_release(struct inode *inode, struct file *filp)
{
struct ftrace_entries_info *info = filp->private_data;
kfree(info);
return 0;
}
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
char buf[64];
int r, cpu;
unsigned long size = 0, expanded_size = 0;
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
size += tr->data[cpu]->entries >> 10;
if (!ring_buffer_expanded)
expanded_size += trace_buf_size >> 10;
}
if (ring_buffer_expanded)
r = sprintf(buf, "%lu\n", size);
else
r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
mutex_unlock(&trace_types_lock);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
/*
* There is no need to read what the user has written; this function
* exists only so that an "echo" to this file does not error out
*/
*ppos += cnt;
return cnt;
}
static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
/* disable tracing? */
if (trace_flags & TRACE_ITER_STOP_ON_FREE)
tracing_off();
/* resize the ring buffer to 0 */
tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
return 0;
}
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
{
unsigned long addr = (unsigned long)ubuf;
struct ring_buffer_event *event;
struct ring_buffer *buffer;
struct print_entry *entry;
unsigned long irq_flags;
struct page *pages[2];
void *map_page[2];
int nr_pages = 1;
ssize_t written;
int offset;
int size;
int len;
int ret;
int i;
if (tracing_disabled)
return -EINVAL;
if (cnt > TRACE_BUF_SIZE)
cnt = TRACE_BUF_SIZE;
/*
* Userspace is injecting traces into the kernel trace buffer.
* We want to be as non-intrusive as possible.
* To do so, we do not want to allocate any special buffers
* or take any locks, but instead write the userspace data
* straight into the ring buffer.
*
* First we need to pin the userspace buffer into memory. It is
* most likely already resident, since userspace just referenced
* it, but there is no guarantee. By using get_user_pages_fast()
* and kmap_atomic/kunmap_atomic() we can get access to the
* pages directly. We then write the data directly into the
* ring buffer.
*/
BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
/* check if we cross pages */
if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
nr_pages = 2;
offset = addr & (PAGE_SIZE - 1);
addr &= PAGE_MASK;
ret = get_user_pages_fast(addr, nr_pages, 0, pages);
if (ret < nr_pages) {
while (--ret >= 0)
put_page(pages[ret]);
written = -EFAULT;
goto out;
}
for (i = 0; i < nr_pages; i++)
map_page[i] = kmap_atomic(pages[i]);
local_save_flags(irq_flags);
size = sizeof(*entry) + cnt + 2; /* possible '\n' plus terminating NUL */
buffer = global_trace.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
irq_flags, preempt_count());
if (!event) {
/* Ring buffer disabled, return as if not open for write */
written = -EBADF;
goto out_unlock;
}
entry = ring_buffer_event_data(event);
entry->ip = _THIS_IP_;
if (nr_pages == 2) {
len = PAGE_SIZE - offset;
memcpy(&entry->buf, map_page[0] + offset, len);
memcpy(&entry->buf[len], map_page[1], cnt - len);
} else
memcpy(&entry->buf, map_page[0] + offset, cnt);
if (entry->buf[cnt - 1] != '\n') {
entry->buf[cnt] = '\n';
entry->buf[cnt + 1] = '\0';
} else
entry->buf[cnt] = '\0';
ring_buffer_unlock_commit(buffer, event);
written = cnt;
*fpos += written;
out_unlock:
for (i = 0; i < nr_pages; i++) {
kunmap_atomic(map_page[i]);
put_page(pages[i]);
}
out:
return written;
}
static int tracing_clock_show(struct seq_file *m, void *v)
{
int i;
for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
seq_printf(m,
"%s%s%s%s", i ? " " : "",
i == trace_clock_id ? "[" : "", trace_clocks[i].name,
i == trace_clock_id ? "]" : "");
seq_putc(m, '\n');
return 0;
}
static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *fpos)
{
char buf[64];
const char *clockstr;
int i;
if (cnt >= sizeof(buf))
return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
clockstr = strstrip(buf);
for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
if (strcmp(trace_clocks[i].name, clockstr) == 0)
break;
}
if (i == ARRAY_SIZE(trace_clocks))
return -EINVAL;
trace_clock_id = i;
mutex_lock(&trace_types_lock);
ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
if (max_tr.buffer)
ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
mutex_unlock(&trace_types_lock);
*fpos += cnt;
return cnt;
}
static int tracing_clock_open(struct inode *inode, struct file *file)
{
if (tracing_disabled)
return -ENODEV;
return single_open(file, tracing_clock_show, NULL);
}
static const struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic,
.read = tracing_max_lat_read,
.write = tracing_max_lat_write,
.llseek = generic_file_llseek,
};
static const struct file_operations tracing_ctrl_fops = {
.open = tracing_open_generic,
.read = tracing_ctrl_read,
.write = tracing_ctrl_write,
.llseek = generic_file_llseek,
};
static const struct file_operations set_tracer_fops = {
.open = tracing_open_generic,
.read = tracing_set_trace_read,
.write = tracing_set_trace_write,
.llseek = generic_file_llseek,
};
static const struct file_operations tracing_pipe_fops = {
.open = tracing_open_pipe,
.poll = tracing_poll_pipe,
.read = tracing_read_pipe,
.splice_read = tracing_splice_read_pipe,
.release = tracing_release_pipe,
.llseek = no_llseek,
};
static const struct file_operations tracing_entries_fops = {
.open = tracing_entries_open,
.read = tracing_entries_read,
.write = tracing_entries_write,
.release = tracing_entries_release,
.llseek = generic_file_llseek,
};
static const struct file_operations tracing_total_entries_fops = {
.open = tracing_open_generic,
.read = tracing_total_entries_read,
.llseek = generic_file_llseek,
};
static const struct file_operations tracing_free_buffer_fops = {
.write = tracing_free_buffer_write,
.release = tracing_free_buffer_release,
};
static const struct file_operations tracing_mark_fops = {
.open = tracing_open_generic,
.write = tracing_mark_write,
.llseek = generic_file_llseek,
};
static const struct file_operations trace_clock_fops = {
.open = tracing_clock_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = tracing_clock_write,
};
struct ftrace_buffer_info {
struct trace_array *tr;
void *spare;
int cpu;
unsigned int read;
};
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
int cpu = (int)(long)inode->i_private;
struct ftrace_buffer_info *info;
if (tracing_disabled)
return -ENODEV;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->tr = &global_trace;
info->cpu = cpu;
info->spare = NULL;
/* Force reading the ring buffer on the first read */
info->read = (unsigned int)-1;
filp->private_data = info;
return nonseekable_open(inode, filp);
}
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ftrace_buffer_info *info = filp->private_data;
ssize_t ret;
size_t size;
if (!count)
return 0;
if (!info->spare)
info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
if (!info->spare)
return -ENOMEM;
/* Do we have previous read data to read? */
if (info->read < PAGE_SIZE)
goto read;
trace_access_lock(info->cpu);
ret = ring_buffer_read_page(info->tr->buffer,
&info->spare,
count,
info->cpu, 0);
trace_access_unlock(info->cpu);
if (ret < 0)
return 0;
info->read = 0;
read:
size = PAGE_SIZE - info->read;
if (size > count)
size = count;
ret = copy_to_user(ubuf, info->spare + info->read, size);
if (ret == size)
return -EFAULT;
size -= ret;
*ppos += size;
info->read += size;
return size;
}
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
struct ftrace_buffer_info *info = file->private_data;
if (info->spare)
ring_buffer_free_read_page(info->tr->buffer, info->spare);
kfree(info);
return 0;
}
struct buffer_ref {
struct ring_buffer *buffer;
void *page;
int ref;
};
static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
if (--ref->ref)
return;
ring_buffer_free_read_page(ref->buffer, ref->page);
kfree(ref);
buf->private = 0;
}
static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
return 1;
}
static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
ref->ref++;
}
/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
.can_merge = 0,
.map = generic_pipe_buf_map,
.unmap = generic_pipe_buf_unmap,
.confirm = generic_pipe_buf_confirm,
.release = buffer_pipe_buf_release,
.steal = buffer_pipe_buf_steal,
.get = buffer_pipe_buf_get,
};
/*
* Callback from splice_to_pipe(); releases any pages left in the
* spd if we errored out while filling the pipe.
*/
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
struct buffer_ref *ref =
(struct buffer_ref *)spd->partial[i].private;
if (--ref->ref)
return;
ring_buffer_free_read_page(ref->buffer, ref->page);
kfree(ref);
spd->partial[i].private = 0;
}
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct ftrace_buffer_info *info = file->private_data;
struct partial_page partial_def[PIPE_DEF_BUFFERS];
struct page *pages_def[PIPE_DEF_BUFFERS];
struct splice_pipe_desc spd = {
.pages = pages_def,
.partial = partial_def,
.nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &buffer_pipe_buf_ops,
.spd_release = buffer_spd_release,
};
struct buffer_ref *ref;
int entries, size, i;
ssize_t ret;
if (splice_grow_spd(pipe, &spd))
return -ENOMEM;
if (*ppos & (PAGE_SIZE - 1)) {
WARN_ONCE(1, "Ftrace: previous read must page-align\n");
ret = -EINVAL;
goto out;
}
if (len & (PAGE_SIZE - 1)) {
WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
if (len < PAGE_SIZE) {
ret = -EINVAL;
goto out;
}
len &= PAGE_MASK;
}
trace_access_lock(info->cpu);
entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
struct page *page;
int r;
ref = kzalloc(sizeof(*ref), GFP_KERNEL);
if (!ref)
break;
ref->ref = 1;
ref->buffer = info->tr->buffer;
ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
if (!ref->page) {
kfree(ref);
break;
}
r = ring_buffer_read_page(ref->buffer, &ref->page,
len, info->cpu, 1);
if (r < 0) {
ring_buffer_free_read_page(ref->buffer, ref->page);
kfree(ref);
break;
}
/*
* zero out any leftover data; this is going to
* userland.
*/
size = ring_buffer_page_len(ref->page);
if (size < PAGE_SIZE)
memset(ref->page + size, 0, PAGE_SIZE - size);
page = virt_to_page(ref->page);
spd.pages[i] = page;
spd.partial[i].len = PAGE_SIZE;
spd.partial[i].offset = 0;
spd.partial[i].private = (unsigned long)ref;
spd.nr_pages++;
*ppos += PAGE_SIZE;
entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
}
trace_access_unlock(info->cpu);
spd.nr_pages = i;
/* did we read anything? */
if (!spd.nr_pages) {
if (flags & SPLICE_F_NONBLOCK)
ret = -EAGAIN;
else
ret = 0;
/* TODO: block */
goto out;
}
ret = splice_to_pipe(pipe, &spd);
splice_shrink_spd(&spd);
out:
return ret;
}
static const struct file_operations tracing_buffers_fops = {
.open = tracing_buffers_open,
.read = tracing_buffers_read,
.release = tracing_buffers_release,
.splice_read = tracing_buffers_splice_read,
.llseek = no_llseek,
};
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
unsigned long cpu = (unsigned long)filp->private_data;
struct trace_array *tr = &global_trace;
struct trace_seq *s;
unsigned long cnt;
unsigned long long t;
unsigned long usec_rem;
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
trace_seq_init(s);
cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
trace_seq_printf(s, "entries: %ld\n", cnt);
cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
trace_seq_printf(s, "overrun: %ld\n", cnt);
cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
trace_seq_printf(s, "commit overrun: %ld\n", cnt);
cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
trace_seq_printf(s, "bytes: %ld\n", cnt);
t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
usec_rem = do_div(t, USEC_PER_SEC);
trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
kfree(s);
return count;
}
static const struct file_operations tracing_stats_fops = {
.open = tracing_open_generic,
.read = tracing_stats_read,
.llseek = generic_file_llseek,
};
#ifdef CONFIG_DYNAMIC_FTRACE
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
return 0;
}
static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
static char ftrace_dyn_info_buffer[1024];
static DEFINE_MUTEX(dyn_info_mutex);
unsigned long *p = filp->private_data;
char *buf = ftrace_dyn_info_buffer;
int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
int r;
mutex_lock(&dyn_info_mutex);
r = sprintf(buf, "%ld ", *p);
r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
buf[r++] = '\n';
r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
mutex_unlock(&dyn_info_mutex);
return r;
}
static const struct file_operations tracing_dyn_info_fops = {
.open = tracing_open_generic,
.read = tracing_read_dyn_info,
.llseek = generic_file_llseek,
};
#endif
static struct dentry *d_tracer;
struct dentry *tracing_init_dentry(void)
{
static int once;
if (d_tracer)
return d_tracer;
if (!debugfs_initialized())
return NULL;
d_tracer = debugfs_create_dir("tracing", NULL);
if (!d_tracer && !once) {
once = 1;
pr_warning("Could not create debugfs directory 'tracing'\n");
return NULL;
}
return d_tracer;
}
static struct dentry *d_percpu;
struct dentry *tracing_dentry_percpu(void)
{
static int once;
struct dentry *d_tracer;
if (d_percpu)
return d_percpu;
d_tracer = tracing_init_dentry();
if (!d_tracer)
return NULL;
d_percpu = debugfs_create_dir("per_cpu", d_tracer);
if (!d_percpu && !once) {
once = 1;
pr_warning("Could not create debugfs directory 'per_cpu'\n");
return NULL;
}
return d_percpu;
}
static void tracing_init_debugfs_percpu(long cpu)
{
struct dentry *d_percpu = tracing_dentry_percpu();
struct dentry *d_cpu;
char cpu_dir[30]; /* 30 characters should be more than enough */
if (!d_percpu)
return;
snprintf(cpu_dir, 30, "cpu%ld", cpu);
d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
if (!d_cpu) {
pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
return;
}
/* per cpu trace_pipe */
trace_create_file("trace_pipe", 0444, d_cpu,
(void *) cpu, &tracing_pipe_fops);
/* per cpu trace */
trace_create_file("trace", 0644, d_cpu,
(void *) cpu, &tracing_fops);
trace_create_file("trace_pipe_raw", 0444, d_cpu,
(void *) cpu, &tracing_buffers_fops);
trace_create_file("stats", 0444, d_cpu,
(void *) cpu, &tracing_stats_fops);
trace_create_file("buffer_size_kb", 0444, d_cpu,
(void *) cpu, &tracing_entries_fops);
}
#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif
struct trace_option_dentry {
struct tracer_opt *opt;
struct tracer_flags *flags;
struct dentry *entry;
};
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
struct trace_option_dentry *topt = filp->private_data;
char *buf;
if (topt->flags->val & topt->opt->bit)
buf = "1\n";
else
buf = "0\n";
return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
struct trace_option_dentry *topt = filp->private_data;
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
if (val != 0 && val != 1)
return -EINVAL;
if (!!(topt->flags->val & topt->opt->bit) != val) {
mutex_lock(&trace_types_lock);
ret = __set_tracer_option(current_trace, topt->flags,
topt->opt, !val);
mutex_unlock(&trace_types_lock);
if (ret)
return ret;
}
*ppos += cnt;
return cnt;
}
static const struct file_operations trace_options_fops = {
.open = tracing_open_generic,
.read = trace_options_read,
.write = trace_options_write,
.llseek = generic_file_llseek,
};
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
long index = (long)filp->private_data;
char *buf;
if (trace_flags & (1 << index))
buf = "1\n";
else
buf = "0\n";
return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
long index = (long)filp->private_data;
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
if (val != 0 && val != 1)
return -EINVAL;
set_tracer_flags(1 << index, val);
*ppos += cnt;
return cnt;
}
static const struct file_operations trace_options_core_fops = {
.open = tracing_open_generic,
.read = trace_options_core_read,
.write = trace_options_core_write,
.llseek = generic_file_llseek,
};
struct dentry *trace_create_file(const char *name,
umode_t mode,
struct dentry *parent,
void *data,
const struct file_operations *fops)
{
struct dentry *ret;
ret = debugfs_create_file(name, mode, parent, data, fops);
if (!ret)
pr_warning("Could not create debugfs '%s' entry\n", name);
return ret;
}
static struct dentry *trace_options_init_dentry(void)
{
struct dentry *d_tracer;
static struct dentry *t_options;
if (t_options)
return t_options;
d_tracer = tracing_init_dentry();
if (!d_tracer)
return NULL;
t_options = debugfs_create_dir("options", d_tracer);
if (!t_options) {
pr_warning("Could not create debugfs directory 'options'\n");
return NULL;
}
return t_options;
}
static void
create_trace_option_file(struct trace_option_dentry *topt,
struct tracer_flags *flags,
struct tracer_opt *opt)
{
struct dentry *t_options;
t_options = trace_options_init_dentry();
if (!t_options)
return;
topt->flags = flags;
topt->opt = opt;
topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
&trace_options_fops);
}
static struct trace_option_dentry *
create_trace_option_files(struct tracer *tracer)
{
struct trace_option_dentry *topts;
struct tracer_flags *flags;
struct tracer_opt *opts;
int cnt;
if (!tracer)
return NULL;
flags = tracer->flags;
if (!flags || !flags->opts)
return NULL;
opts = flags->opts;
for (cnt = 0; opts[cnt].name; cnt++)
;
topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
if (!topts)
return NULL;
for (cnt = 0; opts[cnt].name; cnt++)
create_trace_option_file(&topts[cnt], flags,
&opts[cnt]);
return topts;
}
static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
int cnt;
if (!topts)
return;
for (cnt = 0; topts[cnt].opt; cnt++) {
if (topts[cnt].entry)
debugfs_remove(topts[cnt].entry);
}
kfree(topts);
}
static struct dentry *
create_trace_option_core_file(const char *option, long index)
{
struct dentry *t_options;
t_options = trace_options_init_dentry();
if (!t_options)
return NULL;
return trace_create_file(option, 0644, t_options, (void *)index,
&trace_options_core_fops);
}
static __init void create_trace_options_dir(void)
{
struct dentry *t_options;
int i;
t_options = trace_options_init_dentry();
if (!t_options)
return;
for (i = 0; trace_options[i]; i++)
create_trace_option_core_file(trace_options[i], i);
}
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
struct ring_buffer *buffer = tr->buffer;
char buf[64];
int r;
if (buffer)
r = ring_buffer_record_is_on(buffer);
else
r = 0;
r = sprintf(buf, "%d\n", r);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct trace_array *tr = filp->private_data;
struct ring_buffer *buffer = tr->buffer;
unsigned long val;
int ret;
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
if (buffer) {
if (val)
ring_buffer_record_on(buffer);
else
ring_buffer_record_off(buffer);
}
(*ppos)++;
return cnt;
}
static const struct file_operations rb_simple_fops = {
.open = tracing_open_generic,
.read = rb_simple_read,
.write = rb_simple_write,
.llseek = default_llseek,
};
static __init int tracer_init_debugfs(void)
{
struct dentry *d_tracer;
int cpu;
trace_access_lock_init();
d_tracer = tracing_init_dentry();
trace_create_file("tracing_enabled", 0644, d_tracer,
&global_trace, &tracing_ctrl_fops);
trace_create_file("trace_options", 0644, d_tracer,
NULL, &tracing_iter_fops);
trace_create_file("tracing_cpumask", 0644, d_tracer,
NULL, &tracing_cpumask_fops);
trace_create_file("trace", 0644, d_tracer,
(void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
trace_create_file("available_tracers", 0444, d_tracer,
&global_trace, &show_traces_fops);
trace_create_file("current_tracer", 0644, d_tracer,
&global_trace, &set_tracer_fops);
#ifdef CONFIG_TRACER_MAX_TRACE
trace_create_file("tracing_max_latency", 0644, d_tracer,
&tracing_max_latency, &tracing_max_lat_fops);
#endif
trace_create_file("tracing_thresh", 0644, d_tracer,
&tracing_thresh, &tracing_max_lat_fops);
trace_create_file("README", 0444, d_tracer,
NULL, &tracing_readme_fops);
trace_create_file("trace_pipe", 0444, d_tracer,
(void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
trace_create_file("buffer_size_kb", 0644, d_tracer,
(void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
trace_create_file("buffer_total_size_kb", 0444, d_tracer,
&global_trace, &tracing_total_entries_fops);
trace_create_file("free_buffer", 0644, d_tracer,
&global_trace, &tracing_free_buffer_fops);
trace_create_file("trace_marker", 0220, d_tracer,
NULL, &tracing_mark_fops);
trace_create_file("saved_cmdlines", 0444, d_tracer,
NULL, &tracing_saved_cmdlines_fops);
trace_create_file("trace_clock", 0644, d_tracer, NULL,
&trace_clock_fops);
trace_create_file("tracing_on", 0644, d_tracer,
&global_trace, &rb_simple_fops);
#ifdef CONFIG_DYNAMIC_FTRACE
trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif
create_trace_options_dir();
for_each_tracing_cpu(cpu)
tracing_init_debugfs_percpu(cpu);
return 0;
}
static int trace_panic_handler(struct notifier_block *this,
unsigned long event, void *unused)
{
if (ftrace_dump_on_oops)
ftrace_dump(ftrace_dump_on_oops);
return NOTIFY_OK;
}
static struct notifier_block trace_panic_notifier = {
.notifier_call = trace_panic_handler,
.next = NULL,
.priority = 150 /* priority: INT_MAX >= x >= 0 */
};
static int trace_die_handler(struct notifier_block *self,
unsigned long val,
void *data)
{
switch (val) {
case DIE_OOPS:
if (ftrace_dump_on_oops)
ftrace_dump(ftrace_dump_on_oops);
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block trace_die_notifier = {
.notifier_call = trace_die_handler,
.priority = 200
};
/*
* printk is capped at 1024 characters, and we really don't need it that big.
* Nothing should be printing 1000 characters anyway.
*/
#define TRACE_MAX_PRINT 1000
/*
* Define here KERN_TRACE so that we have one place to modify
* it if we decide to change what log level the ftrace dump
* should be at.
*/
#define KERN_TRACE KERN_EMERG
void
trace_printk_seq(struct trace_seq *s)
{
/* Probably should print a warning here. */
if (s->len >= 1000)
s->len = 1000;
/* should be NUL-terminated, but we are paranoid. */
s->buffer[s->len] = 0;
printk(KERN_TRACE "%s", s->buffer);
trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
iter->tr = &global_trace;
iter->trace = current_trace;
iter->cpu_file = TRACE_PIPE_ALL_CPU;
}
static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
{
static arch_spinlock_t ftrace_dump_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
unsigned int old_userobj;
static int dump_ran;
unsigned long flags;
int cnt = 0, cpu;
/* only one dump */
local_irq_save(flags);
arch_spin_lock(&ftrace_dump_lock);
if (dump_ran)
goto out;
dump_ran = 1;
tracing_off();
/* Did function tracer already get disabled? */
if (ftrace_is_dead()) {
printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
printk("# MAY BE MISSING FUNCTION EVENTS\n");
}
if (disable_tracing)
ftrace_kill();
trace_init_global_iter(&iter);
for_each_tracing_cpu(cpu) {
atomic_inc(&iter.tr->data[cpu]->disabled);
}
old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
/* don't look at user memory in panic mode */
trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
/* Simulate the iterator */
iter.tr = &global_trace;
iter.trace = current_trace;
switch (oops_dump_mode) {
case DUMP_ALL:
iter.cpu_file = TRACE_PIPE_ALL_CPU;
break;
case DUMP_ORIG:
iter.cpu_file = raw_smp_processor_id();
break;
case DUMP_NONE:
goto out_enable;
default:
printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
iter.cpu_file = TRACE_PIPE_ALL_CPU;
}
printk(KERN_TRACE "Dumping ftrace buffer:\n");
/*
* We need to stop all tracing on all CPUS to read
* the next buffer. This is a bit expensive, but is
* not done often. We fill all that we can read,
* and then release the locks again.
*/
while (!trace_empty(&iter)) {
if (!cnt)
printk(KERN_TRACE "---------------------------------\n");
cnt++;
/* reset all but tr, trace, and overruns */
memset(&iter.seq, 0,
sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq));
iter.iter_flags |= TRACE_FILE_LAT_FMT;
iter.pos = -1;
if (trace_find_next_entry_inc(&iter) != NULL) {
int ret;
ret = print_trace_line(&iter);
if (ret != TRACE_TYPE_NO_CONSUME)
trace_consume(&iter);
}
touch_nmi_watchdog();
trace_printk_seq(&iter.seq);
}
if (!cnt)
printk(KERN_TRACE " (ftrace buffer empty)\n");
else
printk(KERN_TRACE "---------------------------------\n");
out_enable:
/* Re-enable tracing if requested */
if (!disable_tracing) {
trace_flags |= old_userobj;
for_each_tracing_cpu(cpu) {
atomic_dec(&iter.tr->data[cpu]->disabled);
}
tracing_on();
}
out:
arch_spin_unlock(&ftrace_dump_lock);
local_irq_restore(flags);
}
/* By default: disable tracing after the dump */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
__ftrace_dump(true, oops_dump_mode);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
__init static int tracer_alloc_buffers(void)
{
int ring_buf_size;
enum ring_buffer_flags rb_flags;
int i;
int ret = -ENOMEM;
if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
goto out;
if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask;
/* Only allocate trace_printk buffers if a trace_printk exists */
if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
trace_printk_init_buffers();
/* To save memory, keep the ring buffer size at its minimum */
if (ring_buffer_expanded)
ring_buf_size = trace_buf_size;
else
ring_buf_size = 1;
rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
cpumask_copy(tracing_cpumask, cpu_all_mask);
/* TODO: make the number of buffers hot pluggable with CPUS */
global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
if (!global_trace.buffer) {
printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
WARN_ON(1);
goto out_free_cpumask;
}
if (global_trace.buffer_disabled)
tracing_off();
#ifdef CONFIG_TRACER_MAX_TRACE
max_tr.buffer = ring_buffer_alloc(1, rb_flags);
if (!max_tr.buffer) {
printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
WARN_ON(1);
ring_buffer_free(global_trace.buffer);
goto out_free_cpumask;
}
#endif
/* Set up the per-cpu trace data pointers for all buffers */
for_each_tracing_cpu(i) {
global_trace.data[i] = &per_cpu(global_trace_cpu, i);
max_tr.data[i] = &per_cpu(max_tr_data, i);
}
set_buffer_entries(&global_trace,
ring_buffer_size(global_trace.buffer, 0));
#ifdef CONFIG_TRACER_MAX_TRACE
set_buffer_entries(&max_tr, 1);
#endif
trace_init_cmdlines();
register_tracer(&nop_trace);
current_trace = &nop_trace;
/* All seems OK, enable tracing */
tracing_disabled = 0;
atomic_notifier_chain_register(&panic_notifier_list,
&trace_panic_notifier);
register_die_notifier(&trace_die_notifier);
return 0;
out_free_cpumask:
free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
free_cpumask_var(tracing_buffer_mask);
out:
return ret;
}
__init static int clear_boot_tracer(void)
{
/*
* The default bootup tracer name points into an init section,
* which is freed after boot. This function is called as a late
* initcall; if the boot tracer was never registered by then,
* clear the pointer out to prevent a later registration from
* accessing memory that is about to be freed.
*/
if (!default_bootup_tracer)
return 0;
printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
default_bootup_tracer);
default_bootup_tracer = NULL;
return 0;
}
early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);
| gpl-2.0 |
sthalik/android_kernel_motorola_msm8916 | drivers/mfd/wm5102-tables.c | 80 | 89632 | /*
* wm5102-tables.c -- WM5102 data tables
*
* Copyright 2012 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mfd/arizona/core.h>
#include <linux/mfd/arizona/registers.h>
#include "arizona.h"
#define WM5102_NUM_AOD_ISR 2
#define WM5102_NUM_ISR 5
static const struct reg_default wm5102_reva_patch[] = {
{ 0x80, 0x0003 },
{ 0x221, 0x0090 },
{ 0x211, 0x0014 },
{ 0x212, 0x0000 },
{ 0x214, 0x000C },
{ 0x171, 0x0002 },
{ 0x171, 0x0000 },
{ 0x461, 0x8000 },
{ 0x463, 0x50F0 },
{ 0x465, 0x4820 },
{ 0x467, 0x4040 },
{ 0x469, 0x3940 },
{ 0x46B, 0x3310 },
{ 0x46D, 0x2D80 },
{ 0x46F, 0x2890 },
{ 0x471, 0x1990 },
{ 0x473, 0x1450 },
{ 0x475, 0x1020 },
{ 0x477, 0x0CD0 },
{ 0x479, 0x0A30 },
{ 0x47B, 0x0810 },
{ 0x47D, 0x0510 },
{ 0x4D1, 0x017F },
{ 0x500, 0x000D },
{ 0x507, 0x1820 },
{ 0x508, 0x1820 },
{ 0x540, 0x000D },
{ 0x547, 0x1820 },
{ 0x548, 0x1820 },
{ 0x580, 0x000D },
{ 0x587, 0x1820 },
{ 0x588, 0x1820 },
{ 0x80, 0x0000 },
};
static const struct reg_default wm5102_revb_patch[] = {
{ 0x19, 0x0001 },
{ 0x80, 0x0003 },
{ 0x081, 0xE022 },
{ 0x410, 0x6080 },
{ 0x418, 0xa080 },
{ 0x420, 0xa080 },
{ 0x428, 0xe000 },
{ 0x442, 0x3F0A },
{ 0x443, 0xDC1F },
{ 0x4B0, 0x0066 },
{ 0x458, 0x000b },
{ 0x212, 0x0000 },
{ 0x171, 0x0000 },
{ 0x35E, 0x000C },
{ 0x2D4, 0x0000 },
{ 0x4DC, 0x0900 },
{ 0x80, 0x0000 },
};
static const struct reg_default wm5102t_pwr_1[] = {
{ 0x46C, 0xC01 },
{ 0x46E, 0xC01 },
{ 0x470, 0xC01 },
};
static const struct reg_default wm5102t_pwr_2[] = {
{ 0x462, 0xC00 },
{ 0x464, 0xC00 },
{ 0x466, 0xC00 },
{ 0x468, 0xC00 },
{ 0x46a, 0xC00 },
{ 0x46c, 0xC00 },
{ 0x46e, 0xC00 },
{ 0x470, 0xC00 },
{ 0x476, 0x806 },
};
static const struct reg_default wm5102t_pwr_3[] = {
{ 0x462, 0xC00 },
{ 0x464, 0xC00 },
{ 0x466, 0xC00 },
{ 0x468, 0xC00 },
{ 0x46a, 0xC00 },
{ 0x46c, 0xC00 },
{ 0x46e, 0xC00 },
{ 0x470, 0xC00 },
{ 0x472, 0xC00 },
{ 0x47c, 0x806 },
{ 0x47e, 0x80e },
};
static const struct reg_default wm5102t_pwr_4[] = {
{ 0x462, 0xC00 },
{ 0x464, 0xC00 },
{ 0x466, 0xC00 },
{ 0x468, 0xC00 },
{ 0x46a, 0xC00 },
{ 0x46c, 0xC00 },
{ 0x46e, 0xC00 },
{ 0x470, 0xC00 },
{ 0x472, 0xC00 },
{ 0x474, 0xC00 },
{ 0x476, 0xC00 },
{ 0x478, 0xC00 },
{ 0x47a, 0xC00 },
{ 0x47c, 0xC00 },
{ 0x47e, 0xC00 },
};
static const struct {
const struct reg_default *patch;
int size;
} wm5102t_pwr[] = {
{ NULL, 0 },
{ wm5102t_pwr_1, ARRAY_SIZE(wm5102t_pwr_1) },
{ wm5102t_pwr_2, ARRAY_SIZE(wm5102t_pwr_2) },
{ wm5102t_pwr_3, ARRAY_SIZE(wm5102t_pwr_3) },
{ wm5102t_pwr_4, ARRAY_SIZE(wm5102t_pwr_4) },
};
/* We use a function so we can use ARRAY_SIZE() */
int wm5102_patch(struct arizona *arizona)
{
const struct reg_default *wm5102_patch;
int ret;
int patch_size;
int pwr_index = arizona->pdata.wm5102t_output_pwr;
switch (arizona->rev) {
case 0:
wm5102_patch = wm5102_reva_patch;
patch_size = ARRAY_SIZE(wm5102_reva_patch);
break;
default:
wm5102_patch = wm5102_revb_patch;
patch_size = ARRAY_SIZE(wm5102_revb_patch);
break;
}
ret = regmap_multi_reg_write_bypassed(arizona->regmap,
wm5102_patch,
patch_size);
if (ret != 0)
goto out;
if (pwr_index < ARRAY_SIZE(wm5102t_pwr))
ret = regmap_multi_reg_write_bypassed(arizona->regmap,
wm5102t_pwr[pwr_index].patch,
wm5102t_pwr[pwr_index].size);
else
dev_err(arizona->dev, "Invalid wm5102t output power\n");
out:
return ret;
}
static const struct regmap_irq wm5102_aod_irqs[ARIZONA_NUM_IRQ] = {
[ARIZONA_IRQ_MICD_CLAMP_FALL] = {
.mask = ARIZONA_MICD_CLAMP_FALL_EINT1
},
[ARIZONA_IRQ_MICD_CLAMP_RISE] = {
.mask = ARIZONA_MICD_CLAMP_RISE_EINT1
},
[ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
[ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
[ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
[ARIZONA_IRQ_JD_RISE] = { .mask = ARIZONA_JD1_RISE_EINT1 },
};
const struct regmap_irq_chip wm5102_aod = {
.name = "wm5102 AOD",
.status_base = ARIZONA_AOD_IRQ1,
.mask_base = ARIZONA_AOD_IRQ_MASK_IRQ1,
.ack_base = ARIZONA_AOD_IRQ1,
.wake_base = ARIZONA_WAKE_CONTROL,
.wake_invert = 1,
.num_regs = 1,
.irqs = wm5102_aod_irqs,
.num_irqs = ARRAY_SIZE(wm5102_aod_irqs),
};
static const struct regmap_irq wm5102_irqs[ARIZONA_NUM_IRQ] = {
[ARIZONA_IRQ_GP4] = { .reg_offset = 0, .mask = ARIZONA_GP4_EINT1 },
[ARIZONA_IRQ_GP3] = { .reg_offset = 0, .mask = ARIZONA_GP3_EINT1 },
[ARIZONA_IRQ_GP2] = { .reg_offset = 0, .mask = ARIZONA_GP2_EINT1 },
[ARIZONA_IRQ_GP1] = { .reg_offset = 0, .mask = ARIZONA_GP1_EINT1 },
[ARIZONA_IRQ_DSP1_RAM_RDY] = {
.reg_offset = 1, .mask = ARIZONA_DSP1_RAM_RDY_EINT1
},
[ARIZONA_IRQ_DSP_IRQ2] = {
.reg_offset = 1, .mask = ARIZONA_DSP_IRQ2_EINT1
},
[ARIZONA_IRQ_DSP_IRQ1] = {
.reg_offset = 1, .mask = ARIZONA_DSP_IRQ1_EINT1
},
[ARIZONA_IRQ_SPK_OVERHEAT_WARN] = {
.reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_WARN_EINT1
},
[ARIZONA_IRQ_SPK_OVERHEAT] = {
.reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_EINT1
},
[ARIZONA_IRQ_HPDET] = {
.reg_offset = 2, .mask = ARIZONA_HPDET_EINT1
},
[ARIZONA_IRQ_MICDET] = {
.reg_offset = 2, .mask = ARIZONA_MICDET_EINT1
},
[ARIZONA_IRQ_WSEQ_DONE] = {
.reg_offset = 2, .mask = ARIZONA_WSEQ_DONE_EINT1
},
[ARIZONA_IRQ_DRC2_SIG_DET] = {
.reg_offset = 2, .mask = ARIZONA_DRC2_SIG_DET_EINT1
},
[ARIZONA_IRQ_DRC1_SIG_DET] = {
.reg_offset = 2, .mask = ARIZONA_DRC1_SIG_DET_EINT1
},
[ARIZONA_IRQ_ASRC2_LOCK] = {
.reg_offset = 2, .mask = ARIZONA_ASRC2_LOCK_EINT1
},
[ARIZONA_IRQ_ASRC1_LOCK] = {
.reg_offset = 2, .mask = ARIZONA_ASRC1_LOCK_EINT1
},
[ARIZONA_IRQ_UNDERCLOCKED] = {
.reg_offset = 2, .mask = ARIZONA_UNDERCLOCKED_EINT1
},
[ARIZONA_IRQ_OVERCLOCKED] = {
.reg_offset = 2, .mask = ARIZONA_OVERCLOCKED_EINT1
},
[ARIZONA_IRQ_FLL2_LOCK] = {
.reg_offset = 2, .mask = ARIZONA_FLL2_LOCK_EINT1
},
[ARIZONA_IRQ_FLL1_LOCK] = {
.reg_offset = 2, .mask = ARIZONA_FLL1_LOCK_EINT1
},
[ARIZONA_IRQ_CLKGEN_ERR] = {
.reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_EINT1
},
[ARIZONA_IRQ_CLKGEN_ERR_ASYNC] = {
.reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_ASYNC_EINT1
},
[ARIZONA_IRQ_ASRC_CFG_ERR] = {
.reg_offset = 3, .mask = ARIZONA_ASRC_CFG_ERR_EINT1
},
[ARIZONA_IRQ_AIF3_ERR] = {
.reg_offset = 3, .mask = ARIZONA_AIF3_ERR_EINT1
},
[ARIZONA_IRQ_AIF2_ERR] = {
.reg_offset = 3, .mask = ARIZONA_AIF2_ERR_EINT1
},
[ARIZONA_IRQ_AIF1_ERR] = {
.reg_offset = 3, .mask = ARIZONA_AIF1_ERR_EINT1
},
[ARIZONA_IRQ_CTRLIF_ERR] = {
.reg_offset = 3, .mask = ARIZONA_CTRLIF_ERR_EINT1
},
[ARIZONA_IRQ_MIXER_DROPPED_SAMPLES] = {
.reg_offset = 3, .mask = ARIZONA_MIXER_DROPPED_SAMPLE_EINT1
},
[ARIZONA_IRQ_ASYNC_CLK_ENA_LOW] = {
.reg_offset = 3, .mask = ARIZONA_ASYNC_CLK_ENA_LOW_EINT1
},
[ARIZONA_IRQ_SYSCLK_ENA_LOW] = {
.reg_offset = 3, .mask = ARIZONA_SYSCLK_ENA_LOW_EINT1
},
[ARIZONA_IRQ_ISRC1_CFG_ERR] = {
.reg_offset = 3, .mask = ARIZONA_ISRC1_CFG_ERR_EINT1
},
[ARIZONA_IRQ_ISRC2_CFG_ERR] = {
.reg_offset = 3, .mask = ARIZONA_ISRC2_CFG_ERR_EINT1
},
[ARIZONA_IRQ_BOOT_DONE] = {
.reg_offset = 4, .mask = ARIZONA_BOOT_DONE_EINT1
},
[ARIZONA_IRQ_DCS_DAC_DONE] = {
.reg_offset = 4, .mask = ARIZONA_DCS_DAC_DONE_EINT1
},
[ARIZONA_IRQ_DCS_HP_DONE] = {
.reg_offset = 4, .mask = ARIZONA_DCS_HP_DONE_EINT1
},
[ARIZONA_IRQ_FLL2_CLOCK_OK] = {
.reg_offset = 4, .mask = ARIZONA_FLL2_CLOCK_OK_EINT1
},
[ARIZONA_IRQ_FLL1_CLOCK_OK] = {
.reg_offset = 4, .mask = ARIZONA_FLL1_CLOCK_OK_EINT1
},
};
const struct regmap_irq_chip wm5102_irq = {
.name = "wm5102 IRQ",
.status_base = ARIZONA_INTERRUPT_STATUS_1,
.mask_base = ARIZONA_INTERRUPT_STATUS_1_MASK,
.ack_base = ARIZONA_INTERRUPT_STATUS_1,
.num_regs = 5,
.irqs = wm5102_irqs,
.num_irqs = ARRAY_SIZE(wm5102_irqs),
};
static const struct reg_default wm5102_reg_default[] = {
{ 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */
{ 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */
{ 0x00000020, 0x0000 }, /* R32 - Tone Generator 1 */
{ 0x00000021, 0x1000 }, /* R33 - Tone Generator 2 */
{ 0x00000022, 0x0000 }, /* R34 - Tone Generator 3 */
{ 0x00000023, 0x1000 }, /* R35 - Tone Generator 4 */
{ 0x00000024, 0x0000 }, /* R36 - Tone Generator 5 */
{ 0x00000030, 0x0000 }, /* R48 - PWM Drive 1 */
{ 0x00000031, 0x0100 }, /* R49 - PWM Drive 2 */
{ 0x00000032, 0x0100 }, /* R50 - PWM Drive 3 */
{ 0x00000040, 0x0000 }, /* R64 - Wake control */
{ 0x00000041, 0x0000 }, /* R65 - Sequence control */
{ 0x00000061, 0x01FF }, /* R97 - Sample Rate Sequence Select 1 */
{ 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */
{ 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */
{ 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */
{ 0x00000066, 0x01FF }, /* R102 - Always On Triggers Sequence Select 1 */
{ 0x00000067, 0x01FF }, /* R103 - Always On Triggers Sequence Select 2 */
{ 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 3 */
{ 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 4 */
{ 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 5 */
{ 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 6 */
{ 0x0000006E, 0x01FF }, /* R110 - Trigger Sequence Select 32 */
{ 0x0000006F, 0x01FF }, /* R111 - Trigger Sequence Select 33 */
{ 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */
{ 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */
{ 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */
{ 0x00000092, 0x0000 }, /* R146 - Haptics phase 1 intensity */
{ 0x00000093, 0x0000 }, /* R147 - Haptics phase 1 duration */
{ 0x00000094, 0x0000 }, /* R148 - Haptics phase 2 intensity */
{ 0x00000095, 0x0000 }, /* R149 - Haptics phase 2 duration */
{ 0x00000096, 0x0000 }, /* R150 - Haptics phase 3 intensity */
{ 0x00000097, 0x0000 }, /* R151 - Haptics phase 3 duration */
{ 0x00000100, 0x0002 }, /* R256 - Clock 32k 1 */
{ 0x00000101, 0x0304 }, /* R257 - System Clock 1 */
{ 0x00000102, 0x0011 }, /* R258 - Sample rate 1 */
{ 0x00000103, 0x0011 }, /* R259 - Sample rate 2 */
{ 0x00000104, 0x0011 }, /* R260 - Sample rate 3 */
{ 0x00000112, 0x0305 }, /* R274 - Async clock 1 */
{ 0x00000113, 0x0011 }, /* R275 - Async sample rate 1 */
{ 0x00000114, 0x0011 }, /* R276 - Async sample rate 2 */
{ 0x00000149, 0x0000 }, /* R329 - Output system clock */
{ 0x0000014A, 0x0000 }, /* R330 - Output async clock */
{ 0x00000152, 0x0000 }, /* R338 - Rate Estimator 1 */
{ 0x00000153, 0x0000 }, /* R339 - Rate Estimator 2 */
{ 0x00000154, 0x0000 }, /* R340 - Rate Estimator 3 */
{ 0x00000155, 0x0000 }, /* R341 - Rate Estimator 4 */
{ 0x00000156, 0x0000 }, /* R342 - Rate Estimator 5 */
{ 0x00000161, 0x0000 }, /* R353 - Dynamic Frequency Scaling 1 */
{ 0x00000171, 0x0000 }, /* R369 - FLL1 Control 1 */
{ 0x00000172, 0x0008 }, /* R370 - FLL1 Control 2 */
{ 0x00000173, 0x0018 }, /* R371 - FLL1 Control 3 */
{ 0x00000174, 0x007D }, /* R372 - FLL1 Control 4 */
{ 0x00000175, 0x0004 }, /* R373 - FLL1 Control 5 */
{ 0x00000176, 0x0000 }, /* R374 - FLL1 Control 6 */
{ 0x00000177, 0x0181 }, /* R375 - FLL1 Loop Filter Test 1 */
{ 0x00000178, 0x0000 }, /* R376 - FLL1 NCO Test 0 */
{ 0x00000179, 0x0000 }, /* R377 - FLL1 Control 7 */
{ 0x00000181, 0x0000 }, /* R385 - FLL1 Synchroniser 1 */
{ 0x00000182, 0x0000 }, /* R386 - FLL1 Synchroniser 2 */
{ 0x00000183, 0x0000 }, /* R387 - FLL1 Synchroniser 3 */
{ 0x00000184, 0x0000 }, /* R388 - FLL1 Synchroniser 4 */
{ 0x00000185, 0x0000 }, /* R389 - FLL1 Synchroniser 5 */
{ 0x00000186, 0x0000 }, /* R390 - FLL1 Synchroniser 6 */
{ 0x00000187, 0x0001 }, /* R391 - FLL1 Synchroniser 7 */
{ 0x00000189, 0x0000 }, /* R393 - FLL1 Spread Spectrum */
{ 0x0000018A, 0x0004 }, /* R394 - FLL1 GPIO Clock */
{ 0x00000191, 0x0000 }, /* R401 - FLL2 Control 1 */
{ 0x00000192, 0x0008 }, /* R402 - FLL2 Control 2 */
{ 0x00000193, 0x0018 }, /* R403 - FLL2 Control 3 */
{ 0x00000194, 0x007D }, /* R404 - FLL2 Control 4 */
{ 0x00000195, 0x0004 }, /* R405 - FLL2 Control 5 */
{ 0x00000196, 0x0000 }, /* R406 - FLL2 Control 6 */
{ 0x00000197, 0x0000 }, /* R407 - FLL2 Loop Filter Test 1 */
{ 0x00000198, 0x0000 }, /* R408 - FLL2 NCO Test 0 */
{ 0x00000199, 0x0000 }, /* R409 - FLL2 Control 7 */
{ 0x000001A1, 0x0000 }, /* R417 - FLL2 Synchroniser 1 */
{ 0x000001A2, 0x0000 }, /* R418 - FLL2 Synchroniser 2 */
{ 0x000001A3, 0x0000 }, /* R419 - FLL2 Synchroniser 3 */
{ 0x000001A4, 0x0000 }, /* R420 - FLL2 Synchroniser 4 */
{ 0x000001A5, 0x0000 }, /* R421 - FLL2 Synchroniser 5 */
{ 0x000001A6, 0x0000 }, /* R422 - FLL2 Synchroniser 6 */
{ 0x000001A7, 0x0001 }, /* R423 - FLL2 Synchroniser 7 */
{ 0x000001A9, 0x0000 }, /* R425 - FLL2 Spread Spectrum */
{ 0x000001AA, 0x0004 }, /* R426 - FLL2 GPIO Clock */
{ 0x00000200, 0x0006 }, /* R512 - Mic Charge Pump 1 */
{ 0x00000210, 0x00D4 }, /* R528 - LDO1 Control 1 */
{ 0x00000212, 0x0000 }, /* R530 - LDO1 Control 2 */
{ 0x00000213, 0x0344 }, /* R531 - LDO2 Control 1 */
{ 0x00000218, 0x01A6 }, /* R536 - Mic Bias Ctrl 1 */
{ 0x00000219, 0x01A6 }, /* R537 - Mic Bias Ctrl 2 */
{ 0x0000021A, 0x01A6 }, /* R538 - Mic Bias Ctrl 3 */
{ 0x00000225, 0x0400 }, /* R549 - HP Ctrl 1L */
{ 0x00000226, 0x0400 }, /* R550 - HP Ctrl 1R */
{ 0x00000293, 0x0000 }, /* R659 - Accessory Detect Mode 1 */
{ 0x0000029B, 0x0020 }, /* R667 - Headphone Detect 1 */
{ 0x0000029C, 0x0000 }, /* R668 - Headphone Detect 2 */
{ 0x0000029F, 0x0000 }, /* R671 - Headphone Detect Test */
{ 0x000002A2, 0x0000 }, /* R674 - Micd clamp control */
{ 0x000002A3, 0x1102 }, /* R675 - Mic Detect 1 */
{ 0x000002A4, 0x009F }, /* R676 - Mic Detect 2 */
{ 0x000002A5, 0x0000 }, /* R677 - Mic Detect 3 */
{ 0x000002A6, 0x3737 }, /* R678 - Mic Detect Level 1 */
{ 0x000002A7, 0x372C }, /* R679 - Mic Detect Level 2 */
{ 0x000002A8, 0x1422 }, /* R680 - Mic Detect Level 3 */
{ 0x000002A9, 0x030A }, /* R681 - Mic Detect Level 4 */
{ 0x000002C3, 0x0000 }, /* R707 - Mic noise mix control 1 */
{ 0x000002CB, 0x0000 }, /* R715 - Isolation control */
{ 0x000002D3, 0x0000 }, /* R723 - Jack detect analogue */
{ 0x00000300, 0x0000 }, /* R768 - Input Enables */
{ 0x00000308, 0x0000 }, /* R776 - Input Rate */
{ 0x00000309, 0x0022 }, /* R777 - Input Volume Ramp */
{ 0x00000310, 0x2080 }, /* R784 - IN1L Control */
{ 0x00000311, 0x0180 }, /* R785 - ADC Digital Volume 1L */
{ 0x00000312, 0x0000 }, /* R786 - DMIC1L Control */
{ 0x00000314, 0x0080 }, /* R788 - IN1R Control */
{ 0x00000315, 0x0180 }, /* R789 - ADC Digital Volume 1R */
{ 0x00000316, 0x0000 }, /* R790 - DMIC1R Control */
{ 0x00000318, 0x2080 }, /* R792 - IN2L Control */
{ 0x00000319, 0x0180 }, /* R793 - ADC Digital Volume 2L */
{ 0x0000031A, 0x0000 }, /* R794 - DMIC2L Control */
{ 0x0000031C, 0x0080 }, /* R796 - IN2R Control */
{ 0x0000031D, 0x0180 }, /* R797 - ADC Digital Volume 2R */
{ 0x0000031E, 0x0000 }, /* R798 - DMIC2R Control */
{ 0x00000320, 0x2080 }, /* R800 - IN3L Control */
{ 0x00000321, 0x0180 }, /* R801 - ADC Digital Volume 3L */
{ 0x00000322, 0x0000 }, /* R802 - DMIC3L Control */
{ 0x00000324, 0x0080 }, /* R804 - IN3R Control */
{ 0x00000325, 0x0180 }, /* R805 - ADC Digital Volume 3R */
{ 0x00000326, 0x0000 }, /* R806 - DMIC3R Control */
{ 0x00000400, 0x0000 }, /* R1024 - Output Enables 1 */
{ 0x00000408, 0x0000 }, /* R1032 - Output Rate 1 */
{ 0x00000409, 0x0022 }, /* R1033 - Output Volume Ramp */
{ 0x00000410, 0x6080 }, /* R1040 - Output Path Config 1L */
{ 0x00000411, 0x0180 }, /* R1041 - DAC Digital Volume 1L */
{ 0x00000412, 0x0081 }, /* R1042 - DAC Volume Limit 1L */
{ 0x00000413, 0x0001 }, /* R1043 - Noise Gate Select 1L */
{ 0x00000414, 0x0080 }, /* R1044 - Output Path Config 1R */
{ 0x00000415, 0x0180 }, /* R1045 - DAC Digital Volume 1R */
{ 0x00000416, 0x0081 }, /* R1046 - DAC Volume Limit 1R */
{ 0x00000417, 0x0002 }, /* R1047 - Noise Gate Select 1R */
{ 0x00000418, 0xA080 }, /* R1048 - Output Path Config 2L */
{ 0x00000419, 0x0180 }, /* R1049 - DAC Digital Volume 2L */
{ 0x0000041A, 0x0081 }, /* R1050 - DAC Volume Limit 2L */
{ 0x0000041B, 0x0004 }, /* R1051 - Noise Gate Select 2L */
{ 0x0000041C, 0x0080 }, /* R1052 - Output Path Config 2R */
{ 0x0000041D, 0x0180 }, /* R1053 - DAC Digital Volume 2R */
{ 0x0000041E, 0x0081 }, /* R1054 - DAC Volume Limit 2R */
{ 0x0000041F, 0x0008 }, /* R1055 - Noise Gate Select 2R */
{ 0x00000420, 0xA080 }, /* R1056 - Output Path Config 3L */
{ 0x00000421, 0x0180 }, /* R1057 - DAC Digital Volume 3L */
{ 0x00000422, 0x0081 }, /* R1058 - DAC Volume Limit 3L */
{ 0x00000423, 0x0010 }, /* R1059 - Noise Gate Select 3L */
{ 0x00000428, 0xE000 }, /* R1064 - Output Path Config 4L */
{ 0x00000429, 0x0180 }, /* R1065 - DAC Digital Volume 4L */
{ 0x0000042A, 0x0081 }, /* R1066 - Out Volume 4L */
{ 0x0000042B, 0x0040 }, /* R1067 - Noise Gate Select 4L */
{ 0x0000042D, 0x0180 }, /* R1069 - DAC Digital Volume 4R */
{ 0x0000042E, 0x0081 }, /* R1070 - Out Volume 4R */
{ 0x0000042F, 0x0080 }, /* R1071 - Noise Gate Select 4R */
{ 0x00000430, 0x0000 }, /* R1072 - Output Path Config 5L */
{ 0x00000431, 0x0180 }, /* R1073 - DAC Digital Volume 5L */
{ 0x00000432, 0x0081 }, /* R1074 - DAC Volume Limit 5L */
{ 0x00000433, 0x0100 }, /* R1075 - Noise Gate Select 5L */
{ 0x00000435, 0x0180 }, /* R1077 - DAC Digital Volume 5R */
{ 0x00000436, 0x0081 }, /* R1078 - DAC Volume Limit 5R */
{ 0x00000437, 0x0200 }, /* R1079 - Noise Gate Select 5R */
{ 0x00000440, 0x8FFF }, /* R1088 - DRE Enable */
{ 0x00000442, 0x3F0A }, /* R1090 - DRE Control 2 */
{ 0x00000443, 0xDC1F }, /* R1091 - DRE Control 3 */
{ 0x00000450, 0x0000 }, /* R1104 - DAC AEC Control 1 */
{ 0x00000458, 0x000B }, /* R1112 - Noise Gate Control */
{ 0x00000490, 0x0069 }, /* R1168 - PDM SPK1 CTRL 1 */
{ 0x00000491, 0x0000 }, /* R1169 - PDM SPK1 CTRL 2 */
{ 0x00000500, 0x000C }, /* R1280 - AIF1 BCLK Ctrl */
{ 0x00000501, 0x0008 }, /* R1281 - AIF1 Tx Pin Ctrl */
{ 0x00000502, 0x0000 }, /* R1282 - AIF1 Rx Pin Ctrl */
{ 0x00000503, 0x0000 }, /* R1283 - AIF1 Rate Ctrl */
{ 0x00000504, 0x0000 }, /* R1284 - AIF1 Format */
{ 0x00000505, 0x0040 }, /* R1285 - AIF1 Tx BCLK Rate */
{ 0x00000506, 0x0040 }, /* R1286 - AIF1 Rx BCLK Rate */
{ 0x00000507, 0x1818 }, /* R1287 - AIF1 Frame Ctrl 1 */
{ 0x00000508, 0x1818 }, /* R1288 - AIF1 Frame Ctrl 2 */
{ 0x00000509, 0x0000 }, /* R1289 - AIF1 Frame Ctrl 3 */
{ 0x0000050A, 0x0001 }, /* R1290 - AIF1 Frame Ctrl 4 */
{ 0x0000050B, 0x0002 }, /* R1291 - AIF1 Frame Ctrl 5 */
{ 0x0000050C, 0x0003 }, /* R1292 - AIF1 Frame Ctrl 6 */
{ 0x0000050D, 0x0004 }, /* R1293 - AIF1 Frame Ctrl 7 */
{ 0x0000050E, 0x0005 }, /* R1294 - AIF1 Frame Ctrl 8 */
{ 0x0000050F, 0x0006 }, /* R1295 - AIF1 Frame Ctrl 9 */
{ 0x00000510, 0x0007 }, /* R1296 - AIF1 Frame Ctrl 10 */
{ 0x00000511, 0x0000 }, /* R1297 - AIF1 Frame Ctrl 11 */
{ 0x00000512, 0x0001 }, /* R1298 - AIF1 Frame Ctrl 12 */
{ 0x00000513, 0x0002 }, /* R1299 - AIF1 Frame Ctrl 13 */
{ 0x00000514, 0x0003 }, /* R1300 - AIF1 Frame Ctrl 14 */
{ 0x00000515, 0x0004 }, /* R1301 - AIF1 Frame Ctrl 15 */
{ 0x00000516, 0x0005 }, /* R1302 - AIF1 Frame Ctrl 16 */
{ 0x00000517, 0x0006 }, /* R1303 - AIF1 Frame Ctrl 17 */
{ 0x00000518, 0x0007 }, /* R1304 - AIF1 Frame Ctrl 18 */
{ 0x00000519, 0x0000 }, /* R1305 - AIF1 Tx Enables */
{ 0x0000051A, 0x0000 }, /* R1306 - AIF1 Rx Enables */
{ 0x00000540, 0x000C }, /* R1344 - AIF2 BCLK Ctrl */
{ 0x00000541, 0x0008 }, /* R1345 - AIF2 Tx Pin Ctrl */
{ 0x00000542, 0x0000 }, /* R1346 - AIF2 Rx Pin Ctrl */
{ 0x00000543, 0x0000 }, /* R1347 - AIF2 Rate Ctrl */
{ 0x00000544, 0x0000 }, /* R1348 - AIF2 Format */
{ 0x00000545, 0x0040 }, /* R1349 - AIF2 Tx BCLK Rate */
{ 0x00000546, 0x0040 }, /* R1350 - AIF2 Rx BCLK Rate */
{ 0x00000547, 0x1818 }, /* R1351 - AIF2 Frame Ctrl 1 */
{ 0x00000548, 0x1818 }, /* R1352 - AIF2 Frame Ctrl 2 */
{ 0x00000549, 0x0000 }, /* R1353 - AIF2 Frame Ctrl 3 */
{ 0x0000054A, 0x0001 }, /* R1354 - AIF2 Frame Ctrl 4 */
{ 0x00000551, 0x0000 }, /* R1361 - AIF2 Frame Ctrl 11 */
{ 0x00000552, 0x0001 }, /* R1362 - AIF2 Frame Ctrl 12 */
{ 0x00000559, 0x0000 }, /* R1369 - AIF2 Tx Enables */
{ 0x0000055A, 0x0000 }, /* R1370 - AIF2 Rx Enables */
{ 0x00000580, 0x000C }, /* R1408 - AIF3 BCLK Ctrl */
{ 0x00000581, 0x0008 }, /* R1409 - AIF3 Tx Pin Ctrl */
{ 0x00000582, 0x0000 }, /* R1410 - AIF3 Rx Pin Ctrl */
{ 0x00000583, 0x0000 }, /* R1411 - AIF3 Rate Ctrl */
{ 0x00000584, 0x0000 }, /* R1412 - AIF3 Format */
{ 0x00000585, 0x0040 }, /* R1413 - AIF3 Tx BCLK Rate */
{ 0x00000586, 0x0040 }, /* R1414 - AIF3 Rx BCLK Rate */
{ 0x00000587, 0x1818 }, /* R1415 - AIF3 Frame Ctrl 1 */
{ 0x00000588, 0x1818 }, /* R1416 - AIF3 Frame Ctrl 2 */
{ 0x00000589, 0x0000 }, /* R1417 - AIF3 Frame Ctrl 3 */
{ 0x0000058A, 0x0001 }, /* R1418 - AIF3 Frame Ctrl 4 */
{ 0x00000591, 0x0000 }, /* R1425 - AIF3 Frame Ctrl 11 */
{ 0x00000592, 0x0001 }, /* R1426 - AIF3 Frame Ctrl 12 */
{ 0x00000599, 0x0000 }, /* R1433 - AIF3 Tx Enables */
{ 0x0000059A, 0x0000 }, /* R1434 - AIF3 Rx Enables */
{ 0x000005E3, 0x0004 }, /* R1507 - SLIMbus Framer Ref Gear */
{ 0x000005E5, 0x0000 }, /* R1509 - SLIMbus Rates 1 */
{ 0x000005E6, 0x0000 }, /* R1510 - SLIMbus Rates 2 */
{ 0x000005E7, 0x0000 }, /* R1511 - SLIMbus Rates 3 */
{ 0x000005E8, 0x0000 }, /* R1512 - SLIMbus Rates 4 */
{ 0x000005E9, 0x0000 }, /* R1513 - SLIMbus Rates 5 */
{ 0x000005EA, 0x0000 }, /* R1514 - SLIMbus Rates 6 */
{ 0x000005EB, 0x0000 }, /* R1515 - SLIMbus Rates 7 */
{ 0x000005EC, 0x0000 }, /* R1516 - SLIMbus Rates 8 */
{ 0x000005F5, 0x0000 }, /* R1525 - SLIMbus RX Channel Enable */
{ 0x000005F6, 0x0000 }, /* R1526 - SLIMbus TX Channel Enable */
{ 0x00000640, 0x0000 }, /* R1600 - PWM1MIX Input 1 Source */
{ 0x00000641, 0x0080 }, /* R1601 - PWM1MIX Input 1 Volume */
{ 0x00000642, 0x0000 }, /* R1602 - PWM1MIX Input 2 Source */
{ 0x00000643, 0x0080 }, /* R1603 - PWM1MIX Input 2 Volume */
{ 0x00000644, 0x0000 }, /* R1604 - PWM1MIX Input 3 Source */
{ 0x00000645, 0x0080 }, /* R1605 - PWM1MIX Input 3 Volume */
{ 0x00000646, 0x0000 }, /* R1606 - PWM1MIX Input 4 Source */
{ 0x00000647, 0x0080 }, /* R1607 - PWM1MIX Input 4 Volume */
{ 0x00000648, 0x0000 }, /* R1608 - PWM2MIX Input 1 Source */
{ 0x00000649, 0x0080 }, /* R1609 - PWM2MIX Input 1 Volume */
{ 0x0000064A, 0x0000 }, /* R1610 - PWM2MIX Input 2 Source */
{ 0x0000064B, 0x0080 }, /* R1611 - PWM2MIX Input 2 Volume */
{ 0x0000064C, 0x0000 }, /* R1612 - PWM2MIX Input 3 Source */
{ 0x0000064D, 0x0080 }, /* R1613 - PWM2MIX Input 3 Volume */
{ 0x0000064E, 0x0000 }, /* R1614 - PWM2MIX Input 4 Source */
{ 0x0000064F, 0x0080 }, /* R1615 - PWM2MIX Input 4 Volume */
{ 0x00000660, 0x0000 }, /* R1632 - MICMIX Input 1 Source */
{ 0x00000661, 0x0080 }, /* R1633 - MICMIX Input 1 Volume */
{ 0x00000662, 0x0000 }, /* R1634 - MICMIX Input 2 Source */
{ 0x00000663, 0x0080 }, /* R1635 - MICMIX Input 2 Volume */
{ 0x00000664, 0x0000 }, /* R1636 - MICMIX Input 3 Source */
{ 0x00000665, 0x0080 }, /* R1637 - MICMIX Input 3 Volume */
{ 0x00000666, 0x0000 }, /* R1638 - MICMIX Input 4 Source */
{ 0x00000667, 0x0080 }, /* R1639 - MICMIX Input 4 Volume */
{ 0x00000668, 0x0000 }, /* R1640 - NOISEMIX Input 1 Source */
{ 0x00000669, 0x0080 }, /* R1641 - NOISEMIX Input 1 Volume */
{ 0x0000066A, 0x0000 }, /* R1642 - NOISEMIX Input 2 Source */
{ 0x0000066B, 0x0080 }, /* R1643 - NOISEMIX Input 2 Volume */
{ 0x0000066C, 0x0000 }, /* R1644 - NOISEMIX Input 3 Source */
{ 0x0000066D, 0x0080 }, /* R1645 - NOISEMIX Input 3 Volume */
{ 0x0000066E, 0x0000 }, /* R1646 - NOISEMIX Input 4 Source */
{ 0x0000066F, 0x0080 }, /* R1647 - NOISEMIX Input 4 Volume */
{ 0x00000680, 0x0000 }, /* R1664 - OUT1LMIX Input 1 Source */
{ 0x00000681, 0x0080 }, /* R1665 - OUT1LMIX Input 1 Volume */
{ 0x00000682, 0x0000 }, /* R1666 - OUT1LMIX Input 2 Source */
{ 0x00000683, 0x0080 }, /* R1667 - OUT1LMIX Input 2 Volume */
{ 0x00000684, 0x0000 }, /* R1668 - OUT1LMIX Input 3 Source */
{ 0x00000685, 0x0080 }, /* R1669 - OUT1LMIX Input 3 Volume */
{ 0x00000686, 0x0000 }, /* R1670 - OUT1LMIX Input 4 Source */
{ 0x00000687, 0x0080 }, /* R1671 - OUT1LMIX Input 4 Volume */
{ 0x00000688, 0x0000 }, /* R1672 - OUT1RMIX Input 1 Source */
{ 0x00000689, 0x0080 }, /* R1673 - OUT1RMIX Input 1 Volume */
{ 0x0000068A, 0x0000 }, /* R1674 - OUT1RMIX Input 2 Source */
{ 0x0000068B, 0x0080 }, /* R1675 - OUT1RMIX Input 2 Volume */
{ 0x0000068C, 0x0000 }, /* R1676 - OUT1RMIX Input 3 Source */
{ 0x0000068D, 0x0080 }, /* R1677 - OUT1RMIX Input 3 Volume */
{ 0x0000068E, 0x0000 }, /* R1678 - OUT1RMIX Input 4 Source */
{ 0x0000068F, 0x0080 }, /* R1679 - OUT1RMIX Input 4 Volume */
{ 0x00000690, 0x0000 }, /* R1680 - OUT2LMIX Input 1 Source */
{ 0x00000691, 0x0080 }, /* R1681 - OUT2LMIX Input 1 Volume */
{ 0x00000692, 0x0000 }, /* R1682 - OUT2LMIX Input 2 Source */
{ 0x00000693, 0x0080 }, /* R1683 - OUT2LMIX Input 2 Volume */
{ 0x00000694, 0x0000 }, /* R1684 - OUT2LMIX Input 3 Source */
{ 0x00000695, 0x0080 }, /* R1685 - OUT2LMIX Input 3 Volume */
{ 0x00000696, 0x0000 }, /* R1686 - OUT2LMIX Input 4 Source */
{ 0x00000697, 0x0080 }, /* R1687 - OUT2LMIX Input 4 Volume */
{ 0x00000698, 0x0000 }, /* R1688 - OUT2RMIX Input 1 Source */
{ 0x00000699, 0x0080 }, /* R1689 - OUT2RMIX Input 1 Volume */
{ 0x0000069A, 0x0000 }, /* R1690 - OUT2RMIX Input 2 Source */
{ 0x0000069B, 0x0080 }, /* R1691 - OUT2RMIX Input 2 Volume */
{ 0x0000069C, 0x0000 }, /* R1692 - OUT2RMIX Input 3 Source */
{ 0x0000069D, 0x0080 }, /* R1693 - OUT2RMIX Input 3 Volume */
{ 0x0000069E, 0x0000 }, /* R1694 - OUT2RMIX Input 4 Source */
{ 0x0000069F, 0x0080 }, /* R1695 - OUT2RMIX Input 4 Volume */
{ 0x000006A0, 0x0000 }, /* R1696 - OUT3LMIX Input 1 Source */
{ 0x000006A1, 0x0080 }, /* R1697 - OUT3LMIX Input 1 Volume */
{ 0x000006A2, 0x0000 }, /* R1698 - OUT3LMIX Input 2 Source */
{ 0x000006A3, 0x0080 }, /* R1699 - OUT3LMIX Input 2 Volume */
{ 0x000006A4, 0x0000 }, /* R1700 - OUT3LMIX Input 3 Source */
{ 0x000006A5, 0x0080 }, /* R1701 - OUT3LMIX Input 3 Volume */
{ 0x000006A6, 0x0000 }, /* R1702 - OUT3LMIX Input 4 Source */
{ 0x000006A7, 0x0080 }, /* R1703 - OUT3LMIX Input 4 Volume */
{ 0x000006B0, 0x0000 }, /* R1712 - OUT4LMIX Input 1 Source */
{ 0x000006B1, 0x0080 }, /* R1713 - OUT4LMIX Input 1 Volume */
{ 0x000006B2, 0x0000 }, /* R1714 - OUT4LMIX Input 2 Source */
{ 0x000006B3, 0x0080 }, /* R1715 - OUT4LMIX Input 2 Volume */
{ 0x000006B4, 0x0000 }, /* R1716 - OUT4LMIX Input 3 Source */
{ 0x000006B5, 0x0080 }, /* R1717 - OUT4LMIX Input 3 Volume */
{ 0x000006B6, 0x0000 }, /* R1718 - OUT4LMIX Input 4 Source */
{ 0x000006B7, 0x0080 }, /* R1719 - OUT4LMIX Input 4 Volume */
{ 0x000006B8, 0x0000 }, /* R1720 - OUT4RMIX Input 1 Source */
{ 0x000006B9, 0x0080 }, /* R1721 - OUT4RMIX Input 1 Volume */
{ 0x000006BA, 0x0000 }, /* R1722 - OUT4RMIX Input 2 Source */
{ 0x000006BB, 0x0080 }, /* R1723 - OUT4RMIX Input 2 Volume */
{ 0x000006BC, 0x0000 }, /* R1724 - OUT4RMIX Input 3 Source */
{ 0x000006BD, 0x0080 }, /* R1725 - OUT4RMIX Input 3 Volume */
{ 0x000006BE, 0x0000 }, /* R1726 - OUT4RMIX Input 4 Source */
{ 0x000006BF, 0x0080 }, /* R1727 - OUT4RMIX Input 4 Volume */
{ 0x000006C0, 0x0000 }, /* R1728 - OUT5LMIX Input 1 Source */
{ 0x000006C1, 0x0080 }, /* R1729 - OUT5LMIX Input 1 Volume */
{ 0x000006C2, 0x0000 }, /* R1730 - OUT5LMIX Input 2 Source */
{ 0x000006C3, 0x0080 }, /* R1731 - OUT5LMIX Input 2 Volume */
{ 0x000006C4, 0x0000 }, /* R1732 - OUT5LMIX Input 3 Source */
{ 0x000006C5, 0x0080 }, /* R1733 - OUT5LMIX Input 3 Volume */
{ 0x000006C6, 0x0000 }, /* R1734 - OUT5LMIX Input 4 Source */
{ 0x000006C7, 0x0080 }, /* R1735 - OUT5LMIX Input 4 Volume */
{ 0x000006C8, 0x0000 }, /* R1736 - OUT5RMIX Input 1 Source */
{ 0x000006C9, 0x0080 }, /* R1737 - OUT5RMIX Input 1 Volume */
{ 0x000006CA, 0x0000 }, /* R1738 - OUT5RMIX Input 2 Source */
{ 0x000006CB, 0x0080 }, /* R1739 - OUT5RMIX Input 2 Volume */
{ 0x000006CC, 0x0000 }, /* R1740 - OUT5RMIX Input 3 Source */
{ 0x000006CD, 0x0080 }, /* R1741 - OUT5RMIX Input 3 Volume */
{ 0x000006CE, 0x0000 }, /* R1742 - OUT5RMIX Input 4 Source */
{ 0x000006CF, 0x0080 }, /* R1743 - OUT5RMIX Input 4 Volume */
{ 0x00000700, 0x0000 }, /* R1792 - AIF1TX1MIX Input 1 Source */
{ 0x00000701, 0x0080 }, /* R1793 - AIF1TX1MIX Input 1 Volume */
{ 0x00000702, 0x0000 }, /* R1794 - AIF1TX1MIX Input 2 Source */
{ 0x00000703, 0x0080 }, /* R1795 - AIF1TX1MIX Input 2 Volume */
{ 0x00000704, 0x0000 }, /* R1796 - AIF1TX1MIX Input 3 Source */
{ 0x00000705, 0x0080 }, /* R1797 - AIF1TX1MIX Input 3 Volume */
{ 0x00000706, 0x0000 }, /* R1798 - AIF1TX1MIX Input 4 Source */
{ 0x00000707, 0x0080 }, /* R1799 - AIF1TX1MIX Input 4 Volume */
{ 0x00000708, 0x0000 }, /* R1800 - AIF1TX2MIX Input 1 Source */
{ 0x00000709, 0x0080 }, /* R1801 - AIF1TX2MIX Input 1 Volume */
{ 0x0000070A, 0x0000 }, /* R1802 - AIF1TX2MIX Input 2 Source */
{ 0x0000070B, 0x0080 }, /* R1803 - AIF1TX2MIX Input 2 Volume */
{ 0x0000070C, 0x0000 }, /* R1804 - AIF1TX2MIX Input 3 Source */
{ 0x0000070D, 0x0080 }, /* R1805 - AIF1TX2MIX Input 3 Volume */
{ 0x0000070E, 0x0000 }, /* R1806 - AIF1TX2MIX Input 4 Source */
{ 0x0000070F, 0x0080 }, /* R1807 - AIF1TX2MIX Input 4 Volume */
{ 0x00000710, 0x0000 }, /* R1808 - AIF1TX3MIX Input 1 Source */
{ 0x00000711, 0x0080 }, /* R1809 - AIF1TX3MIX Input 1 Volume */
{ 0x00000712, 0x0000 }, /* R1810 - AIF1TX3MIX Input 2 Source */
{ 0x00000713, 0x0080 }, /* R1811 - AIF1TX3MIX Input 2 Volume */
{ 0x00000714, 0x0000 }, /* R1812 - AIF1TX3MIX Input 3 Source */
{ 0x00000715, 0x0080 }, /* R1813 - AIF1TX3MIX Input 3 Volume */
{ 0x00000716, 0x0000 }, /* R1814 - AIF1TX3MIX Input 4 Source */
{ 0x00000717, 0x0080 }, /* R1815 - AIF1TX3MIX Input 4 Volume */
{ 0x00000718, 0x0000 }, /* R1816 - AIF1TX4MIX Input 1 Source */
{ 0x00000719, 0x0080 }, /* R1817 - AIF1TX4MIX Input 1 Volume */
{ 0x0000071A, 0x0000 }, /* R1818 - AIF1TX4MIX Input 2 Source */
{ 0x0000071B, 0x0080 }, /* R1819 - AIF1TX4MIX Input 2 Volume */
{ 0x0000071C, 0x0000 }, /* R1820 - AIF1TX4MIX Input 3 Source */
{ 0x0000071D, 0x0080 }, /* R1821 - AIF1TX4MIX Input 3 Volume */
{ 0x0000071E, 0x0000 }, /* R1822 - AIF1TX4MIX Input 4 Source */
{ 0x0000071F, 0x0080 }, /* R1823 - AIF1TX4MIX Input 4 Volume */
{ 0x00000720, 0x0000 }, /* R1824 - AIF1TX5MIX Input 1 Source */
{ 0x00000721, 0x0080 }, /* R1825 - AIF1TX5MIX Input 1 Volume */
{ 0x00000722, 0x0000 }, /* R1826 - AIF1TX5MIX Input 2 Source */
{ 0x00000723, 0x0080 }, /* R1827 - AIF1TX5MIX Input 2 Volume */
{ 0x00000724, 0x0000 }, /* R1828 - AIF1TX5MIX Input 3 Source */
{ 0x00000725, 0x0080 }, /* R1829 - AIF1TX5MIX Input 3 Volume */
{ 0x00000726, 0x0000 }, /* R1830 - AIF1TX5MIX Input 4 Source */
{ 0x00000727, 0x0080 }, /* R1831 - AIF1TX5MIX Input 4 Volume */
{ 0x00000728, 0x0000 }, /* R1832 - AIF1TX6MIX Input 1 Source */
{ 0x00000729, 0x0080 }, /* R1833 - AIF1TX6MIX Input 1 Volume */
{ 0x0000072A, 0x0000 }, /* R1834 - AIF1TX6MIX Input 2 Source */
{ 0x0000072B, 0x0080 }, /* R1835 - AIF1TX6MIX Input 2 Volume */
{ 0x0000072C, 0x0000 }, /* R1836 - AIF1TX6MIX Input 3 Source */
{ 0x0000072D, 0x0080 }, /* R1837 - AIF1TX6MIX Input 3 Volume */
{ 0x0000072E, 0x0000 }, /* R1838 - AIF1TX6MIX Input 4 Source */
{ 0x0000072F, 0x0080 }, /* R1839 - AIF1TX6MIX Input 4 Volume */
{ 0x00000730, 0x0000 }, /* R1840 - AIF1TX7MIX Input 1 Source */
{ 0x00000731, 0x0080 }, /* R1841 - AIF1TX7MIX Input 1 Volume */
{ 0x00000732, 0x0000 }, /* R1842 - AIF1TX7MIX Input 2 Source */
{ 0x00000733, 0x0080 }, /* R1843 - AIF1TX7MIX Input 2 Volume */
{ 0x00000734, 0x0000 }, /* R1844 - AIF1TX7MIX Input 3 Source */
{ 0x00000735, 0x0080 }, /* R1845 - AIF1TX7MIX Input 3 Volume */
{ 0x00000736, 0x0000 }, /* R1846 - AIF1TX7MIX Input 4 Source */
{ 0x00000737, 0x0080 }, /* R1847 - AIF1TX7MIX Input 4 Volume */
{ 0x00000738, 0x0000 }, /* R1848 - AIF1TX8MIX Input 1 Source */
{ 0x00000739, 0x0080 }, /* R1849 - AIF1TX8MIX Input 1 Volume */
{ 0x0000073A, 0x0000 }, /* R1850 - AIF1TX8MIX Input 2 Source */
{ 0x0000073B, 0x0080 }, /* R1851 - AIF1TX8MIX Input 2 Volume */
{ 0x0000073C, 0x0000 }, /* R1852 - AIF1TX8MIX Input 3 Source */
{ 0x0000073D, 0x0080 }, /* R1853 - AIF1TX8MIX Input 3 Volume */
{ 0x0000073E, 0x0000 }, /* R1854 - AIF1TX8MIX Input 4 Source */
{ 0x0000073F, 0x0080 }, /* R1855 - AIF1TX8MIX Input 4 Volume */
{ 0x00000740, 0x0000 }, /* R1856 - AIF2TX1MIX Input 1 Source */
{ 0x00000741, 0x0080 }, /* R1857 - AIF2TX1MIX Input 1 Volume */
{ 0x00000742, 0x0000 }, /* R1858 - AIF2TX1MIX Input 2 Source */
{ 0x00000743, 0x0080 }, /* R1859 - AIF2TX1MIX Input 2 Volume */
{ 0x00000744, 0x0000 }, /* R1860 - AIF2TX1MIX Input 3 Source */
{ 0x00000745, 0x0080 }, /* R1861 - AIF2TX1MIX Input 3 Volume */
{ 0x00000746, 0x0000 }, /* R1862 - AIF2TX1MIX Input 4 Source */
{ 0x00000747, 0x0080 }, /* R1863 - AIF2TX1MIX Input 4 Volume */
{ 0x00000748, 0x0000 }, /* R1864 - AIF2TX2MIX Input 1 Source */
{ 0x00000749, 0x0080 }, /* R1865 - AIF2TX2MIX Input 1 Volume */
{ 0x0000074A, 0x0000 }, /* R1866 - AIF2TX2MIX Input 2 Source */
{ 0x0000074B, 0x0080 }, /* R1867 - AIF2TX2MIX Input 2 Volume */
{ 0x0000074C, 0x0000 }, /* R1868 - AIF2TX2MIX Input 3 Source */
{ 0x0000074D, 0x0080 }, /* R1869 - AIF2TX2MIX Input 3 Volume */
{ 0x0000074E, 0x0000 }, /* R1870 - AIF2TX2MIX Input 4 Source */
{ 0x0000074F, 0x0080 }, /* R1871 - AIF2TX2MIX Input 4 Volume */
{ 0x00000780, 0x0000 }, /* R1920 - AIF3TX1MIX Input 1 Source */
{ 0x00000781, 0x0080 }, /* R1921 - AIF3TX1MIX Input 1 Volume */
{ 0x00000782, 0x0000 }, /* R1922 - AIF3TX1MIX Input 2 Source */
{ 0x00000783, 0x0080 }, /* R1923 - AIF3TX1MIX Input 2 Volume */
{ 0x00000784, 0x0000 }, /* R1924 - AIF3TX1MIX Input 3 Source */
{ 0x00000785, 0x0080 }, /* R1925 - AIF3TX1MIX Input 3 Volume */
{ 0x00000786, 0x0000 }, /* R1926 - AIF3TX1MIX Input 4 Source */
{ 0x00000787, 0x0080 }, /* R1927 - AIF3TX1MIX Input 4 Volume */
{ 0x00000788, 0x0000 }, /* R1928 - AIF3TX2MIX Input 1 Source */
{ 0x00000789, 0x0080 }, /* R1929 - AIF3TX2MIX Input 1 Volume */
{ 0x0000078A, 0x0000 }, /* R1930 - AIF3TX2MIX Input 2 Source */
{ 0x0000078B, 0x0080 }, /* R1931 - AIF3TX2MIX Input 2 Volume */
{ 0x0000078C, 0x0000 }, /* R1932 - AIF3TX2MIX Input 3 Source */
{ 0x0000078D, 0x0080 }, /* R1933 - AIF3TX2MIX Input 3 Volume */
{ 0x0000078E, 0x0000 }, /* R1934 - AIF3TX2MIX Input 4 Source */
{ 0x0000078F, 0x0080 }, /* R1935 - AIF3TX2MIX Input 4 Volume */
{ 0x000007C0, 0x0000 }, /* R1984 - SLIMTX1MIX Input 1 Source */
{ 0x000007C1, 0x0080 }, /* R1985 - SLIMTX1MIX Input 1 Volume */
{ 0x000007C2, 0x0000 }, /* R1986 - SLIMTX1MIX Input 2 Source */
{ 0x000007C3, 0x0080 }, /* R1987 - SLIMTX1MIX Input 2 Volume */
{ 0x000007C4, 0x0000 }, /* R1988 - SLIMTX1MIX Input 3 Source */
{ 0x000007C5, 0x0080 }, /* R1989 - SLIMTX1MIX Input 3 Volume */
{ 0x000007C6, 0x0000 }, /* R1990 - SLIMTX1MIX Input 4 Source */
{ 0x000007C7, 0x0080 }, /* R1991 - SLIMTX1MIX Input 4 Volume */
{ 0x000007C8, 0x0000 }, /* R1992 - SLIMTX2MIX Input 1 Source */
{ 0x000007C9, 0x0080 }, /* R1993 - SLIMTX2MIX Input 1 Volume */
{ 0x000007CA, 0x0000 }, /* R1994 - SLIMTX2MIX Input 2 Source */
{ 0x000007CB, 0x0080 }, /* R1995 - SLIMTX2MIX Input 2 Volume */
{ 0x000007CC, 0x0000 }, /* R1996 - SLIMTX2MIX Input 3 Source */
{ 0x000007CD, 0x0080 }, /* R1997 - SLIMTX2MIX Input 3 Volume */
{ 0x000007CE, 0x0000 }, /* R1998 - SLIMTX2MIX Input 4 Source */
{ 0x000007CF, 0x0080 }, /* R1999 - SLIMTX2MIX Input 4 Volume */
{ 0x000007D0, 0x0000 }, /* R2000 - SLIMTX3MIX Input 1 Source */
{ 0x000007D1, 0x0080 }, /* R2001 - SLIMTX3MIX Input 1 Volume */
{ 0x000007D2, 0x0000 }, /* R2002 - SLIMTX3MIX Input 2 Source */
{ 0x000007D3, 0x0080 }, /* R2003 - SLIMTX3MIX Input 2 Volume */
{ 0x000007D4, 0x0000 }, /* R2004 - SLIMTX3MIX Input 3 Source */
{ 0x000007D5, 0x0080 }, /* R2005 - SLIMTX3MIX Input 3 Volume */
{ 0x000007D6, 0x0000 }, /* R2006 - SLIMTX3MIX Input 4 Source */
{ 0x000007D7, 0x0080 }, /* R2007 - SLIMTX3MIX Input 4 Volume */
{ 0x000007D8, 0x0000 }, /* R2008 - SLIMTX4MIX Input 1 Source */
{ 0x000007D9, 0x0080 }, /* R2009 - SLIMTX4MIX Input 1 Volume */
{ 0x000007DA, 0x0000 }, /* R2010 - SLIMTX4MIX Input 2 Source */
{ 0x000007DB, 0x0080 }, /* R2011 - SLIMTX4MIX Input 2 Volume */
{ 0x000007DC, 0x0000 }, /* R2012 - SLIMTX4MIX Input 3 Source */
{ 0x000007DD, 0x0080 }, /* R2013 - SLIMTX4MIX Input 3 Volume */
{ 0x000007DE, 0x0000 }, /* R2014 - SLIMTX4MIX Input 4 Source */
{ 0x000007DF, 0x0080 }, /* R2015 - SLIMTX4MIX Input 4 Volume */
{ 0x000007E0, 0x0000 }, /* R2016 - SLIMTX5MIX Input 1 Source */
{ 0x000007E1, 0x0080 }, /* R2017 - SLIMTX5MIX Input 1 Volume */
{ 0x000007E2, 0x0000 }, /* R2018 - SLIMTX5MIX Input 2 Source */
{ 0x000007E3, 0x0080 }, /* R2019 - SLIMTX5MIX Input 2 Volume */
{ 0x000007E4, 0x0000 }, /* R2020 - SLIMTX5MIX Input 3 Source */
{ 0x000007E5, 0x0080 }, /* R2021 - SLIMTX5MIX Input 3 Volume */
{ 0x000007E6, 0x0000 }, /* R2022 - SLIMTX5MIX Input 4 Source */
{ 0x000007E7, 0x0080 }, /* R2023 - SLIMTX5MIX Input 4 Volume */
{ 0x000007E8, 0x0000 }, /* R2024 - SLIMTX6MIX Input 1 Source */
{ 0x000007E9, 0x0080 }, /* R2025 - SLIMTX6MIX Input 1 Volume */
{ 0x000007EA, 0x0000 }, /* R2026 - SLIMTX6MIX Input 2 Source */
{ 0x000007EB, 0x0080 }, /* R2027 - SLIMTX6MIX Input 2 Volume */
{ 0x000007EC, 0x0000 }, /* R2028 - SLIMTX6MIX Input 3 Source */
{ 0x000007ED, 0x0080 }, /* R2029 - SLIMTX6MIX Input 3 Volume */
{ 0x000007EE, 0x0000 }, /* R2030 - SLIMTX6MIX Input 4 Source */
{ 0x000007EF, 0x0080 }, /* R2031 - SLIMTX6MIX Input 4 Volume */
{ 0x000007F0, 0x0000 }, /* R2032 - SLIMTX7MIX Input 1 Source */
{ 0x000007F1, 0x0080 }, /* R2033 - SLIMTX7MIX Input 1 Volume */
{ 0x000007F2, 0x0000 }, /* R2034 - SLIMTX7MIX Input 2 Source */
{ 0x000007F3, 0x0080 }, /* R2035 - SLIMTX7MIX Input 2 Volume */
{ 0x000007F4, 0x0000 }, /* R2036 - SLIMTX7MIX Input 3 Source */
{ 0x000007F5, 0x0080 }, /* R2037 - SLIMTX7MIX Input 3 Volume */
{ 0x000007F6, 0x0000 }, /* R2038 - SLIMTX7MIX Input 4 Source */
{ 0x000007F7, 0x0080 }, /* R2039 - SLIMTX7MIX Input 4 Volume */
{ 0x000007F8, 0x0000 }, /* R2040 - SLIMTX8MIX Input 1 Source */
{ 0x000007F9, 0x0080 }, /* R2041 - SLIMTX8MIX Input 1 Volume */
{ 0x000007FA, 0x0000 }, /* R2042 - SLIMTX8MIX Input 2 Source */
{ 0x000007FB, 0x0080 }, /* R2043 - SLIMTX8MIX Input 2 Volume */
{ 0x000007FC, 0x0000 }, /* R2044 - SLIMTX8MIX Input 3 Source */
{ 0x000007FD, 0x0080 }, /* R2045 - SLIMTX8MIX Input 3 Volume */
{ 0x000007FE, 0x0000 }, /* R2046 - SLIMTX8MIX Input 4 Source */
{ 0x000007FF, 0x0080 }, /* R2047 - SLIMTX8MIX Input 4 Volume */
{ 0x00000880, 0x0000 }, /* R2176 - EQ1MIX Input 1 Source */
{ 0x00000881, 0x0080 }, /* R2177 - EQ1MIX Input 1 Volume */
{ 0x00000882, 0x0000 }, /* R2178 - EQ1MIX Input 2 Source */
{ 0x00000883, 0x0080 }, /* R2179 - EQ1MIX Input 2 Volume */
{ 0x00000884, 0x0000 }, /* R2180 - EQ1MIX Input 3 Source */
{ 0x00000885, 0x0080 }, /* R2181 - EQ1MIX Input 3 Volume */
{ 0x00000886, 0x0000 }, /* R2182 - EQ1MIX Input 4 Source */
{ 0x00000887, 0x0080 }, /* R2183 - EQ1MIX Input 4 Volume */
{ 0x00000888, 0x0000 }, /* R2184 - EQ2MIX Input 1 Source */
{ 0x00000889, 0x0080 }, /* R2185 - EQ2MIX Input 1 Volume */
{ 0x0000088A, 0x0000 }, /* R2186 - EQ2MIX Input 2 Source */
{ 0x0000088B, 0x0080 }, /* R2187 - EQ2MIX Input 2 Volume */
{ 0x0000088C, 0x0000 }, /* R2188 - EQ2MIX Input 3 Source */
{ 0x0000088D, 0x0080 }, /* R2189 - EQ2MIX Input 3 Volume */
{ 0x0000088E, 0x0000 }, /* R2190 - EQ2MIX Input 4 Source */
{ 0x0000088F, 0x0080 }, /* R2191 - EQ2MIX Input 4 Volume */
{ 0x00000890, 0x0000 }, /* R2192 - EQ3MIX Input 1 Source */
{ 0x00000891, 0x0080 }, /* R2193 - EQ3MIX Input 1 Volume */
{ 0x00000892, 0x0000 }, /* R2194 - EQ3MIX Input 2 Source */
{ 0x00000893, 0x0080 }, /* R2195 - EQ3MIX Input 2 Volume */
{ 0x00000894, 0x0000 }, /* R2196 - EQ3MIX Input 3 Source */
{ 0x00000895, 0x0080 }, /* R2197 - EQ3MIX Input 3 Volume */
{ 0x00000896, 0x0000 }, /* R2198 - EQ3MIX Input 4 Source */
{ 0x00000897, 0x0080 }, /* R2199 - EQ3MIX Input 4 Volume */
{ 0x00000898, 0x0000 }, /* R2200 - EQ4MIX Input 1 Source */
{ 0x00000899, 0x0080 }, /* R2201 - EQ4MIX Input 1 Volume */
{ 0x0000089A, 0x0000 }, /* R2202 - EQ4MIX Input 2 Source */
{ 0x0000089B, 0x0080 }, /* R2203 - EQ4MIX Input 2 Volume */
{ 0x0000089C, 0x0000 }, /* R2204 - EQ4MIX Input 3 Source */
{ 0x0000089D, 0x0080 }, /* R2205 - EQ4MIX Input 3 Volume */
{ 0x0000089E, 0x0000 }, /* R2206 - EQ4MIX Input 4 Source */
{ 0x0000089F, 0x0080 }, /* R2207 - EQ4MIX Input 4 Volume */
{ 0x000008C0, 0x0000 }, /* R2240 - DRC1LMIX Input 1 Source */
{ 0x000008C1, 0x0080 }, /* R2241 - DRC1LMIX Input 1 Volume */
{ 0x000008C2, 0x0000 }, /* R2242 - DRC1LMIX Input 2 Source */
{ 0x000008C3, 0x0080 }, /* R2243 - DRC1LMIX Input 2 Volume */
{ 0x000008C4, 0x0000 }, /* R2244 - DRC1LMIX Input 3 Source */
{ 0x000008C5, 0x0080 }, /* R2245 - DRC1LMIX Input 3 Volume */
{ 0x000008C6, 0x0000 }, /* R2246 - DRC1LMIX Input 4 Source */
{ 0x000008C7, 0x0080 }, /* R2247 - DRC1LMIX Input 4 Volume */
{ 0x000008C8, 0x0000 }, /* R2248 - DRC1RMIX Input 1 Source */
{ 0x000008C9, 0x0080 }, /* R2249 - DRC1RMIX Input 1 Volume */
{ 0x000008CA, 0x0000 }, /* R2250 - DRC1RMIX Input 2 Source */
{ 0x000008CB, 0x0080 }, /* R2251 - DRC1RMIX Input 2 Volume */
{ 0x000008CC, 0x0000 }, /* R2252 - DRC1RMIX Input 3 Source */
{ 0x000008CD, 0x0080 }, /* R2253 - DRC1RMIX Input 3 Volume */
{ 0x000008CE, 0x0000 }, /* R2254 - DRC1RMIX Input 4 Source */
{ 0x000008CF, 0x0080 }, /* R2255 - DRC1RMIX Input 4 Volume */
{ 0x00000900, 0x0000 }, /* R2304 - HPLP1MIX Input 1 Source */
{ 0x00000901, 0x0080 }, /* R2305 - HPLP1MIX Input 1 Volume */
{ 0x00000902, 0x0000 }, /* R2306 - HPLP1MIX Input 2 Source */
{ 0x00000903, 0x0080 }, /* R2307 - HPLP1MIX Input 2 Volume */
{ 0x00000904, 0x0000 }, /* R2308 - HPLP1MIX Input 3 Source */
{ 0x00000905, 0x0080 }, /* R2309 - HPLP1MIX Input 3 Volume */
{ 0x00000906, 0x0000 }, /* R2310 - HPLP1MIX Input 4 Source */
{ 0x00000907, 0x0080 }, /* R2311 - HPLP1MIX Input 4 Volume */
{ 0x00000908, 0x0000 }, /* R2312 - HPLP2MIX Input 1 Source */
{ 0x00000909, 0x0080 }, /* R2313 - HPLP2MIX Input 1 Volume */
{ 0x0000090A, 0x0000 }, /* R2314 - HPLP2MIX Input 2 Source */
{ 0x0000090B, 0x0080 }, /* R2315 - HPLP2MIX Input 2 Volume */
{ 0x0000090C, 0x0000 }, /* R2316 - HPLP2MIX Input 3 Source */
{ 0x0000090D, 0x0080 }, /* R2317 - HPLP2MIX Input 3 Volume */
{ 0x0000090E, 0x0000 }, /* R2318 - HPLP2MIX Input 4 Source */
{ 0x0000090F, 0x0080 }, /* R2319 - HPLP2MIX Input 4 Volume */
{ 0x00000910, 0x0000 }, /* R2320 - HPLP3MIX Input 1 Source */
{ 0x00000911, 0x0080 }, /* R2321 - HPLP3MIX Input 1 Volume */
{ 0x00000912, 0x0000 }, /* R2322 - HPLP3MIX Input 2 Source */
{ 0x00000913, 0x0080 }, /* R2323 - HPLP3MIX Input 2 Volume */
{ 0x00000914, 0x0000 }, /* R2324 - HPLP3MIX Input 3 Source */
{ 0x00000915, 0x0080 }, /* R2325 - HPLP3MIX Input 3 Volume */
{ 0x00000916, 0x0000 }, /* R2326 - HPLP3MIX Input 4 Source */
{ 0x00000917, 0x0080 }, /* R2327 - HPLP3MIX Input 4 Volume */
{ 0x00000918, 0x0000 }, /* R2328 - HPLP4MIX Input 1 Source */
{ 0x00000919, 0x0080 }, /* R2329 - HPLP4MIX Input 1 Volume */
{ 0x0000091A, 0x0000 }, /* R2330 - HPLP4MIX Input 2 Source */
{ 0x0000091B, 0x0080 }, /* R2331 - HPLP4MIX Input 2 Volume */
{ 0x0000091C, 0x0000 }, /* R2332 - HPLP4MIX Input 3 Source */
{ 0x0000091D, 0x0080 }, /* R2333 - HPLP4MIX Input 3 Volume */
{ 0x0000091E, 0x0000 }, /* R2334 - HPLP4MIX Input 4 Source */
{ 0x0000091F, 0x0080 }, /* R2335 - HPLP4MIX Input 4 Volume */
{ 0x00000940, 0x0000 }, /* R2368 - DSP1LMIX Input 1 Source */
{ 0x00000941, 0x0080 }, /* R2369 - DSP1LMIX Input 1 Volume */
{ 0x00000942, 0x0000 }, /* R2370 - DSP1LMIX Input 2 Source */
{ 0x00000943, 0x0080 }, /* R2371 - DSP1LMIX Input 2 Volume */
{ 0x00000944, 0x0000 }, /* R2372 - DSP1LMIX Input 3 Source */
{ 0x00000945, 0x0080 }, /* R2373 - DSP1LMIX Input 3 Volume */
{ 0x00000946, 0x0000 }, /* R2374 - DSP1LMIX Input 4 Source */
{ 0x00000947, 0x0080 }, /* R2375 - DSP1LMIX Input 4 Volume */
{ 0x00000948, 0x0000 }, /* R2376 - DSP1RMIX Input 1 Source */
{ 0x00000949, 0x0080 }, /* R2377 - DSP1RMIX Input 1 Volume */
{ 0x0000094A, 0x0000 }, /* R2378 - DSP1RMIX Input 2 Source */
{ 0x0000094B, 0x0080 }, /* R2379 - DSP1RMIX Input 2 Volume */
{ 0x0000094C, 0x0000 }, /* R2380 - DSP1RMIX Input 3 Source */
{ 0x0000094D, 0x0080 }, /* R2381 - DSP1RMIX Input 3 Volume */
{ 0x0000094E, 0x0000 }, /* R2382 - DSP1RMIX Input 4 Source */
{ 0x0000094F, 0x0080 }, /* R2383 - DSP1RMIX Input 4 Volume */
{ 0x00000950, 0x0000 }, /* R2384 - DSP1AUX1MIX Input 1 Source */
{ 0x00000958, 0x0000 }, /* R2392 - DSP1AUX2MIX Input 1 Source */
{ 0x00000960, 0x0000 }, /* R2400 - DSP1AUX3MIX Input 1 Source */
{ 0x00000968, 0x0000 }, /* R2408 - DSP1AUX4MIX Input 1 Source */
{ 0x00000970, 0x0000 }, /* R2416 - DSP1AUX5MIX Input 1 Source */
{ 0x00000978, 0x0000 }, /* R2424 - DSP1AUX6MIX Input 1 Source */
{ 0x00000A80, 0x0000 }, /* R2688 - ASRC1LMIX Input 1 Source */
{ 0x00000A88, 0x0000 }, /* R2696 - ASRC1RMIX Input 1 Source */
{ 0x00000A90, 0x0000 }, /* R2704 - ASRC2LMIX Input 1 Source */
{ 0x00000A98, 0x0000 }, /* R2712 - ASRC2RMIX Input 1 Source */
{ 0x00000B00, 0x0000 }, /* R2816 - ISRC1DEC1MIX Input 1 Source */
{ 0x00000B08, 0x0000 }, /* R2824 - ISRC1DEC2MIX Input 1 Source */
{ 0x00000B20, 0x0000 }, /* R2848 - ISRC1INT1MIX Input 1 Source */
{ 0x00000B28, 0x0000 }, /* R2856 - ISRC1INT2MIX Input 1 Source */
{ 0x00000B40, 0x0000 }, /* R2880 - ISRC2DEC1MIX Input 1 Source */
{ 0x00000B48, 0x0000 }, /* R2888 - ISRC2DEC2MIX Input 1 Source */
{ 0x00000B60, 0x0000 }, /* R2912 - ISRC2INT1MIX Input 1 Source */
{ 0x00000B68, 0x0000 }, /* R2920 - ISRC2INT2MIX Input 1 Source */
{ 0x00000C00, 0xA101 }, /* R3072 - GPIO1 CTRL */
{ 0x00000C01, 0xA101 }, /* R3073 - GPIO2 CTRL */
{ 0x00000C02, 0xA101 }, /* R3074 - GPIO3 CTRL */
{ 0x00000C03, 0xA101 }, /* R3075 - GPIO4 CTRL */
{ 0x00000C04, 0xA101 }, /* R3076 - GPIO5 CTRL */
{ 0x00000C0F, 0x0400 }, /* R3087 - IRQ CTRL 1 */
{ 0x00000C10, 0x1000 }, /* R3088 - GPIO Debounce Config */
{ 0x00000C20, 0x8002 }, /* R3104 - Misc Pad Ctrl 1 */
{ 0x00000C21, 0x8001 }, /* R3105 - Misc Pad Ctrl 2 */
{ 0x00000C22, 0x0000 }, /* R3106 - Misc Pad Ctrl 3 */
{ 0x00000C23, 0x0000 }, /* R3107 - Misc Pad Ctrl 4 */
{ 0x00000C24, 0x0000 }, /* R3108 - Misc Pad Ctrl 5 */
{ 0x00000C25, 0x0000 }, /* R3109 - Misc Pad Ctrl 6 */
{ 0x00000D08, 0xFFFF }, /* R3336 - Interrupt Status 1 Mask */
{ 0x00000D09, 0xFFFF }, /* R3337 - Interrupt Status 2 Mask */
{ 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */
{ 0x00000D0B, 0xFFFF }, /* R3339 - Interrupt Status 4 Mask */
{ 0x00000D0C, 0xFEFF }, /* R3340 - Interrupt Status 5 Mask */
{ 0x00000D0F, 0x0000 }, /* R3343 - Interrupt Control */
{ 0x00000D18, 0xFFFF }, /* R3352 - IRQ2 Status 1 Mask */
{ 0x00000D19, 0xFFFF }, /* R3353 - IRQ2 Status 2 Mask */
{ 0x00000D1A, 0xFFFF }, /* R3354 - IRQ2 Status 3 Mask */
{ 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
{ 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
{ 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
{ 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
{ 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
{ 0x00000D56, 0x0000 }, /* R3414 - Jack detect debounce */
{ 0x00000E00, 0x0000 }, /* R3584 - FX_Ctrl1 */
{ 0x00000E01, 0x0000 }, /* R3585 - FX_Ctrl2 */
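/*
 * All four equalisers reset to an identical coefficient set; the
 * EQ1_1..EQ1_21 values below are repeated verbatim for EQ2-EQ4.
 */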
{ 0x00000E10, 0x6318 }, /* R3600 - EQ1_1 */
{ 0x00000E11, 0x6300 }, /* R3601 - EQ1_2 */
{ 0x00000E12, 0x0FC8 }, /* R3602 - EQ1_3 */
{ 0x00000E13, 0x03FE }, /* R3603 - EQ1_4 */
{ 0x00000E14, 0x00E0 }, /* R3604 - EQ1_5 */
{ 0x00000E15, 0x1EC4 }, /* R3605 - EQ1_6 */
{ 0x00000E16, 0xF136 }, /* R3606 - EQ1_7 */
{ 0x00000E17, 0x0409 }, /* R3607 - EQ1_8 */
{ 0x00000E18, 0x04CC }, /* R3608 - EQ1_9 */
{ 0x00000E19, 0x1C9B }, /* R3609 - EQ1_10 */
{ 0x00000E1A, 0xF337 }, /* R3610 - EQ1_11 */
{ 0x00000E1B, 0x040B }, /* R3611 - EQ1_12 */
{ 0x00000E1C, 0x0CBB }, /* R3612 - EQ1_13 */
{ 0x00000E1D, 0x16F8 }, /* R3613 - EQ1_14 */
{ 0x00000E1E, 0xF7D9 }, /* R3614 - EQ1_15 */
{ 0x00000E1F, 0x040A }, /* R3615 - EQ1_16 */
{ 0x00000E20, 0x1F14 }, /* R3616 - EQ1_17 */
{ 0x00000E21, 0x058C }, /* R3617 - EQ1_18 */
{ 0x00000E22, 0x0563 }, /* R3618 - EQ1_19 */
{ 0x00000E23, 0x4000 }, /* R3619 - EQ1_20 */
{ 0x00000E24, 0x0B75 }, /* R3620 - EQ1_21 */
{ 0x00000E26, 0x6318 }, /* R3622 - EQ2_1 */
{ 0x00000E27, 0x6300 }, /* R3623 - EQ2_2 */
{ 0x00000E28, 0x0FC8 }, /* R3624 - EQ2_3 */
{ 0x00000E29, 0x03FE }, /* R3625 - EQ2_4 */
{ 0x00000E2A, 0x00E0 }, /* R3626 - EQ2_5 */
{ 0x00000E2B, 0x1EC4 }, /* R3627 - EQ2_6 */
{ 0x00000E2C, 0xF136 }, /* R3628 - EQ2_7 */
{ 0x00000E2D, 0x0409 }, /* R3629 - EQ2_8 */
{ 0x00000E2E, 0x04CC }, /* R3630 - EQ2_9 */
{ 0x00000E2F, 0x1C9B }, /* R3631 - EQ2_10 */
{ 0x00000E30, 0xF337 }, /* R3632 - EQ2_11 */
{ 0x00000E31, 0x040B }, /* R3633 - EQ2_12 */
{ 0x00000E32, 0x0CBB }, /* R3634 - EQ2_13 */
{ 0x00000E33, 0x16F8 }, /* R3635 - EQ2_14 */
{ 0x00000E34, 0xF7D9 }, /* R3636 - EQ2_15 */
{ 0x00000E35, 0x040A }, /* R3637 - EQ2_16 */
{ 0x00000E36, 0x1F14 }, /* R3638 - EQ2_17 */
{ 0x00000E37, 0x058C }, /* R3639 - EQ2_18 */
{ 0x00000E38, 0x0563 }, /* R3640 - EQ2_19 */
{ 0x00000E39, 0x4000 }, /* R3641 - EQ2_20 */
{ 0x00000E3A, 0x0B75 }, /* R3642 - EQ2_21 */
{ 0x00000E3C, 0x6318 }, /* R3644 - EQ3_1 */
{ 0x00000E3D, 0x6300 }, /* R3645 - EQ3_2 */
{ 0x00000E3E, 0x0FC8 }, /* R3646 - EQ3_3 */
{ 0x00000E3F, 0x03FE }, /* R3647 - EQ3_4 */
{ 0x00000E40, 0x00E0 }, /* R3648 - EQ3_5 */
{ 0x00000E41, 0x1EC4 }, /* R3649 - EQ3_6 */
{ 0x00000E42, 0xF136 }, /* R3650 - EQ3_7 */
{ 0x00000E43, 0x0409 }, /* R3651 - EQ3_8 */
{ 0x00000E44, 0x04CC }, /* R3652 - EQ3_9 */
{ 0x00000E45, 0x1C9B }, /* R3653 - EQ3_10 */
{ 0x00000E46, 0xF337 }, /* R3654 - EQ3_11 */
{ 0x00000E47, 0x040B }, /* R3655 - EQ3_12 */
{ 0x00000E48, 0x0CBB }, /* R3656 - EQ3_13 */
{ 0x00000E49, 0x16F8 }, /* R3657 - EQ3_14 */
{ 0x00000E4A, 0xF7D9 }, /* R3658 - EQ3_15 */
{ 0x00000E4B, 0x040A }, /* R3659 - EQ3_16 */
{ 0x00000E4C, 0x1F14 }, /* R3660 - EQ3_17 */
{ 0x00000E4D, 0x058C }, /* R3661 - EQ3_18 */
{ 0x00000E4E, 0x0563 }, /* R3662 - EQ3_19 */
{ 0x00000E4F, 0x4000 }, /* R3663 - EQ3_20 */
{ 0x00000E50, 0x0B75 }, /* R3664 - EQ3_21 */
{ 0x00000E52, 0x6318 }, /* R3666 - EQ4_1 */
{ 0x00000E53, 0x6300 }, /* R3667 - EQ4_2 */
{ 0x00000E54, 0x0FC8 }, /* R3668 - EQ4_3 */
{ 0x00000E55, 0x03FE }, /* R3669 - EQ4_4 */
{ 0x00000E56, 0x00E0 }, /* R3670 - EQ4_5 */
{ 0x00000E57, 0x1EC4 }, /* R3671 - EQ4_6 */
{ 0x00000E58, 0xF136 }, /* R3672 - EQ4_7 */
{ 0x00000E59, 0x0409 }, /* R3673 - EQ4_8 */
{ 0x00000E5A, 0x04CC }, /* R3674 - EQ4_9 */
{ 0x00000E5B, 0x1C9B }, /* R3675 - EQ4_10 */
{ 0x00000E5C, 0xF337 }, /* R3676 - EQ4_11 */
{ 0x00000E5D, 0x040B }, /* R3677 - EQ4_12 */
{ 0x00000E5E, 0x0CBB }, /* R3678 - EQ4_13 */
{ 0x00000E5F, 0x16F8 }, /* R3679 - EQ4_14 */
{ 0x00000E60, 0xF7D9 }, /* R3680 - EQ4_15 */
{ 0x00000E61, 0x040A }, /* R3681 - EQ4_16 */
{ 0x00000E62, 0x1F14 }, /* R3682 - EQ4_17 */
{ 0x00000E63, 0x058C }, /* R3683 - EQ4_18 */
{ 0x00000E64, 0x0563 }, /* R3684 - EQ4_19 */
{ 0x00000E65, 0x4000 }, /* R3685 - EQ4_20 */
{ 0x00000E66, 0x0B75 }, /* R3686 - EQ4_21 */
{ 0x00000E80, 0x0018 }, /* R3712 - DRC1 ctrl1 */
{ 0x00000E81, 0x0933 }, /* R3713 - DRC1 ctrl2 */
{ 0x00000E82, 0x0018 }, /* R3714 - DRC1 ctrl3 */
{ 0x00000E83, 0x0000 }, /* R3715 - DRC1 ctrl4 */
{ 0x00000E84, 0x0000 }, /* R3716 - DRC1 ctrl5 */
{ 0x00000EC0, 0x0000 }, /* R3776 - HPLPF1_1 */
{ 0x00000EC1, 0x0000 }, /* R3777 - HPLPF1_2 */
{ 0x00000EC4, 0x0000 }, /* R3780 - HPLPF2_1 */
{ 0x00000EC5, 0x0000 }, /* R3781 - HPLPF2_2 */
{ 0x00000EC8, 0x0000 }, /* R3784 - HPLPF3_1 */
{ 0x00000EC9, 0x0000 }, /* R3785 - HPLPF3_2 */
{ 0x00000ECC, 0x0000 }, /* R3788 - HPLPF4_1 */
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
{ 0x00000EF3, 0x0000 }, /* R3827 - ISRC 2 CTRL 1 */
{ 0x00000EF4, 0x0000 }, /* R3828 - ISRC 2 CTRL 2 */
{ 0x00000EF5, 0x0000 }, /* R3829 - ISRC 2 CTRL 3 */
{ 0x00001100, 0x0010 }, /* R4352 - DSP1 Control 1 */
{ 0x00001101, 0x0000 }, /* R4353 - DSP1 Clocking 1 */
};
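/*
 * Report whether a register may be read.  Wired up as the regmap
 * .readable_reg callback below; any address not listed is readable
 * only if it falls inside one of the ranges checked in the default
 * branch.
 */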
static bool wm5102_readable_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case ARIZONA_SOFTWARE_RESET:
case ARIZONA_DEVICE_REVISION:
case ARIZONA_CTRL_IF_SPI_CFG_1:
case ARIZONA_CTRL_IF_I2C1_CFG_1:
case ARIZONA_CTRL_IF_STATUS_1:
case ARIZONA_WRITE_SEQUENCER_CTRL_0:
case ARIZONA_WRITE_SEQUENCER_CTRL_1:
case ARIZONA_WRITE_SEQUENCER_CTRL_2:
case ARIZONA_WRITE_SEQUENCER_PROM:
case ARIZONA_TONE_GENERATOR_1:
case ARIZONA_TONE_GENERATOR_2:
case ARIZONA_TONE_GENERATOR_3:
case ARIZONA_TONE_GENERATOR_4:
case ARIZONA_TONE_GENERATOR_5:
case ARIZONA_PWM_DRIVE_1:
case ARIZONA_PWM_DRIVE_2:
case ARIZONA_PWM_DRIVE_3:
case ARIZONA_WAKE_CONTROL:
case ARIZONA_SEQUENCE_CONTROL:
case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_1:
case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_2:
case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_3:
case ARIZONA_SAMPLE_RATE_SEQUENCE_SELECT_4:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_3:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_4:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_5:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_6:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_7:
case ARIZONA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_8:
case ARIZONA_COMFORT_NOISE_GENERATOR:
case ARIZONA_HAPTICS_CONTROL_1:
case ARIZONA_HAPTICS_CONTROL_2:
case ARIZONA_HAPTICS_PHASE_1_INTENSITY:
case ARIZONA_HAPTICS_PHASE_1_DURATION:
case ARIZONA_HAPTICS_PHASE_2_INTENSITY:
case ARIZONA_HAPTICS_PHASE_2_DURATION:
case ARIZONA_HAPTICS_PHASE_3_INTENSITY:
case ARIZONA_HAPTICS_PHASE_3_DURATION:
case ARIZONA_HAPTICS_STATUS:
case ARIZONA_CLOCK_32K_1:
case ARIZONA_SYSTEM_CLOCK_1:
case ARIZONA_SAMPLE_RATE_1:
case ARIZONA_SAMPLE_RATE_2:
case ARIZONA_SAMPLE_RATE_3:
case ARIZONA_SAMPLE_RATE_1_STATUS:
case ARIZONA_SAMPLE_RATE_2_STATUS:
case ARIZONA_SAMPLE_RATE_3_STATUS:
case ARIZONA_ASYNC_CLOCK_1:
case ARIZONA_ASYNC_SAMPLE_RATE_1:
case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_2:
case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
case ARIZONA_OUTPUT_SYSTEM_CLOCK:
case ARIZONA_OUTPUT_ASYNC_CLOCK:
case ARIZONA_RATE_ESTIMATOR_1:
case ARIZONA_RATE_ESTIMATOR_2:
case ARIZONA_RATE_ESTIMATOR_3:
case ARIZONA_RATE_ESTIMATOR_4:
case ARIZONA_RATE_ESTIMATOR_5:
case ARIZONA_DYNAMIC_FREQUENCY_SCALING_1:
case ARIZONA_FLL1_CONTROL_1:
case ARIZONA_FLL1_CONTROL_2:
case ARIZONA_FLL1_CONTROL_3:
case ARIZONA_FLL1_CONTROL_4:
case ARIZONA_FLL1_CONTROL_5:
case ARIZONA_FLL1_CONTROL_6:
case ARIZONA_FLL1_LOOP_FILTER_TEST_1:
case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL1_CONTROL_7:
case ARIZONA_FLL1_SYNCHRONISER_1:
case ARIZONA_FLL1_SYNCHRONISER_2:
case ARIZONA_FLL1_SYNCHRONISER_3:
case ARIZONA_FLL1_SYNCHRONISER_4:
case ARIZONA_FLL1_SYNCHRONISER_5:
case ARIZONA_FLL1_SYNCHRONISER_6:
case ARIZONA_FLL1_SYNCHRONISER_7:
case ARIZONA_FLL1_SPREAD_SPECTRUM:
case ARIZONA_FLL1_GPIO_CLOCK:
case ARIZONA_FLL2_CONTROL_1:
case ARIZONA_FLL2_CONTROL_2:
case ARIZONA_FLL2_CONTROL_3:
case ARIZONA_FLL2_CONTROL_4:
case ARIZONA_FLL2_CONTROL_5:
case ARIZONA_FLL2_CONTROL_6:
case ARIZONA_FLL2_LOOP_FILTER_TEST_1:
case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_FLL2_CONTROL_7:
case ARIZONA_FLL2_SYNCHRONISER_1:
case ARIZONA_FLL2_SYNCHRONISER_2:
case ARIZONA_FLL2_SYNCHRONISER_3:
case ARIZONA_FLL2_SYNCHRONISER_4:
case ARIZONA_FLL2_SYNCHRONISER_5:
case ARIZONA_FLL2_SYNCHRONISER_6:
case ARIZONA_FLL2_SYNCHRONISER_7:
case ARIZONA_FLL2_SPREAD_SPECTRUM:
case ARIZONA_FLL2_GPIO_CLOCK:
case ARIZONA_MIC_CHARGE_PUMP_1:
case ARIZONA_LDO1_CONTROL_1:
case ARIZONA_LDO1_CONTROL_2:
case ARIZONA_LDO2_CONTROL_1:
case ARIZONA_MIC_BIAS_CTRL_1:
case ARIZONA_MIC_BIAS_CTRL_2:
case ARIZONA_MIC_BIAS_CTRL_3:
case ARIZONA_ACCESSORY_DETECT_MODE_1:
case ARIZONA_HEADPHONE_DETECT_1:
case ARIZONA_HEADPHONE_DETECT_2:
case ARIZONA_HP_DACVAL:
case ARIZONA_MICD_CLAMP_CONTROL:
case ARIZONA_MIC_DETECT_1:
case ARIZONA_MIC_DETECT_2:
case ARIZONA_MIC_DETECT_3:
case ARIZONA_MIC_DETECT_LEVEL_1:
case ARIZONA_MIC_DETECT_LEVEL_2:
case ARIZONA_MIC_DETECT_LEVEL_3:
case ARIZONA_MIC_DETECT_LEVEL_4:
case ARIZONA_MIC_NOISE_MIX_CONTROL_1:
case ARIZONA_ISOLATION_CONTROL:
case ARIZONA_JACK_DETECT_ANALOGUE:
case ARIZONA_INPUT_ENABLES:
case ARIZONA_INPUT_RATE:
case ARIZONA_INPUT_VOLUME_RAMP:
case ARIZONA_IN1L_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_1L:
case ARIZONA_DMIC1L_CONTROL:
case ARIZONA_IN1R_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_1R:
case ARIZONA_DMIC1R_CONTROL:
case ARIZONA_IN2L_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_2L:
case ARIZONA_DMIC2L_CONTROL:
case ARIZONA_IN2R_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_2R:
case ARIZONA_DMIC2R_CONTROL:
case ARIZONA_IN3L_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_3L:
case ARIZONA_DMIC3L_CONTROL:
case ARIZONA_IN3R_CONTROL:
case ARIZONA_ADC_DIGITAL_VOLUME_3R:
case ARIZONA_DMIC3R_CONTROL:
case ARIZONA_OUTPUT_ENABLES_1:
case ARIZONA_OUTPUT_STATUS_1:
case ARIZONA_OUTPUT_RATE_1:
case ARIZONA_OUTPUT_VOLUME_RAMP:
case ARIZONA_OUTPUT_PATH_CONFIG_1L:
case ARIZONA_DAC_DIGITAL_VOLUME_1L:
case ARIZONA_DAC_VOLUME_LIMIT_1L:
case ARIZONA_NOISE_GATE_SELECT_1L:
case ARIZONA_OUTPUT_PATH_CONFIG_1R:
case ARIZONA_DAC_DIGITAL_VOLUME_1R:
case ARIZONA_DAC_VOLUME_LIMIT_1R:
case ARIZONA_NOISE_GATE_SELECT_1R:
case ARIZONA_OUTPUT_PATH_CONFIG_2L:
case ARIZONA_DAC_DIGITAL_VOLUME_2L:
case ARIZONA_DAC_VOLUME_LIMIT_2L:
case ARIZONA_NOISE_GATE_SELECT_2L:
case ARIZONA_OUTPUT_PATH_CONFIG_2R:
case ARIZONA_DAC_DIGITAL_VOLUME_2R:
case ARIZONA_DAC_VOLUME_LIMIT_2R:
case ARIZONA_NOISE_GATE_SELECT_2R:
case ARIZONA_OUTPUT_PATH_CONFIG_3L:
case ARIZONA_DAC_DIGITAL_VOLUME_3L:
case ARIZONA_DAC_VOLUME_LIMIT_3L:
case ARIZONA_NOISE_GATE_SELECT_3L:
case ARIZONA_OUTPUT_PATH_CONFIG_3R:
case ARIZONA_DAC_DIGITAL_VOLUME_3R:
case ARIZONA_DAC_VOLUME_LIMIT_3R:
case ARIZONA_OUTPUT_PATH_CONFIG_4L:
case ARIZONA_DAC_DIGITAL_VOLUME_4L:
case ARIZONA_OUT_VOLUME_4L:
case ARIZONA_NOISE_GATE_SELECT_4L:
case ARIZONA_OUTPUT_PATH_CONFIG_4R:
case ARIZONA_DAC_DIGITAL_VOLUME_4R:
case ARIZONA_OUT_VOLUME_4R:
case ARIZONA_NOISE_GATE_SELECT_4R:
case ARIZONA_OUTPUT_PATH_CONFIG_5L:
case ARIZONA_DAC_DIGITAL_VOLUME_5L:
case ARIZONA_DAC_VOLUME_LIMIT_5L:
case ARIZONA_NOISE_GATE_SELECT_5L:
case ARIZONA_OUTPUT_PATH_CONFIG_5R:
case ARIZONA_DAC_DIGITAL_VOLUME_5R:
case ARIZONA_DAC_VOLUME_LIMIT_5R:
case ARIZONA_NOISE_GATE_SELECT_5R:
case ARIZONA_DRE_ENABLE:
case ARIZONA_DRE_CONTROL_2:
case ARIZONA_DRE_CONTROL_3:
case ARIZONA_DAC_AEC_CONTROL_1:
case ARIZONA_NOISE_GATE_CONTROL:
case ARIZONA_PDM_SPK1_CTRL_1:
case ARIZONA_PDM_SPK1_CTRL_2:
case ARIZONA_SPK_CTRL_2:
case ARIZONA_SPK_CTRL_3:
case ARIZONA_DAC_COMP_1:
case ARIZONA_DAC_COMP_2:
case ARIZONA_DAC_COMP_3:
case ARIZONA_DAC_COMP_4:
case ARIZONA_AIF1_BCLK_CTRL:
case ARIZONA_AIF1_TX_PIN_CTRL:
case ARIZONA_AIF1_RX_PIN_CTRL:
case ARIZONA_AIF1_RATE_CTRL:
case ARIZONA_AIF1_FORMAT:
case ARIZONA_AIF1_TX_BCLK_RATE:
case ARIZONA_AIF1_RX_BCLK_RATE:
case ARIZONA_AIF1_FRAME_CTRL_1:
case ARIZONA_AIF1_FRAME_CTRL_2:
case ARIZONA_AIF1_FRAME_CTRL_3:
case ARIZONA_AIF1_FRAME_CTRL_4:
case ARIZONA_AIF1_FRAME_CTRL_5:
case ARIZONA_AIF1_FRAME_CTRL_6:
case ARIZONA_AIF1_FRAME_CTRL_7:
case ARIZONA_AIF1_FRAME_CTRL_8:
case ARIZONA_AIF1_FRAME_CTRL_9:
case ARIZONA_AIF1_FRAME_CTRL_10:
case ARIZONA_AIF1_FRAME_CTRL_11:
case ARIZONA_AIF1_FRAME_CTRL_12:
case ARIZONA_AIF1_FRAME_CTRL_13:
case ARIZONA_AIF1_FRAME_CTRL_14:
case ARIZONA_AIF1_FRAME_CTRL_15:
case ARIZONA_AIF1_FRAME_CTRL_16:
case ARIZONA_AIF1_FRAME_CTRL_17:
case ARIZONA_AIF1_FRAME_CTRL_18:
case ARIZONA_AIF1_TX_ENABLES:
case ARIZONA_AIF1_RX_ENABLES:
case ARIZONA_AIF1_FORCE_WRITE:
case ARIZONA_AIF2_BCLK_CTRL:
case ARIZONA_AIF2_TX_PIN_CTRL:
case ARIZONA_AIF2_RX_PIN_CTRL:
case ARIZONA_AIF2_RATE_CTRL:
case ARIZONA_AIF2_FORMAT:
case ARIZONA_AIF2_TX_BCLK_RATE:
case ARIZONA_AIF2_RX_BCLK_RATE:
case ARIZONA_AIF2_FRAME_CTRL_1:
case ARIZONA_AIF2_FRAME_CTRL_2:
case ARIZONA_AIF2_FRAME_CTRL_3:
case ARIZONA_AIF2_FRAME_CTRL_4:
case ARIZONA_AIF2_FRAME_CTRL_11:
case ARIZONA_AIF2_FRAME_CTRL_12:
case ARIZONA_AIF2_TX_ENABLES:
case ARIZONA_AIF2_RX_ENABLES:
case ARIZONA_AIF2_FORCE_WRITE:
case ARIZONA_AIF3_BCLK_CTRL:
case ARIZONA_AIF3_TX_PIN_CTRL:
case ARIZONA_AIF3_RX_PIN_CTRL:
case ARIZONA_AIF3_RATE_CTRL:
case ARIZONA_AIF3_FORMAT:
case ARIZONA_AIF3_TX_BCLK_RATE:
case ARIZONA_AIF3_RX_BCLK_RATE:
case ARIZONA_AIF3_FRAME_CTRL_1:
case ARIZONA_AIF3_FRAME_CTRL_2:
case ARIZONA_AIF3_FRAME_CTRL_3:
case ARIZONA_AIF3_FRAME_CTRL_4:
case ARIZONA_AIF3_FRAME_CTRL_11:
case ARIZONA_AIF3_FRAME_CTRL_12:
case ARIZONA_AIF3_TX_ENABLES:
case ARIZONA_AIF3_RX_ENABLES:
case ARIZONA_AIF3_FORCE_WRITE:
case ARIZONA_SLIMBUS_FRAMER_REF_GEAR:
case ARIZONA_SLIMBUS_RATES_1:
case ARIZONA_SLIMBUS_RATES_2:
case ARIZONA_SLIMBUS_RATES_3:
case ARIZONA_SLIMBUS_RATES_4:
case ARIZONA_SLIMBUS_RATES_5:
case ARIZONA_SLIMBUS_RATES_6:
case ARIZONA_SLIMBUS_RATES_7:
case ARIZONA_SLIMBUS_RATES_8:
case ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE:
case ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE:
case ARIZONA_SLIMBUS_RX_PORT_STATUS:
case ARIZONA_SLIMBUS_TX_PORT_STATUS:
case ARIZONA_PWM1MIX_INPUT_1_SOURCE:
case ARIZONA_PWM1MIX_INPUT_1_VOLUME:
case ARIZONA_PWM1MIX_INPUT_2_SOURCE:
case ARIZONA_PWM1MIX_INPUT_2_VOLUME:
case ARIZONA_PWM1MIX_INPUT_3_SOURCE:
case ARIZONA_PWM1MIX_INPUT_3_VOLUME:
case ARIZONA_PWM1MIX_INPUT_4_SOURCE:
case ARIZONA_PWM1MIX_INPUT_4_VOLUME:
case ARIZONA_PWM2MIX_INPUT_1_SOURCE:
case ARIZONA_PWM2MIX_INPUT_1_VOLUME:
case ARIZONA_PWM2MIX_INPUT_2_SOURCE:
case ARIZONA_PWM2MIX_INPUT_2_VOLUME:
case ARIZONA_PWM2MIX_INPUT_3_SOURCE:
case ARIZONA_PWM2MIX_INPUT_3_VOLUME:
case ARIZONA_PWM2MIX_INPUT_4_SOURCE:
case ARIZONA_PWM2MIX_INPUT_4_VOLUME:
case ARIZONA_MICMIX_INPUT_1_SOURCE:
case ARIZONA_MICMIX_INPUT_1_VOLUME:
case ARIZONA_MICMIX_INPUT_2_SOURCE:
case ARIZONA_MICMIX_INPUT_2_VOLUME:
case ARIZONA_MICMIX_INPUT_3_SOURCE:
case ARIZONA_MICMIX_INPUT_3_VOLUME:
case ARIZONA_MICMIX_INPUT_4_SOURCE:
case ARIZONA_MICMIX_INPUT_4_VOLUME:
case ARIZONA_NOISEMIX_INPUT_1_SOURCE:
case ARIZONA_NOISEMIX_INPUT_1_VOLUME:
case ARIZONA_NOISEMIX_INPUT_2_SOURCE:
case ARIZONA_NOISEMIX_INPUT_2_VOLUME:
case ARIZONA_NOISEMIX_INPUT_3_SOURCE:
case ARIZONA_NOISEMIX_INPUT_3_VOLUME:
case ARIZONA_NOISEMIX_INPUT_4_SOURCE:
case ARIZONA_NOISEMIX_INPUT_4_VOLUME:
case ARIZONA_OUT1LMIX_INPUT_1_SOURCE:
case ARIZONA_OUT1LMIX_INPUT_1_VOLUME:
case ARIZONA_OUT1LMIX_INPUT_2_SOURCE:
case ARIZONA_OUT1LMIX_INPUT_2_VOLUME:
case ARIZONA_OUT1LMIX_INPUT_3_SOURCE:
case ARIZONA_OUT1LMIX_INPUT_3_VOLUME:
case ARIZONA_OUT1LMIX_INPUT_4_SOURCE:
case ARIZONA_OUT1LMIX_INPUT_4_VOLUME:
case ARIZONA_OUT1RMIX_INPUT_1_SOURCE:
case ARIZONA_OUT1RMIX_INPUT_1_VOLUME:
case ARIZONA_OUT1RMIX_INPUT_2_SOURCE:
case ARIZONA_OUT1RMIX_INPUT_2_VOLUME:
case ARIZONA_OUT1RMIX_INPUT_3_SOURCE:
case ARIZONA_OUT1RMIX_INPUT_3_VOLUME:
case ARIZONA_OUT1RMIX_INPUT_4_SOURCE:
case ARIZONA_OUT1RMIX_INPUT_4_VOLUME:
case ARIZONA_OUT2LMIX_INPUT_1_SOURCE:
case ARIZONA_OUT2LMIX_INPUT_1_VOLUME:
case ARIZONA_OUT2LMIX_INPUT_2_SOURCE:
case ARIZONA_OUT2LMIX_INPUT_2_VOLUME:
case ARIZONA_OUT2LMIX_INPUT_3_SOURCE:
case ARIZONA_OUT2LMIX_INPUT_3_VOLUME:
case ARIZONA_OUT2LMIX_INPUT_4_SOURCE:
case ARIZONA_OUT2LMIX_INPUT_4_VOLUME:
case ARIZONA_OUT2RMIX_INPUT_1_SOURCE:
case ARIZONA_OUT2RMIX_INPUT_1_VOLUME:
case ARIZONA_OUT2RMIX_INPUT_2_SOURCE:
case ARIZONA_OUT2RMIX_INPUT_2_VOLUME:
case ARIZONA_OUT2RMIX_INPUT_3_SOURCE:
case ARIZONA_OUT2RMIX_INPUT_3_VOLUME:
case ARIZONA_OUT2RMIX_INPUT_4_SOURCE:
case ARIZONA_OUT2RMIX_INPUT_4_VOLUME:
case ARIZONA_OUT3LMIX_INPUT_1_SOURCE:
case ARIZONA_OUT3LMIX_INPUT_1_VOLUME:
case ARIZONA_OUT3LMIX_INPUT_2_SOURCE:
case ARIZONA_OUT3LMIX_INPUT_2_VOLUME:
case ARIZONA_OUT3LMIX_INPUT_3_SOURCE:
case ARIZONA_OUT3LMIX_INPUT_3_VOLUME:
case ARIZONA_OUT3LMIX_INPUT_4_SOURCE:
case ARIZONA_OUT3LMIX_INPUT_4_VOLUME:
case ARIZONA_OUT4LMIX_INPUT_1_SOURCE:
case ARIZONA_OUT4LMIX_INPUT_1_VOLUME:
case ARIZONA_OUT4LMIX_INPUT_2_SOURCE:
case ARIZONA_OUT4LMIX_INPUT_2_VOLUME:
case ARIZONA_OUT4LMIX_INPUT_3_SOURCE:
case ARIZONA_OUT4LMIX_INPUT_3_VOLUME:
case ARIZONA_OUT4LMIX_INPUT_4_SOURCE:
case ARIZONA_OUT4LMIX_INPUT_4_VOLUME:
case ARIZONA_OUT4RMIX_INPUT_1_SOURCE:
case ARIZONA_OUT4RMIX_INPUT_1_VOLUME:
case ARIZONA_OUT4RMIX_INPUT_2_SOURCE:
case ARIZONA_OUT4RMIX_INPUT_2_VOLUME:
case ARIZONA_OUT4RMIX_INPUT_3_SOURCE:
case ARIZONA_OUT4RMIX_INPUT_3_VOLUME:
case ARIZONA_OUT4RMIX_INPUT_4_SOURCE:
case ARIZONA_OUT4RMIX_INPUT_4_VOLUME:
case ARIZONA_OUT5LMIX_INPUT_1_SOURCE:
case ARIZONA_OUT5LMIX_INPUT_1_VOLUME:
case ARIZONA_OUT5LMIX_INPUT_2_SOURCE:
case ARIZONA_OUT5LMIX_INPUT_2_VOLUME:
case ARIZONA_OUT5LMIX_INPUT_3_SOURCE:
case ARIZONA_OUT5LMIX_INPUT_3_VOLUME:
case ARIZONA_OUT5LMIX_INPUT_4_SOURCE:
case ARIZONA_OUT5LMIX_INPUT_4_VOLUME:
case ARIZONA_OUT5RMIX_INPUT_1_SOURCE:
case ARIZONA_OUT5RMIX_INPUT_1_VOLUME:
case ARIZONA_OUT5RMIX_INPUT_2_SOURCE:
case ARIZONA_OUT5RMIX_INPUT_2_VOLUME:
case ARIZONA_OUT5RMIX_INPUT_3_SOURCE:
case ARIZONA_OUT5RMIX_INPUT_3_VOLUME:
case ARIZONA_OUT5RMIX_INPUT_4_SOURCE:
case ARIZONA_OUT5RMIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX1MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX1MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX1MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX1MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX1MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX1MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX1MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX2MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX2MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX2MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX2MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX2MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX2MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX2MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX3MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX3MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX3MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX3MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX3MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX3MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX3MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX4MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX4MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX4MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX4MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX4MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX4MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX4MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX5MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX5MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX5MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX5MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX5MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX5MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX5MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX6MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX6MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX6MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX6MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX6MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX6MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX6MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX7MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX7MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX7MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX7MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX7MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX7MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX7MIX_INPUT_4_VOLUME:
case ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE:
case ARIZONA_AIF1TX8MIX_INPUT_1_VOLUME:
case ARIZONA_AIF1TX8MIX_INPUT_2_SOURCE:
case ARIZONA_AIF1TX8MIX_INPUT_2_VOLUME:
case ARIZONA_AIF1TX8MIX_INPUT_3_SOURCE:
case ARIZONA_AIF1TX8MIX_INPUT_3_VOLUME:
case ARIZONA_AIF1TX8MIX_INPUT_4_SOURCE:
case ARIZONA_AIF1TX8MIX_INPUT_4_VOLUME:
case ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE:
case ARIZONA_AIF2TX1MIX_INPUT_1_VOLUME:
case ARIZONA_AIF2TX1MIX_INPUT_2_SOURCE:
case ARIZONA_AIF2TX1MIX_INPUT_2_VOLUME:
case ARIZONA_AIF2TX1MIX_INPUT_3_SOURCE:
case ARIZONA_AIF2TX1MIX_INPUT_3_VOLUME:
case ARIZONA_AIF2TX1MIX_INPUT_4_SOURCE:
case ARIZONA_AIF2TX1MIX_INPUT_4_VOLUME:
case ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE:
case ARIZONA_AIF2TX2MIX_INPUT_1_VOLUME:
case ARIZONA_AIF2TX2MIX_INPUT_2_SOURCE:
case ARIZONA_AIF2TX2MIX_INPUT_2_VOLUME:
case ARIZONA_AIF2TX2MIX_INPUT_3_SOURCE:
case ARIZONA_AIF2TX2MIX_INPUT_3_VOLUME:
case ARIZONA_AIF2TX2MIX_INPUT_4_SOURCE:
case ARIZONA_AIF2TX2MIX_INPUT_4_VOLUME:
case ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE:
case ARIZONA_AIF3TX1MIX_INPUT_1_VOLUME:
case ARIZONA_AIF3TX1MIX_INPUT_2_SOURCE:
case ARIZONA_AIF3TX1MIX_INPUT_2_VOLUME:
case ARIZONA_AIF3TX1MIX_INPUT_3_SOURCE:
case ARIZONA_AIF3TX1MIX_INPUT_3_VOLUME:
case ARIZONA_AIF3TX1MIX_INPUT_4_SOURCE:
case ARIZONA_AIF3TX1MIX_INPUT_4_VOLUME:
case ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE:
case ARIZONA_AIF3TX2MIX_INPUT_1_VOLUME:
case ARIZONA_AIF3TX2MIX_INPUT_2_SOURCE:
case ARIZONA_AIF3TX2MIX_INPUT_2_VOLUME:
case ARIZONA_AIF3TX2MIX_INPUT_3_SOURCE:
case ARIZONA_AIF3TX2MIX_INPUT_3_VOLUME:
case ARIZONA_AIF3TX2MIX_INPUT_4_SOURCE:
case ARIZONA_AIF3TX2MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX1MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX1MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX1MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX1MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX1MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX1MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX1MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX2MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX2MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX2MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX2MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX2MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX2MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX2MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX3MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX3MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX3MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX3MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX3MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX3MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX3MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX4MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX4MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX4MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX4MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX4MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX4MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX4MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX5MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX5MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX5MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX5MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX5MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX5MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX5MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX6MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX6MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX6MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX6MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX6MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX6MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX6MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX7MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX7MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX7MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX7MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX7MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX7MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX7MIX_INPUT_4_VOLUME:
case ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE:
case ARIZONA_SLIMTX8MIX_INPUT_1_VOLUME:
case ARIZONA_SLIMTX8MIX_INPUT_2_SOURCE:
case ARIZONA_SLIMTX8MIX_INPUT_2_VOLUME:
case ARIZONA_SLIMTX8MIX_INPUT_3_SOURCE:
case ARIZONA_SLIMTX8MIX_INPUT_3_VOLUME:
case ARIZONA_SLIMTX8MIX_INPUT_4_SOURCE:
case ARIZONA_SLIMTX8MIX_INPUT_4_VOLUME:
case ARIZONA_EQ1MIX_INPUT_1_SOURCE:
case ARIZONA_EQ1MIX_INPUT_1_VOLUME:
case ARIZONA_EQ1MIX_INPUT_2_SOURCE:
case ARIZONA_EQ1MIX_INPUT_2_VOLUME:
case ARIZONA_EQ1MIX_INPUT_3_SOURCE:
case ARIZONA_EQ1MIX_INPUT_3_VOLUME:
case ARIZONA_EQ1MIX_INPUT_4_SOURCE:
case ARIZONA_EQ1MIX_INPUT_4_VOLUME:
case ARIZONA_EQ2MIX_INPUT_1_SOURCE:
case ARIZONA_EQ2MIX_INPUT_1_VOLUME:
case ARIZONA_EQ2MIX_INPUT_2_SOURCE:
case ARIZONA_EQ2MIX_INPUT_2_VOLUME:
case ARIZONA_EQ2MIX_INPUT_3_SOURCE:
case ARIZONA_EQ2MIX_INPUT_3_VOLUME:
case ARIZONA_EQ2MIX_INPUT_4_SOURCE:
case ARIZONA_EQ2MIX_INPUT_4_VOLUME:
case ARIZONA_EQ3MIX_INPUT_1_SOURCE:
case ARIZONA_EQ3MIX_INPUT_1_VOLUME:
case ARIZONA_EQ3MIX_INPUT_2_SOURCE:
case ARIZONA_EQ3MIX_INPUT_2_VOLUME:
case ARIZONA_EQ3MIX_INPUT_3_SOURCE:
case ARIZONA_EQ3MIX_INPUT_3_VOLUME:
case ARIZONA_EQ3MIX_INPUT_4_SOURCE:
case ARIZONA_EQ3MIX_INPUT_4_VOLUME:
case ARIZONA_EQ4MIX_INPUT_1_SOURCE:
case ARIZONA_EQ4MIX_INPUT_1_VOLUME:
case ARIZONA_EQ4MIX_INPUT_2_SOURCE:
case ARIZONA_EQ4MIX_INPUT_2_VOLUME:
case ARIZONA_EQ4MIX_INPUT_3_SOURCE:
case ARIZONA_EQ4MIX_INPUT_3_VOLUME:
case ARIZONA_EQ4MIX_INPUT_4_SOURCE:
case ARIZONA_EQ4MIX_INPUT_4_VOLUME:
case ARIZONA_DRC1LMIX_INPUT_1_SOURCE:
case ARIZONA_DRC1LMIX_INPUT_1_VOLUME:
case ARIZONA_DRC1LMIX_INPUT_2_SOURCE:
case ARIZONA_DRC1LMIX_INPUT_2_VOLUME:
case ARIZONA_DRC1LMIX_INPUT_3_SOURCE:
case ARIZONA_DRC1LMIX_INPUT_3_VOLUME:
case ARIZONA_DRC1LMIX_INPUT_4_SOURCE:
case ARIZONA_DRC1LMIX_INPUT_4_VOLUME:
case ARIZONA_DRC1RMIX_INPUT_1_SOURCE:
case ARIZONA_DRC1RMIX_INPUT_1_VOLUME:
case ARIZONA_DRC1RMIX_INPUT_2_SOURCE:
case ARIZONA_DRC1RMIX_INPUT_2_VOLUME:
case ARIZONA_DRC1RMIX_INPUT_3_SOURCE:
case ARIZONA_DRC1RMIX_INPUT_3_VOLUME:
case ARIZONA_DRC1RMIX_INPUT_4_SOURCE:
case ARIZONA_DRC1RMIX_INPUT_4_VOLUME:
case ARIZONA_DRC2LMIX_INPUT_1_SOURCE:
case ARIZONA_DRC2LMIX_INPUT_1_VOLUME:
case ARIZONA_DRC2LMIX_INPUT_2_SOURCE:
case ARIZONA_DRC2LMIX_INPUT_2_VOLUME:
case ARIZONA_DRC2LMIX_INPUT_3_SOURCE:
case ARIZONA_DRC2LMIX_INPUT_3_VOLUME:
case ARIZONA_DRC2LMIX_INPUT_4_SOURCE:
case ARIZONA_DRC2LMIX_INPUT_4_VOLUME:
case ARIZONA_DRC2RMIX_INPUT_1_SOURCE:
case ARIZONA_DRC2RMIX_INPUT_1_VOLUME:
case ARIZONA_DRC2RMIX_INPUT_2_SOURCE:
case ARIZONA_DRC2RMIX_INPUT_2_VOLUME:
case ARIZONA_DRC2RMIX_INPUT_3_SOURCE:
case ARIZONA_DRC2RMIX_INPUT_3_VOLUME:
case ARIZONA_DRC2RMIX_INPUT_4_SOURCE:
case ARIZONA_DRC2RMIX_INPUT_4_VOLUME:
case ARIZONA_HPLP1MIX_INPUT_1_SOURCE:
case ARIZONA_HPLP1MIX_INPUT_1_VOLUME:
case ARIZONA_HPLP1MIX_INPUT_2_SOURCE:
case ARIZONA_HPLP1MIX_INPUT_2_VOLUME:
case ARIZONA_HPLP1MIX_INPUT_3_SOURCE:
case ARIZONA_HPLP1MIX_INPUT_3_VOLUME:
case ARIZONA_HPLP1MIX_INPUT_4_SOURCE:
case ARIZONA_HPLP1MIX_INPUT_4_VOLUME:
case ARIZONA_HPLP2MIX_INPUT_1_SOURCE:
case ARIZONA_HPLP2MIX_INPUT_1_VOLUME:
case ARIZONA_HPLP2MIX_INPUT_2_SOURCE:
case ARIZONA_HPLP2MIX_INPUT_2_VOLUME:
case ARIZONA_HPLP2MIX_INPUT_3_SOURCE:
case ARIZONA_HPLP2MIX_INPUT_3_VOLUME:
case ARIZONA_HPLP2MIX_INPUT_4_SOURCE:
case ARIZONA_HPLP2MIX_INPUT_4_VOLUME:
case ARIZONA_HPLP3MIX_INPUT_1_SOURCE:
case ARIZONA_HPLP3MIX_INPUT_1_VOLUME:
case ARIZONA_HPLP3MIX_INPUT_2_SOURCE:
case ARIZONA_HPLP3MIX_INPUT_2_VOLUME:
case ARIZONA_HPLP3MIX_INPUT_3_SOURCE:
case ARIZONA_HPLP3MIX_INPUT_3_VOLUME:
case ARIZONA_HPLP3MIX_INPUT_4_SOURCE:
case ARIZONA_HPLP3MIX_INPUT_4_VOLUME:
case ARIZONA_HPLP4MIX_INPUT_1_SOURCE:
case ARIZONA_HPLP4MIX_INPUT_1_VOLUME:
case ARIZONA_HPLP4MIX_INPUT_2_SOURCE:
case ARIZONA_HPLP4MIX_INPUT_2_VOLUME:
case ARIZONA_HPLP4MIX_INPUT_3_SOURCE:
case ARIZONA_HPLP4MIX_INPUT_3_VOLUME:
case ARIZONA_HPLP4MIX_INPUT_4_SOURCE:
case ARIZONA_HPLP4MIX_INPUT_4_VOLUME:
case ARIZONA_DSP1LMIX_INPUT_1_SOURCE:
case ARIZONA_DSP1LMIX_INPUT_1_VOLUME:
case ARIZONA_DSP1LMIX_INPUT_2_SOURCE:
case ARIZONA_DSP1LMIX_INPUT_2_VOLUME:
case ARIZONA_DSP1LMIX_INPUT_3_SOURCE:
case ARIZONA_DSP1LMIX_INPUT_3_VOLUME:
case ARIZONA_DSP1LMIX_INPUT_4_SOURCE:
case ARIZONA_DSP1LMIX_INPUT_4_VOLUME:
case ARIZONA_DSP1RMIX_INPUT_1_SOURCE:
case ARIZONA_DSP1RMIX_INPUT_1_VOLUME:
case ARIZONA_DSP1RMIX_INPUT_2_SOURCE:
case ARIZONA_DSP1RMIX_INPUT_2_VOLUME:
case ARIZONA_DSP1RMIX_INPUT_3_SOURCE:
case ARIZONA_DSP1RMIX_INPUT_3_VOLUME:
case ARIZONA_DSP1RMIX_INPUT_4_SOURCE:
case ARIZONA_DSP1RMIX_INPUT_4_VOLUME:
case ARIZONA_DSP1AUX1MIX_INPUT_1_SOURCE:
case ARIZONA_DSP1AUX2MIX_INPUT_1_SOURCE:
case ARIZONA_DSP1AUX3MIX_INPUT_1_SOURCE:
case ARIZONA_DSP1AUX4MIX_INPUT_1_SOURCE:
case ARIZONA_DSP1AUX5MIX_INPUT_1_SOURCE:
case ARIZONA_DSP1AUX6MIX_INPUT_1_SOURCE:
case ARIZONA_ASRC1LMIX_INPUT_1_SOURCE:
case ARIZONA_ASRC1RMIX_INPUT_1_SOURCE:
case ARIZONA_ASRC2LMIX_INPUT_1_SOURCE:
case ARIZONA_ASRC2RMIX_INPUT_1_SOURCE:
case ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE:
case ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE:
case ARIZONA_GPIO1_CTRL:
case ARIZONA_GPIO2_CTRL:
case ARIZONA_GPIO3_CTRL:
case ARIZONA_GPIO4_CTRL:
case ARIZONA_GPIO5_CTRL:
case ARIZONA_IRQ_CTRL_1:
case ARIZONA_GPIO_DEBOUNCE_CONFIG:
case ARIZONA_MISC_PAD_CTRL_1:
case ARIZONA_MISC_PAD_CTRL_2:
case ARIZONA_MISC_PAD_CTRL_3:
case ARIZONA_MISC_PAD_CTRL_4:
case ARIZONA_MISC_PAD_CTRL_5:
case ARIZONA_MISC_PAD_CTRL_6:
case ARIZONA_INTERRUPT_STATUS_1:
case ARIZONA_INTERRUPT_STATUS_2:
case ARIZONA_INTERRUPT_STATUS_3:
case ARIZONA_INTERRUPT_STATUS_4:
case ARIZONA_INTERRUPT_STATUS_5:
case ARIZONA_INTERRUPT_STATUS_1_MASK:
case ARIZONA_INTERRUPT_STATUS_2_MASK:
case ARIZONA_INTERRUPT_STATUS_3_MASK:
case ARIZONA_INTERRUPT_STATUS_4_MASK:
case ARIZONA_INTERRUPT_STATUS_5_MASK:
case ARIZONA_INTERRUPT_CONTROL:
case ARIZONA_IRQ2_STATUS_1:
case ARIZONA_IRQ2_STATUS_2:
case ARIZONA_IRQ2_STATUS_3:
case ARIZONA_IRQ2_STATUS_4:
case ARIZONA_IRQ2_STATUS_5:
case ARIZONA_IRQ2_STATUS_1_MASK:
case ARIZONA_IRQ2_STATUS_2_MASK:
case ARIZONA_IRQ2_STATUS_3_MASK:
case ARIZONA_IRQ2_STATUS_4_MASK:
case ARIZONA_IRQ2_STATUS_5_MASK:
case ARIZONA_IRQ2_CONTROL:
case ARIZONA_INTERRUPT_RAW_STATUS_2:
case ARIZONA_INTERRUPT_RAW_STATUS_3:
case ARIZONA_INTERRUPT_RAW_STATUS_4:
case ARIZONA_INTERRUPT_RAW_STATUS_5:
case ARIZONA_INTERRUPT_RAW_STATUS_6:
case ARIZONA_INTERRUPT_RAW_STATUS_7:
case ARIZONA_INTERRUPT_RAW_STATUS_8:
case ARIZONA_IRQ_PIN_STATUS:
case ARIZONA_ADSP2_IRQ0:
case ARIZONA_AOD_WKUP_AND_TRIG:
case ARIZONA_AOD_IRQ1:
case ARIZONA_AOD_IRQ2:
case ARIZONA_AOD_IRQ_MASK_IRQ1:
case ARIZONA_AOD_IRQ_MASK_IRQ2:
case ARIZONA_AOD_IRQ_RAW_STATUS:
case ARIZONA_JACK_DETECT_DEBOUNCE:
case ARIZONA_FX_CTRL1:
case ARIZONA_FX_CTRL2:
case ARIZONA_EQ1_1:
case ARIZONA_EQ1_2:
case ARIZONA_EQ1_3:
case ARIZONA_EQ1_4:
case ARIZONA_EQ1_5:
case ARIZONA_EQ1_6:
case ARIZONA_EQ1_7:
case ARIZONA_EQ1_8:
case ARIZONA_EQ1_9:
case ARIZONA_EQ1_10:
case ARIZONA_EQ1_11:
case ARIZONA_EQ1_12:
case ARIZONA_EQ1_13:
case ARIZONA_EQ1_14:
case ARIZONA_EQ1_15:
case ARIZONA_EQ1_16:
case ARIZONA_EQ1_17:
case ARIZONA_EQ1_18:
case ARIZONA_EQ1_19:
case ARIZONA_EQ1_20:
case ARIZONA_EQ1_21:
case ARIZONA_EQ2_1:
case ARIZONA_EQ2_2:
case ARIZONA_EQ2_3:
case ARIZONA_EQ2_4:
case ARIZONA_EQ2_5:
case ARIZONA_EQ2_6:
case ARIZONA_EQ2_7:
case ARIZONA_EQ2_8:
case ARIZONA_EQ2_9:
case ARIZONA_EQ2_10:
case ARIZONA_EQ2_11:
case ARIZONA_EQ2_12:
case ARIZONA_EQ2_13:
case ARIZONA_EQ2_14:
case ARIZONA_EQ2_15:
case ARIZONA_EQ2_16:
case ARIZONA_EQ2_17:
case ARIZONA_EQ2_18:
case ARIZONA_EQ2_19:
case ARIZONA_EQ2_20:
case ARIZONA_EQ2_21:
case ARIZONA_EQ3_1:
case ARIZONA_EQ3_2:
case ARIZONA_EQ3_3:
case ARIZONA_EQ3_4:
case ARIZONA_EQ3_5:
case ARIZONA_EQ3_6:
case ARIZONA_EQ3_7:
case ARIZONA_EQ3_8:
case ARIZONA_EQ3_9:
case ARIZONA_EQ3_10:
case ARIZONA_EQ3_11:
case ARIZONA_EQ3_12:
case ARIZONA_EQ3_13:
case ARIZONA_EQ3_14:
case ARIZONA_EQ3_15:
case ARIZONA_EQ3_16:
case ARIZONA_EQ3_17:
case ARIZONA_EQ3_18:
case ARIZONA_EQ3_19:
case ARIZONA_EQ3_20:
case ARIZONA_EQ3_21:
case ARIZONA_EQ4_1:
case ARIZONA_EQ4_2:
case ARIZONA_EQ4_3:
case ARIZONA_EQ4_4:
case ARIZONA_EQ4_5:
case ARIZONA_EQ4_6:
case ARIZONA_EQ4_7:
case ARIZONA_EQ4_8:
case ARIZONA_EQ4_9:
case ARIZONA_EQ4_10:
case ARIZONA_EQ4_11:
case ARIZONA_EQ4_12:
case ARIZONA_EQ4_13:
case ARIZONA_EQ4_14:
case ARIZONA_EQ4_15:
case ARIZONA_EQ4_16:
case ARIZONA_EQ4_17:
case ARIZONA_EQ4_18:
case ARIZONA_EQ4_19:
case ARIZONA_EQ4_20:
case ARIZONA_EQ4_21:
case ARIZONA_DRC1_CTRL1:
case ARIZONA_DRC1_CTRL2:
case ARIZONA_DRC1_CTRL3:
case ARIZONA_DRC1_CTRL4:
case ARIZONA_DRC1_CTRL5:
case ARIZONA_DRC2_CTRL1:
case ARIZONA_DRC2_CTRL2:
case ARIZONA_DRC2_CTRL3:
case ARIZONA_DRC2_CTRL4:
case ARIZONA_DRC2_CTRL5:
case ARIZONA_HPLPF1_1:
case ARIZONA_HPLPF1_2:
case ARIZONA_HPLPF2_1:
case ARIZONA_HPLPF2_2:
case ARIZONA_HPLPF3_1:
case ARIZONA_HPLPF3_2:
case ARIZONA_HPLPF4_1:
case ARIZONA_HPLPF4_2:
case ARIZONA_ASRC_ENABLE:
case ARIZONA_ASRC_RATE1:
case ARIZONA_ASRC_RATE2:
case ARIZONA_ISRC_1_CTRL_1:
case ARIZONA_ISRC_1_CTRL_2:
case ARIZONA_ISRC_1_CTRL_3:
case ARIZONA_ISRC_2_CTRL_1:
case ARIZONA_ISRC_2_CTRL_2:
case ARIZONA_ISRC_2_CTRL_3:
case ARIZONA_ISRC_3_CTRL_1:
case ARIZONA_ISRC_3_CTRL_2:
case ARIZONA_ISRC_3_CTRL_3:
case ARIZONA_DSP1_CONTROL_1:
case ARIZONA_DSP1_CLOCKING_1:
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
case ARIZONA_DSP1_WDMA_BUFFER_1:
case ARIZONA_DSP1_WDMA_BUFFER_2:
case ARIZONA_DSP1_WDMA_BUFFER_3:
case ARIZONA_DSP1_WDMA_BUFFER_4:
case ARIZONA_DSP1_WDMA_BUFFER_5:
case ARIZONA_DSP1_WDMA_BUFFER_6:
case ARIZONA_DSP1_WDMA_BUFFER_7:
case ARIZONA_DSP1_WDMA_BUFFER_8:
case ARIZONA_DSP1_RDMA_BUFFER_1:
case ARIZONA_DSP1_RDMA_BUFFER_2:
case ARIZONA_DSP1_RDMA_BUFFER_3:
case ARIZONA_DSP1_RDMA_BUFFER_4:
case ARIZONA_DSP1_RDMA_BUFFER_5:
case ARIZONA_DSP1_RDMA_BUFFER_6:
case ARIZONA_DSP1_WDMA_CONFIG_1:
case ARIZONA_DSP1_WDMA_CONFIG_2:
case ARIZONA_DSP1_RDMA_CONFIG_1:
case ARIZONA_DSP1_SCRATCH_0:
case ARIZONA_DSP1_SCRATCH_1:
case ARIZONA_DSP1_SCRATCH_2:
case ARIZONA_DSP1_SCRATCH_3:
return true;
default:
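/* Registers in the DSP1 memory windows are also accessible */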
if ((reg >= 0x100000 && reg < 0x106000) ||
(reg >= 0x180000 && reg < 0x180800) ||
(reg >= 0x190000 && reg < 0x194800) ||
(reg >= 0x1a8000 && reg < 0x1a9800))
return true;
else
return false;
}
}
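/*
 * Report whether a register is volatile, i.e. may change under
 * hardware control (status, IRQ and DSP registers), so regmap must
 * always read it from the device rather than from the register cache.
 */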
static bool wm5102_volatile_register(struct device *dev, unsigned int reg)
{
switch (reg) {
case ARIZONA_SOFTWARE_RESET:
case ARIZONA_DEVICE_REVISION:
case ARIZONA_WRITE_SEQUENCER_CTRL_0:
case ARIZONA_WRITE_SEQUENCER_CTRL_1:
case ARIZONA_WRITE_SEQUENCER_CTRL_2:
case ARIZONA_OUTPUT_STATUS_1:
case ARIZONA_RAW_OUTPUT_STATUS_1:
case ARIZONA_SLIMBUS_RX_PORT_STATUS:
case ARIZONA_SLIMBUS_TX_PORT_STATUS:
case ARIZONA_SAMPLE_RATE_1_STATUS:
case ARIZONA_SAMPLE_RATE_2_STATUS:
case ARIZONA_SAMPLE_RATE_3_STATUS:
case ARIZONA_HAPTICS_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS:
case ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS:
case ARIZONA_FLL1_NCO_TEST_0:
case ARIZONA_FLL2_NCO_TEST_0:
case ARIZONA_DAC_COMP_1:
case ARIZONA_DAC_COMP_2:
case ARIZONA_DAC_COMP_3:
case ARIZONA_DAC_COMP_4:
case ARIZONA_FX_CTRL2:
case ARIZONA_INTERRUPT_STATUS_1:
case ARIZONA_INTERRUPT_STATUS_2:
case ARIZONA_INTERRUPT_STATUS_3:
case ARIZONA_INTERRUPT_STATUS_4:
case ARIZONA_INTERRUPT_STATUS_5:
case ARIZONA_IRQ2_STATUS_1:
case ARIZONA_IRQ2_STATUS_2:
case ARIZONA_IRQ2_STATUS_3:
case ARIZONA_IRQ2_STATUS_4:
case ARIZONA_IRQ2_STATUS_5:
case ARIZONA_INTERRUPT_RAW_STATUS_2:
case ARIZONA_INTERRUPT_RAW_STATUS_3:
case ARIZONA_INTERRUPT_RAW_STATUS_4:
case ARIZONA_INTERRUPT_RAW_STATUS_5:
case ARIZONA_INTERRUPT_RAW_STATUS_6:
case ARIZONA_INTERRUPT_RAW_STATUS_7:
case ARIZONA_INTERRUPT_RAW_STATUS_8:
case ARIZONA_IRQ_PIN_STATUS:
case ARIZONA_AOD_WKUP_AND_TRIG:
case ARIZONA_AOD_IRQ1:
case ARIZONA_AOD_IRQ2:
case ARIZONA_AOD_IRQ_RAW_STATUS:
case ARIZONA_DSP1_CLOCKING_1:
case ARIZONA_DSP1_STATUS_1:
case ARIZONA_DSP1_STATUS_2:
case ARIZONA_DSP1_STATUS_3:
case ARIZONA_DSP1_WDMA_BUFFER_1:
case ARIZONA_DSP1_WDMA_BUFFER_2:
case ARIZONA_DSP1_WDMA_BUFFER_3:
case ARIZONA_DSP1_WDMA_BUFFER_4:
case ARIZONA_DSP1_WDMA_BUFFER_5:
case ARIZONA_DSP1_WDMA_BUFFER_6:
case ARIZONA_DSP1_WDMA_BUFFER_7:
case ARIZONA_DSP1_WDMA_BUFFER_8:
case ARIZONA_DSP1_RDMA_BUFFER_1:
case ARIZONA_DSP1_RDMA_BUFFER_2:
case ARIZONA_DSP1_RDMA_BUFFER_3:
case ARIZONA_DSP1_RDMA_BUFFER_4:
case ARIZONA_DSP1_RDMA_BUFFER_5:
case ARIZONA_DSP1_RDMA_BUFFER_6:
case ARIZONA_DSP1_WDMA_CONFIG_1:
case ARIZONA_DSP1_WDMA_CONFIG_2:
case ARIZONA_DSP1_RDMA_CONFIG_1:
case ARIZONA_DSP1_SCRATCH_0:
case ARIZONA_DSP1_SCRATCH_1:
case ARIZONA_DSP1_SCRATCH_2:
case ARIZONA_DSP1_SCRATCH_3:
case ARIZONA_HEADPHONE_DETECT_2:
case ARIZONA_HP_DACVAL:
case ARIZONA_MIC_DETECT_3:
return true;
default:
if ((reg >= 0x100000 && reg < 0x106000) ||
(reg >= 0x180000 && reg < 0x180800) ||
(reg >= 0x190000 && reg < 0x194800) ||
(reg >= 0x1a8000 && reg < 0x1a9800))
return true;
else
return false;
}
}
#define WM5102_MAX_REGISTER 0x1a9800
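/*
* The SPI and I2C maps differ only in padding: SPI needs 16 pad bits
* between the 32-bit register address and the 16-bit value, while I2C
* packs them back to back.
*/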
const struct regmap_config wm5102_spi_regmap = {
.reg_bits = 32,
.pad_bits = 16,
.val_bits = 16,
.max_register = WM5102_MAX_REGISTER,
.readable_reg = wm5102_readable_register,
.volatile_reg = wm5102_volatile_register,
.cache_type = REGCACHE_RBTREE,
.reg_defaults = wm5102_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm5102_reg_default),
};
EXPORT_SYMBOL_GPL(wm5102_spi_regmap);
const struct regmap_config wm5102_i2c_regmap = {
.reg_bits = 32,
.val_bits = 16,
.max_register = WM5102_MAX_REGISTER,
.readable_reg = wm5102_readable_register,
.volatile_reg = wm5102_volatile_register,
.cache_type = REGCACHE_RBTREE,
.reg_defaults = wm5102_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm5102_reg_default),
};
EXPORT_SYMBOL_GPL(wm5102_i2c_regmap);
| gpl-2.0 |
hyuh/villec2-kernel | kernel/power/suspend.c | 80 | 5963 | /*
* kernel/power/suspend.c - Suspend to RAM and standby functionality.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
*
* This file is released under the GPLv2.
*/
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <linux/rtc.h>
#include <trace/events/power.h>
#include "power.h"
const char *const pm_states[PM_SUSPEND_MAX] = {
#ifdef CONFIG_EARLYSUSPEND
[PM_SUSPEND_ON] = "on",
#endif
[PM_SUSPEND_STANDBY] = "standby",
[PM_SUSPEND_MEM] = "mem",
};
static const struct platform_suspend_ops *suspend_ops;
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
lock_system_sleep();
suspend_ops = ops;
unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);
bool valid_state(suspend_state_t state)
{
return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}
int suspend_valid_only_mem(suspend_state_t state)
{
return state == PM_SUSPEND_MEM;
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
if (pm_test_level == level) {
printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
mdelay(5000);
return 1;
}
#endif
return 0;
}
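/*
* suspend_prepare - switch the console, run the PM_SUSPEND_PREPARE
* notifier chain and freeze processes; on freezer failure the
* notifiers and console are unwound before the error is returned.
*/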
static int suspend_prepare(void)
{
int error;
if (!suspend_ops || !suspend_ops->enter)
return -EPERM;
pm_prepare_console();
error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
if (error)
goto Finish;
error = suspend_freeze_processes();
if (!error)
return 0;
suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
return error;
}
void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
{
local_irq_disable();
}
void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
{
local_irq_enable();
}
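/*
* suspend_enter - perform the late suspend sequence: platform
* ->prepare()/->prepare_late() hooks, late device suspend, non-boot
* CPU offlining, IRQ disabling and syscore suspend, and finally the
* platform ->enter() call. Failures unwind through the labels below.
*/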
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
int error;
if (suspend_ops->prepare) {
error = suspend_ops->prepare();
if (error)
goto Platform_finish;
}
error = dpm_suspend_end(PMSG_SUSPEND);
if (error) {
printk(KERN_ERR "PM: Some devices failed to power down\n");
goto Platform_finish;
}
if (suspend_ops->prepare_late) {
error = suspend_ops->prepare_late();
if (error)
goto Platform_wake;
}
if (suspend_test(TEST_PLATFORM))
goto Platform_wake;
error = disable_nonboot_cpus();
if (error || suspend_test(TEST_CPUS))
goto Enable_cpus;
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
error = syscore_suspend();
if (!error) {
*wakeup = pm_wakeup_pending();
if (!(suspend_test(TEST_CORE) || *wakeup)) {
error = suspend_ops->enter(state);
events_check_enabled = false;
}
syscore_resume();
}
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
Enable_cpus:
enable_nonboot_cpus();
Platform_wake:
if (suspend_ops->wake)
suspend_ops->wake();
dpm_resume_start(PMSG_RESUME);
Platform_finish:
if (suspend_ops->finish)
suspend_ops->finish();
return error;
}
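/*
* suspend_devices_and_enter - suspend devices and enter the given
* sleep state, looping via ->suspend_again() until an error occurs
* or a genuine wakeup event is pending.
*/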
int suspend_devices_and_enter(suspend_state_t state)
{
int error;
bool wakeup = false;
if (!suspend_ops)
return -ENOSYS;
trace_machine_suspend(state);
if (suspend_ops->begin) {
error = suspend_ops->begin(state);
if (error)
goto Close;
}
if (!suspend_console_deferred)
suspend_console();
ftrace_stop();
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
printk(KERN_ERR "PM: Some devices failed to suspend\n");
goto Recover_platform;
}
suspend_test_finish("suspend devices");
if (suspend_test(TEST_DEVICES))
goto Recover_platform;
do {
error = suspend_enter(state, &wakeup);
} while (!error && !wakeup
&& suspend_ops->suspend_again && suspend_ops->suspend_again());
Resume_devices:
suspend_test_start();
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
ftrace_start();
if (!suspend_console_deferred)
resume_console();
Close:
if (suspend_ops->end)
suspend_ops->end();
trace_machine_suspend(PWR_EVENT_EXIT);
return error;
Recover_platform:
if (suspend_ops->recover)
suspend_ops->recover();
goto Resume_devices;
}
static void suspend_finish(void)
{
suspend_thaw_processes();
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
}
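/*
* enter_state - top-level suspend path: validate the state, take
* pm_mutex without blocking, queue a filesystem sync, prepare
* (freeze), enter the state and unwind.
*/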
static int enter_state(suspend_state_t state)
{
int error;
if (!valid_state(state))
return -ENODEV;
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
suspend_sys_sync_queue();
pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
error = suspend_prepare();
if (error)
goto Unlock;
if (suspend_test(TEST_FREEZER))
goto Finish;
pr_debug("PM: Entering %s sleep\n", pm_states[state]);
pm_restrict_gfp_mask();
error = suspend_devices_and_enter(state);
pm_restore_gfp_mask();
Finish:
pr_debug("PM: Finishing wakeup.\n");
suspend_finish();
Unlock:
mutex_unlock(&pm_mutex);
return error;
}
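/*
* Log a wall-clock timestamp at suspend entry and exit so kernel
* logs can be correlated with RTC time.
*/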
static void pm_suspend_marker(char *annotation)
{
struct timespec ts;
struct rtc_time tm;
getnstimeofday(&ts);
rtc_time_to_tm(ts.tv_sec, &tm);
pr_info("PM: suspend %s %d-%02d-%02d %02d:%02d:%02d.%09lu UTC\n",
annotation, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
}
int pm_suspend(suspend_state_t state)
{
int error;
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
return -EINVAL;
pm_suspend_marker("entry");
error = enter_state(state);
if (error) {
suspend_stats.fail++;
dpm_save_failed_errno(error);
} else {
suspend_stats.success++;
}
pm_suspend_marker("exit");
return error;
}
EXPORT_SYMBOL(pm_suspend);
| gpl-2.0 |
ambikadash/linux-fqt | drivers/media/pci/ttpci/budget-ci.c | 592 | 45943 | /*
* budget-ci.c: driver for the SAA7146 based Budget DVB cards
*
* Compiled from various sources by Michael Hunold <michael@mihu.de>
*
* msp430 IR support contributed by Jack Thomasson <jkt@Helius.COM>
* partially based on the Siemens DVB driver by Ralph+Marcus Metzler
*
* CI interface support (c) 2004 Andrew de Quincey <adq_dvb@lidskialf.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*
*
* the project's page is at http://www.linuxtv.org/
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <media/rc-core.h>
#include "budget.h"
#include "dvb_ca_en50221.h"
#include "stv0299.h"
#include "stv0297.h"
#include "tda1004x.h"
#include "stb0899_drv.h"
#include "stb0899_reg.h"
#include "stb0899_cfg.h"
#include "stb6100.h"
#include "stb6100_cfg.h"
#include "lnbp21.h"
#include "bsbe1.h"
#include "bsru6.h"
#include "tda1002x.h"
#include "tda827x.h"
#include "bsbe1-d01a.h"
#define MODULE_NAME "budget_ci"
/*
* Regarding DEBIADDR_IR:
* Some CI modules hang if random addresses are read.
* Using address 0x4000 for the IR read means that we
* use the same address as for CI version, which should
* be a safe default.
*/
#define DEBIADDR_IR 0x4000
#define DEBIADDR_CICONTROL 0x0000
#define DEBIADDR_CIVERSION 0x4000
#define DEBIADDR_IO 0x1000
#define DEBIADDR_ATTR 0x3000
#define CICONTROL_RESET 0x01
#define CICONTROL_ENABLETS 0x02
#define CICONTROL_CAMDETECT 0x08
#define DEBICICTL 0x00420000
#define DEBICICAM 0x02420000
#define SLOTSTATUS_NONE 1
#define SLOTSTATUS_PRESENT 2
#define SLOTSTATUS_RESET 4
#define SLOTSTATUS_READY 8
#define SLOTSTATUS_OCCUPIED (SLOTSTATUS_PRESENT|SLOTSTATUS_RESET|SLOTSTATUS_READY)
/* RC5 device wildcard */
#define IR_DEVICE_ANY 255
static int rc5_device = -1;
module_param(rc5_device, int, 0644);
MODULE_PARM_DESC(rc5_device, "only IR commands to given RC5 device (device = 0 - 31, any device = 255, default: autodetect)");
static int ir_debug;
module_param(ir_debug, int, 0644);
MODULE_PARM_DESC(ir_debug, "enable debugging information for IR decoding");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
struct budget_ci_ir {
struct rc_dev *dev;
struct tasklet_struct msp430_irq_tasklet;
char name[72]; /* 40 + 32 for (struct saa7146_dev).name */
char phys[32];
int rc5_device;
u32 ir_key;
bool have_command;
bool full_rc5; /* Outputs a full RC5 code */
};
struct budget_ci {
struct budget budget;
struct tasklet_struct ciintf_irq_tasklet;
int slot_status;
int ci_irq;
struct dvb_ca_en50221 ca;
struct budget_ci_ir ir;
u8 tuner_pll_address; /* used for philips_tdm1316l configs */
};
static void msp430_ir_interrupt(unsigned long data)
{
struct budget_ci *budget_ci = (struct budget_ci *) data;
struct rc_dev *dev = budget_ci->ir.dev;
u32 command = ttpci_budget_debiread(&budget_ci->budget, DEBINOSWAP, DEBIADDR_IR, 2, 1, 0) >> 8;
/*
* The msp430 chip can generate two different bytes, command and device
*
* type1: X1CCCCCC, C = command bits (0 - 63)
* type2: X0TDDDDD, D = device bits (0 - 31), T = RC5 toggle bit
*
* Each signal from the remote control can generate one or more command
* bytes and one or more device bytes. For the repeated bytes, the
* highest bit (X) is set. The first command byte is always generated
* before the first device byte. Other than that, no specific order
* seems to apply. To make life interesting, bytes can also be lost.
*
* A keypress is generated only once we have received both a command
* byte and a device byte.
*/
if (ir_debug)
printk("budget_ci: received byte 0x%02x\n", command);
/* Remove repeat bit, we use every command */
command = command & 0x7f;
/* Is this a RC5 command byte? */
if (command & 0x40) {
budget_ci->ir.have_command = true;
budget_ci->ir.ir_key = command & 0x3f;
return;
}
/* It's a RC5 device byte */
if (!budget_ci->ir.have_command)
return;
budget_ci->ir.have_command = false;
if (budget_ci->ir.rc5_device != IR_DEVICE_ANY &&
budget_ci->ir.rc5_device != (command & 0x1f))
return;
if (budget_ci->ir.full_rc5) {
rc_keydown(dev,
budget_ci->ir.rc5_device << 8 | budget_ci->ir.ir_key,
(command & 0x20) ? 1 : 0);
return;
}
/* FIXME: We should generate complete scancodes for all devices */
rc_keydown(dev, budget_ci->ir.ir_key, (command & 0x20) ? 1 : 0);
}
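/*
* msp430_ir_init - allocate and register an rc-core device, derive
* the input name/phys from the SAA7146 device, select a keymap based
* on the PCI subsystem ID and enable the GPIO/IRQ line used by the
* msp430 IR receiver.
*/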
static int msp430_ir_init(struct budget_ci *budget_ci)
{
struct saa7146_dev *saa = budget_ci->budget.dev;
struct rc_dev *dev;
int error;
dev = rc_allocate_device();
if (!dev) {
printk(KERN_ERR "budget_ci: IR interface initialisation failed\n");
return -ENOMEM;
}
snprintf(budget_ci->ir.name, sizeof(budget_ci->ir.name),
"Budget-CI dvb ir receiver %s", saa->name);
snprintf(budget_ci->ir.phys, sizeof(budget_ci->ir.phys),
"pci-%s/ir0", pci_name(saa->pci));
dev->driver_name = MODULE_NAME;
dev->input_name = budget_ci->ir.name;
dev->input_phys = budget_ci->ir.phys;
dev->input_id.bustype = BUS_PCI;
dev->input_id.version = 1;
if (saa->pci->subsystem_vendor) {
dev->input_id.vendor = saa->pci->subsystem_vendor;
dev->input_id.product = saa->pci->subsystem_device;
} else {
dev->input_id.vendor = saa->pci->vendor;
dev->input_id.product = saa->pci->device;
}
dev->dev.parent = &saa->pci->dev;
if (rc5_device < 0)
budget_ci->ir.rc5_device = IR_DEVICE_ANY;
else
budget_ci->ir.rc5_device = rc5_device;
/* Select keymap and address */
switch (budget_ci->budget.dev->pci->subsystem_device) {
case 0x100c:
case 0x100f:
case 0x1011:
case 0x1012:
/* The hauppauge keymap is a superset of these remotes */
dev->map_name = RC_MAP_HAUPPAUGE;
budget_ci->ir.full_rc5 = true;
if (rc5_device < 0)
budget_ci->ir.rc5_device = 0x1f;
break;
case 0x1010:
case 0x1017:
case 0x1019:
case 0x101a:
case 0x101b:
/* for the Technotrend 1500 bundled remote */
dev->map_name = RC_MAP_TT_1500;
break;
default:
/* unknown remote */
dev->map_name = RC_MAP_BUDGET_CI_OLD;
break;
}
if (!budget_ci->ir.full_rc5)
dev->scanmask = 0xff;
error = rc_register_device(dev);
if (error) {
printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error);
rc_free_device(dev);
return error;
}
budget_ci->ir.dev = dev;
tasklet_init(&budget_ci->ir.msp430_irq_tasklet, msp430_ir_interrupt,
(unsigned long) budget_ci);
SAA7146_IER_ENABLE(saa, MASK_06);
saa7146_setgpio(saa, 3, SAA7146_GPIO_IRQHI);
return 0;
}
static void msp430_ir_deinit(struct budget_ci *budget_ci)
{
struct saa7146_dev *saa = budget_ci->budget.dev;
SAA7146_IER_DISABLE(saa, MASK_06);
saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT);
tasklet_kill(&budget_ci->ir.msp430_irq_tasklet);
rc_unregister_device(budget_ci->ir.dev);
}
static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address)
{
struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
if (slot != 0)
return -EINVAL;
return ttpci_budget_debiread(&budget_ci->budget, DEBICICAM,
DEBIADDR_ATTR | (address & 0xfff), 1, 1, 0);
}
static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value)
{
struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
if (slot != 0)
return -EINVAL;
return ttpci_budget_debiwrite(&budget_ci->budget, DEBICICAM,
DEBIADDR_ATTR | (address & 0xfff), 1, value, 1, 0);
}
static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address)
{
struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
if (slot != 0)
return -EINVAL;
return ttpci_budget_debiread(&budget_ci->budget, DEBICICAM,
DEBIADDR_IO | (address & 3), 1, 1, 0);
}
static int ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value)
{
struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
if (slot != 0)
return -EINVAL;
return ttpci_budget_debiwrite(&budget_ci->budget, DEBICICAM,
DEBIADDR_IO | (address & 3), 1, value, 1, 0);
}
static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
struct saa7146_dev *saa = budget_ci->budget.dev;
if (slot != 0)
return -EINVAL;
if (budget_ci->ci_irq) {
// trigger on RISING edge during reset so we know when READY is re-asserted
saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQHI);
}
budget_ci->slot_status = SLOTSTATUS_RESET;
ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 0, 1, 0);
msleep(1);
ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1,
CICONTROL_RESET, 1, 0);
saa7146_setgpio(saa, 1, SAA7146_GPIO_OUTHI);
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTB);
return 0;
}
static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
{
struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
struct saa7146_dev *saa = budget_ci->budget.dev;
if (slot != 0)
return -EINVAL;
saa7146_setgpio(saa, 1, SAA7146_GPIO_OUTHI);
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTB);
return 0;
}
static int ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
{
struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
struct saa7146_dev *saa = budget_ci->budget.dev;
int tmp;
if (slot != 0)
return -EINVAL;
saa7146_setgpio(saa, 1, SAA7146_GPIO_OUTLO);
tmp = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 1, 0);
ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1,
tmp | CICONTROL_ENABLETS, 1, 0);
ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTA);
return 0;
}
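/*
* CI slot state machine: the interrupt tasklet below moves the slot
* through NONE -> PRESENT -> RESET -> READY based on the CAMDETECT
* flag, flipping the GPIO IRQ edge so that the next transition is
* also caught.
*/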
static void ciintf_interrupt(unsigned long data)
{
struct budget_ci *budget_ci = (struct budget_ci *) data;
struct saa7146_dev *saa = budget_ci->budget.dev;
unsigned int flags;
// ensure we don't get spurious IRQs during initialisation
if (!budget_ci->budget.ci_present)
return;
// read the CAM status
flags = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 1, 0);
if (flags & CICONTROL_CAMDETECT) {
// GPIO should be set to trigger on falling edge if a CAM is present
saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQLO);
if (budget_ci->slot_status & SLOTSTATUS_NONE) {
// CAM insertion IRQ
budget_ci->slot_status = SLOTSTATUS_PRESENT;
dvb_ca_en50221_camchange_irq(&budget_ci->ca, 0,
DVB_CA_EN50221_CAMCHANGE_INSERTED);
} else if (budget_ci->slot_status & SLOTSTATUS_RESET) {
// CAM ready (reset completed)
budget_ci->slot_status = SLOTSTATUS_READY;
dvb_ca_en50221_camready_irq(&budget_ci->ca, 0);
} else if (budget_ci->slot_status & SLOTSTATUS_READY) {
// FR/DA IRQ
dvb_ca_en50221_frda_irq(&budget_ci->ca, 0);
}
} else {
// trigger on rising edge if a CAM is not present - when a CAM is inserted, we
// only want to get the IRQ when it sets READY. If we trigger on the falling edge,
// the CAM might not actually be ready yet.
saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQHI);
// generate a CAM removal IRQ if we haven't already
if (budget_ci->slot_status & SLOTSTATUS_OCCUPIED) {
// CAM removal IRQ
budget_ci->slot_status = SLOTSTATUS_NONE;
dvb_ca_en50221_camchange_irq(&budget_ci->ca, 0,
DVB_CA_EN50221_CAMCHANGE_REMOVED);
}
}
}
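/*
* Polled fallback for CI firmware that generates no interrupts
* (version 0xa2): derive the slot status from CAMDETECT and, while a
* reset is pending, probe attribute memory for the expected first
* CIS byte (0x1d) to detect that the CAM is ready.
*/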
static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
{
struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
unsigned int flags;
// ensure we don't get spurious IRQs during initialisation
if (!budget_ci->budget.ci_present)
return -EINVAL;
// read the CAM status
flags = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 1, 0);
if (flags & CICONTROL_CAMDETECT) {
// mark it as present if it wasn't before
if (budget_ci->slot_status & SLOTSTATUS_NONE) {
budget_ci->slot_status = SLOTSTATUS_PRESENT;
}
// during a RESET, we check if we can read from IO memory to see when CAM is ready
if (budget_ci->slot_status & SLOTSTATUS_RESET) {
if (ciintf_read_attribute_mem(ca, slot, 0) == 0x1d) {
budget_ci->slot_status = SLOTSTATUS_READY;
}
}
} else {
budget_ci->slot_status = SLOTSTATUS_NONE;
}
if (budget_ci->slot_status != SLOTSTATUS_NONE) {
if (budget_ci->slot_status & SLOTSTATUS_READY) {
return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY;
}
return DVB_CA_EN50221_POLL_CAM_PRESENT;
}
return 0;
}
static int ciintf_init(struct budget_ci *budget_ci)
{
struct saa7146_dev *saa = budget_ci->budget.dev;
int flags;
int result;
int ci_version;
int ca_flags;
memset(&budget_ci->ca, 0, sizeof(struct dvb_ca_en50221));
// enable DEBI pins
saa7146_write(saa, MC1, MASK_27 | MASK_11);
// test if it is there
ci_version = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CIVERSION, 1, 1, 0);
if ((ci_version & 0xa0) != 0xa0) {
result = -ENODEV;
goto error;
}
// determine whether a CAM is present or not
flags = ttpci_budget_debiread(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 1, 0);
budget_ci->slot_status = SLOTSTATUS_NONE;
if (flags & CICONTROL_CAMDETECT)
budget_ci->slot_status = SLOTSTATUS_PRESENT;
// version 0xa2 of the CI firmware doesn't generate interrupts
if (ci_version == 0xa2) {
ca_flags = 0;
budget_ci->ci_irq = 0;
} else {
ca_flags = DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE |
DVB_CA_EN50221_FLAG_IRQ_FR |
DVB_CA_EN50221_FLAG_IRQ_DA;
budget_ci->ci_irq = 1;
}
// register CI interface
budget_ci->ca.owner = THIS_MODULE;
budget_ci->ca.read_attribute_mem = ciintf_read_attribute_mem;
budget_ci->ca.write_attribute_mem = ciintf_write_attribute_mem;
budget_ci->ca.read_cam_control = ciintf_read_cam_control;
budget_ci->ca.write_cam_control = ciintf_write_cam_control;
budget_ci->ca.slot_reset = ciintf_slot_reset;
budget_ci->ca.slot_shutdown = ciintf_slot_shutdown;
budget_ci->ca.slot_ts_enable = ciintf_slot_ts_enable;
budget_ci->ca.poll_slot_status = ciintf_poll_slot_status;
budget_ci->ca.data = budget_ci;
if ((result = dvb_ca_en50221_init(&budget_ci->budget.dvb_adapter,
&budget_ci->ca,
ca_flags, 1)) != 0) {
printk("budget_ci: CI interface detected, but initialisation failed.\n");
goto error;
}
// Setup CI slot IRQ
if (budget_ci->ci_irq) {
tasklet_init(&budget_ci->ciintf_irq_tasklet, ciintf_interrupt, (unsigned long) budget_ci);
if (budget_ci->slot_status != SLOTSTATUS_NONE) {
saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQLO);
} else {
saa7146_setgpio(saa, 0, SAA7146_GPIO_IRQHI);
}
SAA7146_IER_ENABLE(saa, MASK_03);
}
// enable interface
ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1,
CICONTROL_RESET, 1, 0);
// success!
printk("budget_ci: CI interface initialised\n");
budget_ci->budget.ci_present = 1;
// forge a fake CI IRQ so the CAM state is setup correctly
if (budget_ci->ci_irq) {
flags = DVB_CA_EN50221_CAMCHANGE_REMOVED;
if (budget_ci->slot_status != SLOTSTATUS_NONE)
flags = DVB_CA_EN50221_CAMCHANGE_INSERTED;
dvb_ca_en50221_camchange_irq(&budget_ci->ca, 0, flags);
}
return 0;
error:
saa7146_write(saa, MC1, MASK_27);
return result;
}
static void ciintf_deinit(struct budget_ci *budget_ci)
{
struct saa7146_dev *saa = budget_ci->budget.dev;
// disable CI interrupts
if (budget_ci->ci_irq) {
SAA7146_IER_DISABLE(saa, MASK_03);
saa7146_setgpio(saa, 0, SAA7146_GPIO_INPUT);
tasklet_kill(&budget_ci->ciintf_irq_tasklet);
}
// reset interface
ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1, 0, 1, 0);
msleep(1);
ttpci_budget_debiwrite(&budget_ci->budget, DEBICICTL, DEBIADDR_CICONTROL, 1,
CICONTROL_RESET, 1, 0);
// disable TS data stream to CI interface
saa7146_setgpio(saa, 1, SAA7146_GPIO_INPUT);
// release the CA device
dvb_ca_en50221_release(&budget_ci->ca);
// disable DEBI pins
saa7146_write(saa, MC1, MASK_27);
}
static void budget_ci_irq(struct saa7146_dev *dev, u32 * isr)
{
struct budget_ci *budget_ci = (struct budget_ci *) dev->ext_priv;
dprintk(8, "dev: %p, budget_ci: %p\n", dev, budget_ci);
if (*isr & MASK_06)
tasklet_schedule(&budget_ci->ir.msp430_irq_tasklet);
if (*isr & MASK_10)
ttpci_budget_irq10_handler(dev, isr);
if ((*isr & MASK_03) && (budget_ci->budget.ci_present) && (budget_ci->ci_irq))
tasklet_schedule(&budget_ci->ciintf_irq_tasklet);
}
static u8 philips_su1278_tt_inittab[] = {
0x01, 0x0f,
0x02, 0x30,
0x03, 0x00,
0x04, 0x5b,
0x05, 0x85,
0x06, 0x02,
0x07, 0x00,
0x08, 0x02,
0x09, 0x00,
0x0C, 0x01,
0x0D, 0x81,
0x0E, 0x44,
0x0f, 0x14,
0x10, 0x3c,
0x11, 0x84,
0x12, 0xda,
0x13, 0x97,
0x14, 0x95,
0x15, 0xc9,
0x16, 0x19,
0x17, 0x8c,
0x18, 0x59,
0x19, 0xf8,
0x1a, 0xfe,
0x1c, 0x7f,
0x1d, 0x00,
0x1e, 0x00,
0x1f, 0x50,
0x20, 0x00,
0x21, 0x00,
0x22, 0x00,
0x23, 0x00,
0x28, 0x00,
0x29, 0x28,
0x2a, 0x14,
0x2b, 0x0f,
0x2c, 0x09,
0x2d, 0x09,
0x31, 0x1f,
0x32, 0x19,
0x33, 0xfc,
0x34, 0x93,
0xff, 0xff
};
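/*
* Two register sets are used depending on whether the symbol rate is
* at or above 10 MS/s; the 24-bit timing ratio is then programmed
* into registers 0x1f-0x21.
*/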
static int philips_su1278_tt_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32 ratio)
{
stv0299_writereg(fe, 0x0e, 0x44);
if (srate >= 10000000) {
stv0299_writereg(fe, 0x13, 0x97);
stv0299_writereg(fe, 0x14, 0x95);
stv0299_writereg(fe, 0x15, 0xc9);
stv0299_writereg(fe, 0x17, 0x8c);
stv0299_writereg(fe, 0x1a, 0xfe);
stv0299_writereg(fe, 0x1c, 0x7f);
stv0299_writereg(fe, 0x2d, 0x09);
} else {
stv0299_writereg(fe, 0x13, 0x99);
stv0299_writereg(fe, 0x14, 0x8d);
stv0299_writereg(fe, 0x15, 0xce);
stv0299_writereg(fe, 0x17, 0x43);
stv0299_writereg(fe, 0x1a, 0x1d);
stv0299_writereg(fe, 0x1c, 0x12);
stv0299_writereg(fe, 0x2d, 0x05);
}
stv0299_writereg(fe, 0x0e, 0x23);
stv0299_writereg(fe, 0x0f, 0x94);
stv0299_writereg(fe, 0x10, 0x39);
stv0299_writereg(fe, 0x15, 0xc9);
stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
stv0299_writereg(fe, 0x21, (ratio) & 0xf0);
return 0;
}
static int philips_su1278_tt_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct budget_ci *budget_ci = (struct budget_ci *) fe->dvb->priv;
u32 div;
u8 buf[4];
struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = buf,.len = sizeof(buf) };
if ((p->frequency < 950000) || (p->frequency > 2150000))
return -EINVAL;
div = (p->frequency + (500 - 1)) / 500; /* round correctly */
buf[0] = (div >> 8) & 0x7f;
buf[1] = div & 0xff;
buf[2] = 0x80 | ((div & 0x18000) >> 10) | 2;
buf[3] = 0x20;
if (p->symbol_rate < 4000000)
buf[3] |= 1;
if (p->frequency < 1250000)
buf[3] |= 0;
else if (p->frequency < 1550000)
buf[3] |= 0x40;
else if (p->frequency < 2050000)
buf[3] |= 0x80;
else if (p->frequency < 2150000)
buf[3] |= 0xC0;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget_ci->budget.i2c_adap, &msg, 1) != 1)
return -EIO;
return 0;
}
static struct stv0299_config philips_su1278_tt_config = {
.demod_address = 0x68,
.inittab = philips_su1278_tt_inittab,
.mclk = 64000000UL,
.invert = 0,
.skip_reinit = 1,
.lock_output = STV0299_LOCKOUTPUT_1,
.volt13_op0_op1 = STV0299_VOLT13_OP1,
.min_delay_ms = 50,
.set_symbol_rate = philips_su1278_tt_set_symbol_rate,
};
static int philips_tdm1316l_tuner_init(struct dvb_frontend *fe)
{
struct budget_ci *budget_ci = (struct budget_ci *) fe->dvb->priv;
static u8 td1316_init[] = { 0x0b, 0xf5, 0x85, 0xab };
static u8 disable_mc44BC374c[] = { 0x1d, 0x74, 0xa0, 0x68 };
struct i2c_msg tuner_msg = {.addr = budget_ci->tuner_pll_address,.flags = 0,.buf = td1316_init,.len =
sizeof(td1316_init) };
// setup PLL configuration
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget_ci->budget.i2c_adap, &tuner_msg, 1) != 1)
return -EIO;
msleep(1);
// disable the mc44BC374c (do not check for errors)
tuner_msg.addr = 0x65;
tuner_msg.buf = disable_mc44BC374c;
tuner_msg.len = sizeof(disable_mc44BC374c);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget_ci->budget.i2c_adap, &tuner_msg, 1) != 1) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
i2c_transfer(&budget_ci->budget.i2c_adap, &tuner_msg, 1);
}
return 0;
}
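/*
* Program the tuner PLL: charge pump and band are picked from
* frequency thresholds (the charge-pump table is keyed on the
* IF-shifted frequency), the divisor uses a 1 MHz/6 step, and the
* last byte packs cp/filter/band together.
*/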
static int philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct budget_ci *budget_ci = (struct budget_ci *) fe->dvb->priv;
u8 tuner_buf[4];
struct i2c_msg tuner_msg = {.addr = budget_ci->tuner_pll_address,.flags = 0,.buf = tuner_buf,.len = sizeof(tuner_buf) };
int tuner_frequency = 0;
u8 band, cp, filter;
// determine charge pump
tuner_frequency = p->frequency + 36130000;
if (tuner_frequency < 87000000)
return -EINVAL;
else if (tuner_frequency < 130000000)
cp = 3;
else if (tuner_frequency < 160000000)
cp = 5;
else if (tuner_frequency < 200000000)
cp = 6;
else if (tuner_frequency < 290000000)
cp = 3;
else if (tuner_frequency < 420000000)
cp = 5;
else if (tuner_frequency < 480000000)
cp = 6;
else if (tuner_frequency < 620000000)
cp = 3;
else if (tuner_frequency < 830000000)
cp = 5;
else if (tuner_frequency < 895000000)
cp = 7;
else
return -EINVAL;
// determine band
if (p->frequency < 49000000)
return -EINVAL;
else if (p->frequency < 159000000)
band = 1;
else if (p->frequency < 444000000)
band = 2;
else if (p->frequency < 861000000)
band = 4;
else
return -EINVAL;
// setup PLL filter and TDA9889
switch (p->bandwidth_hz) {
case 6000000:
tda1004x_writereg(fe, 0x0C, 0x14);
filter = 0;
break;
case 7000000:
tda1004x_writereg(fe, 0x0C, 0x80);
filter = 0;
break;
case 8000000:
tda1004x_writereg(fe, 0x0C, 0x14);
filter = 1;
break;
default:
return -EINVAL;
}
// calculate divisor
// ((36130000+((1000000/6)/2)) + Finput)/(1000000/6)
tuner_frequency = (((p->frequency / 1000) * 6) + 217280) / 1000;
// setup tuner buffer
tuner_buf[0] = tuner_frequency >> 8;
tuner_buf[1] = tuner_frequency & 0xff;
tuner_buf[2] = 0xca;
tuner_buf[3] = (cp << 5) | (filter << 3) | band;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget_ci->budget.i2c_adap, &tuner_msg, 1) != 1)
return -EIO;
msleep(1);
return 0;
}
static int philips_tdm1316l_request_firmware(struct dvb_frontend *fe,
const struct firmware **fw, char *name)
{
struct budget_ci *budget_ci = (struct budget_ci *) fe->dvb->priv;
return request_firmware(fw, name, &budget_ci->budget.dev->pci->dev);
}
static struct tda1004x_config philips_tdm1316l_config = {
.demod_address = 0x8,
.invert = 0,
.invert_oclk = 0,
.xtal_freq = TDA10046_XTAL_4M,
.agc_config = TDA10046_AGC_DEFAULT,
.if_freq = TDA10046_FREQ_3617,
.request_firmware = philips_tdm1316l_request_firmware,
};
static struct tda1004x_config philips_tdm1316l_config_invert = {
.demod_address = 0x8,
.invert = 1,
.invert_oclk = 0,
.xtal_freq = TDA10046_XTAL_4M,
.agc_config = TDA10046_AGC_DEFAULT,
.if_freq = TDA10046_FREQ_3617,
.request_firmware = philips_tdm1316l_request_firmware,
};
static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct budget_ci *budget_ci = (struct budget_ci *) fe->dvb->priv;
u8 tuner_buf[5];
struct i2c_msg tuner_msg = {.addr = budget_ci->tuner_pll_address,
.flags = 0,
.buf = tuner_buf,
.len = sizeof(tuner_buf) };
int tuner_frequency = 0;
u8 band, cp, filter;
// determine charge pump
tuner_frequency = p->frequency + 36125000;
if (tuner_frequency < 87000000)
return -EINVAL;
else if (tuner_frequency < 130000000) {
cp = 3;
band = 1;
} else if (tuner_frequency < 160000000) {
cp = 5;
band = 1;
} else if (tuner_frequency < 200000000) {
cp = 6;
band = 1;
} else if (tuner_frequency < 290000000) {
cp = 3;
band = 2;
} else if (tuner_frequency < 420000000) {
cp = 5;
band = 2;
} else if (tuner_frequency < 480000000) {
cp = 6;
band = 2;
} else if (tuner_frequency < 620000000) {
cp = 3;
band = 4;
} else if (tuner_frequency < 830000000) {
cp = 5;
band = 4;
} else if (tuner_frequency < 895000000) {
cp = 7;
band = 4;
} else
return -EINVAL;
// assume PLL filter should always be 8MHz for the moment.
filter = 1;
// calculate divisor
tuner_frequency = (p->frequency + 36125000 + (62500/2)) / 62500;
// setup tuner buffer
tuner_buf[0] = tuner_frequency >> 8;
tuner_buf[1] = tuner_frequency & 0xff;
tuner_buf[2] = 0xc8;
tuner_buf[3] = (cp << 5) | (filter << 3) | band;
tuner_buf[4] = 0x80;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget_ci->budget.i2c_adap, &tuner_msg, 1) != 1)
return -EIO;
msleep(50);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&budget_ci->budget.i2c_adap, &tuner_msg, 1) != 1)
return -EIO;
msleep(1);
return 0;
}
static u8 dvbc_philips_tdm1316l_inittab[] = {
0x80, 0x01,
0x80, 0x00,
0x81, 0x01,
0x81, 0x00,
0x00, 0x09,
0x01, 0x69,
0x03, 0x00,
0x04, 0x00,
0x07, 0x00,
0x08, 0x00,
0x20, 0x00,
0x21, 0x40,
0x22, 0x00,
0x23, 0x00,
0x24, 0x40,
0x25, 0x88,
0x30, 0xff,
0x31, 0x00,
0x32, 0xff,
0x33, 0x00,
0x34, 0x50,
0x35, 0x7f,
0x36, 0x00,
0x37, 0x20,
0x38, 0x00,
0x40, 0x1c,
0x41, 0xff,
0x42, 0x29,
0x43, 0x20,
0x44, 0xff,
0x45, 0x00,
0x46, 0x00,
0x49, 0x04,
0x4a, 0x00,
0x4b, 0x7b,
0x52, 0x30,
0x55, 0xae,
0x56, 0x47,
0x57, 0xe1,
0x58, 0x3a,
0x5a, 0x1e,
0x5b, 0x34,
0x60, 0x00,
0x63, 0x00,
0x64, 0x00,
0x65, 0x00,
0x66, 0x00,
0x67, 0x00,
0x68, 0x00,
0x69, 0x00,
0x6a, 0x02,
0x6b, 0x00,
0x70, 0xff,
0x71, 0x00,
0x72, 0x00,
0x73, 0x00,
0x74, 0x0c,
0x80, 0x00,
0x81, 0x00,
0x82, 0x00,
0x83, 0x00,
0x84, 0x04,
0x85, 0x80,
0x86, 0x24,
0x87, 0x78,
0x88, 0x10,
0x89, 0x00,
0x90, 0x01,
0x91, 0x01,
0xa0, 0x04,
0xa1, 0x00,
0xa2, 0x00,
0xb0, 0x91,
0xb1, 0x0b,
0xc0, 0x53,
0xc1, 0x70,
0xc2, 0x12,
0xd0, 0x00,
0xd1, 0x00,
0xd2, 0x00,
0xd3, 0x00,
0xd4, 0x00,
0xd5, 0x00,
0xde, 0x00,
0xdf, 0x00,
0x61, 0x38,
0x62, 0x0a,
0x53, 0x13,
0x59, 0x08,
0xff, 0xff,
};
static struct stv0297_config dvbc_philips_tdm1316l_config = {
.demod_address = 0x1c,
.inittab = dvbc_philips_tdm1316l_inittab,
.invert = 0,
.stop_during_read = 1,
};
static struct tda10023_config tda10023_config = {
.demod_address = 0xc,
.invert = 0,
.xtal = 16000000,
.pll_m = 11,
.pll_p = 3,
.pll_n = 1,
.deltaf = 0xa511,
};
static struct tda827x_config tda827x_config = {
.config = 0,
};
/* TT S2-3200 DVB-S (STB0899) Inittab */
static const struct stb0899_s1_reg tt3200_stb0899_s1_init_1[] = {
{ STB0899_DEV_ID , 0x81 },
{ STB0899_DISCNTRL1 , 0x32 },
{ STB0899_DISCNTRL2 , 0x80 },
{ STB0899_DISRX_ST0 , 0x04 },
{ STB0899_DISRX_ST1 , 0x00 },
{ STB0899_DISPARITY , 0x00 },
{ STB0899_DISSTATUS , 0x20 },
{ STB0899_DISF22 , 0x8c },
{ STB0899_DISF22RX , 0x9a },
{ STB0899_SYSREG , 0x0b },
{ STB0899_ACRPRESC , 0x11 },
{ STB0899_ACRDIV1 , 0x0a },
{ STB0899_ACRDIV2 , 0x05 },
{ STB0899_DACR1 , 0x00 },
{ STB0899_DACR2 , 0x00 },
{ STB0899_OUTCFG , 0x00 },
{ STB0899_MODECFG , 0x00 },
{ STB0899_IRQSTATUS_3 , 0x30 },
{ STB0899_IRQSTATUS_2 , 0x00 },
{ STB0899_IRQSTATUS_1 , 0x00 },
{ STB0899_IRQSTATUS_0 , 0x00 },
{ STB0899_IRQMSK_3 , 0xf3 },
{ STB0899_IRQMSK_2 , 0xfc },
{ STB0899_IRQMSK_1 , 0xff },
{ STB0899_IRQMSK_0 , 0xff },
{ STB0899_IRQCFG , 0x00 },
{ STB0899_I2CCFG , 0x88 },
{ STB0899_I2CRPT , 0x48 }, /* 12k Pullup, Repeater=16, Stop=disabled */
{ STB0899_IOPVALUE5 , 0x00 },
{ STB0899_IOPVALUE4 , 0x20 },
{ STB0899_IOPVALUE3 , 0xc9 },
{ STB0899_IOPVALUE2 , 0x90 },
{ STB0899_IOPVALUE1 , 0x40 },
{ STB0899_IOPVALUE0 , 0x00 },
{ STB0899_GPIO00CFG , 0x82 },
{ STB0899_GPIO01CFG , 0x82 },
{ STB0899_GPIO02CFG , 0x82 },
{ STB0899_GPIO03CFG , 0x82 },
{ STB0899_GPIO04CFG , 0x82 },
{ STB0899_GPIO05CFG , 0x82 },
{ STB0899_GPIO06CFG , 0x82 },
{ STB0899_GPIO07CFG , 0x82 },
{ STB0899_GPIO08CFG , 0x82 },
{ STB0899_GPIO09CFG , 0x82 },
{ STB0899_GPIO10CFG , 0x82 },
{ STB0899_GPIO11CFG , 0x82 },
{ STB0899_GPIO12CFG , 0x82 },
{ STB0899_GPIO13CFG , 0x82 },
{ STB0899_GPIO14CFG , 0x82 },
{ STB0899_GPIO15CFG , 0x82 },
{ STB0899_GPIO16CFG , 0x82 },
{ STB0899_GPIO17CFG , 0x82 },
{ STB0899_GPIO18CFG , 0x82 },
{ STB0899_GPIO19CFG , 0x82 },
{ STB0899_GPIO20CFG , 0x82 },
{ STB0899_SDATCFG , 0xb8 },
{ STB0899_SCLTCFG , 0xba },
{ STB0899_AGCRFCFG , 0x1c }, /* 0x11 */
{ STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */
{ STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */
{ STB0899_DIRCLKCFG , 0x82 },
{ STB0899_CLKOUT27CFG , 0x7e },
{ STB0899_STDBYCFG , 0x82 },
{ STB0899_CS0CFG , 0x82 },
{ STB0899_CS1CFG , 0x82 },
{ STB0899_DISEQCOCFG , 0x20 },
{ STB0899_GPIO32CFG , 0x82 },
{ STB0899_GPIO33CFG , 0x82 },
{ STB0899_GPIO34CFG , 0x82 },
{ STB0899_GPIO35CFG , 0x82 },
{ STB0899_GPIO36CFG , 0x82 },
{ STB0899_GPIO37CFG , 0x82 },
{ STB0899_GPIO38CFG , 0x82 },
{ STB0899_GPIO39CFG , 0x82 },
{ STB0899_NCOARSE , 0x15 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 99MHz */
{ STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
{ STB0899_FILTCTRL , 0x00 },
{ STB0899_SYSCTRL , 0x00 },
{ STB0899_STOPCLK1 , 0x20 },
{ STB0899_STOPCLK2 , 0x00 },
{ STB0899_INTBUFSTATUS , 0x00 },
{ STB0899_INTBUFCTRL , 0x0a },
{ 0xffff , 0xff },
};
static const struct stb0899_s1_reg tt3200_stb0899_s1_init_3[] = {
{ STB0899_DEMOD , 0x00 },
{ STB0899_RCOMPC , 0xc9 },
{ STB0899_AGC1CN , 0x41 },
{ STB0899_AGC1REF , 0x10 },
{ STB0899_RTC , 0x7a },
{ STB0899_TMGCFG , 0x4e },
{ STB0899_AGC2REF , 0x34 },
{ STB0899_TLSR , 0x84 },
{ STB0899_CFD , 0xc7 },
{ STB0899_ACLC , 0x87 },
{ STB0899_BCLC , 0x94 },
{ STB0899_EQON , 0x41 },
{ STB0899_LDT , 0xdd },
{ STB0899_LDT2 , 0xc9 },
{ STB0899_EQUALREF , 0xb4 },
{ STB0899_TMGRAMP , 0x10 },
{ STB0899_TMGTHD , 0x30 },
{ STB0899_IDCCOMP , 0xfb },
{ STB0899_QDCCOMP , 0x03 },
{ STB0899_POWERI , 0x3b },
{ STB0899_POWERQ , 0x3d },
{ STB0899_RCOMP , 0x81 },
{ STB0899_AGCIQIN , 0x80 },
{ STB0899_AGC2I1 , 0x04 },
{ STB0899_AGC2I2 , 0xf5 },
{ STB0899_TLIR , 0x25 },
{ STB0899_RTF , 0x80 },
{ STB0899_DSTATUS , 0x00 },
{ STB0899_LDI , 0xca },
{ STB0899_CFRM , 0xf1 },
{ STB0899_CFRL , 0xf3 },
{ STB0899_NIRM , 0x2a },
{ STB0899_NIRL , 0x05 },
{ STB0899_ISYMB , 0x17 },
{ STB0899_QSYMB , 0xfa },
{ STB0899_SFRH , 0x2f },
{ STB0899_SFRM , 0x68 },
{ STB0899_SFRL , 0x40 },
{ STB0899_SFRUPH , 0x2f },
{ STB0899_SFRUPM , 0x68 },
{ STB0899_SFRUPL , 0x40 },
{ STB0899_EQUAI1 , 0xfd },
{ STB0899_EQUAQ1 , 0x04 },
{ STB0899_EQUAI2 , 0x0f },
{ STB0899_EQUAQ2 , 0xff },
{ STB0899_EQUAI3 , 0xdf },
{ STB0899_EQUAQ3 , 0xfa },
{ STB0899_EQUAI4 , 0x37 },
{ STB0899_EQUAQ4 , 0x0d },
{ STB0899_EQUAI5 , 0xbd },
{ STB0899_EQUAQ5 , 0xf7 },
{ STB0899_DSTATUS2 , 0x00 },
{ STB0899_VSTATUS , 0x00 },
{ STB0899_VERROR , 0xff },
{ STB0899_IQSWAP , 0x2a },
{ STB0899_ECNT1M , 0x00 },
{ STB0899_ECNT1L , 0x00 },
{ STB0899_ECNT2M , 0x00 },
{ STB0899_ECNT2L , 0x00 },
{ STB0899_ECNT3M , 0x00 },
{ STB0899_ECNT3L , 0x00 },
{ STB0899_FECAUTO1 , 0x06 },
{ STB0899_FECM , 0x01 },
{ STB0899_VTH12 , 0xf0 },
{ STB0899_VTH23 , 0xa0 },
{ STB0899_VTH34 , 0x78 },
{ STB0899_VTH56 , 0x4e },
{ STB0899_VTH67 , 0x48 },
{ STB0899_VTH78 , 0x38 },
{ STB0899_PRVIT , 0xff },
{ STB0899_VITSYNC , 0x19 },
{ STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
{ STB0899_TSULC , 0x42 },
{ STB0899_RSLLC , 0x40 },
{ STB0899_TSLPL , 0x12 },
{ STB0899_TSCFGH , 0x0c },
{ STB0899_TSCFGM , 0x00 },
{ STB0899_TSCFGL , 0x0c },
{ STB0899_TSOUT , 0x4d }, /* 0x0d for CAM */
{ STB0899_RSSYNCDEL , 0x00 },
{ STB0899_TSINHDELH , 0x02 },
{ STB0899_TSINHDELM , 0x00 },
{ STB0899_TSINHDELL , 0x00 },
{ STB0899_TSLLSTKM , 0x00 },
{ STB0899_TSLLSTKL , 0x00 },
{ STB0899_TSULSTKM , 0x00 },
{ STB0899_TSULSTKL , 0xab },
{ STB0899_PCKLENUL , 0x00 },
{ STB0899_PCKLENLL , 0xcc },
{ STB0899_RSPCKLEN , 0xcc },
{ STB0899_TSSTATUS , 0x80 },
{ STB0899_ERRCTRL1 , 0xb6 },
{ STB0899_ERRCTRL2 , 0x96 },
{ STB0899_ERRCTRL3 , 0x89 },
{ STB0899_DMONMSK1 , 0x27 },
{ STB0899_DMONMSK0 , 0x03 },
{ STB0899_DEMAPVIT , 0x5c },
{ STB0899_PLPARM , 0x1f },
{ STB0899_PDELCTRL , 0x48 },
{ STB0899_PDELCTRL2 , 0x00 },
{ STB0899_BBHCTRL1 , 0x00 },
{ STB0899_BBHCTRL2 , 0x00 },
{ STB0899_HYSTTHRESH , 0x77 },
{ STB0899_MATCSTM , 0x00 },
{ STB0899_MATCSTL , 0x00 },
{ STB0899_UPLCSTM , 0x00 },
{ STB0899_UPLCSTL , 0x00 },
{ STB0899_DFLCSTM , 0x00 },
{ STB0899_DFLCSTL , 0x00 },
{ STB0899_SYNCCST , 0x00 },
{ STB0899_SYNCDCSTM , 0x00 },
{ STB0899_SYNCDCSTL , 0x00 },
{ STB0899_ISI_ENTRY , 0x00 },
{ STB0899_ISI_BIT_EN , 0x00 },
{ STB0899_MATSTRM , 0x00 },
{ STB0899_MATSTRL , 0x00 },
{ STB0899_UPLSTRM , 0x00 },
{ STB0899_UPLSTRL , 0x00 },
{ STB0899_DFLSTRM , 0x00 },
{ STB0899_DFLSTRL , 0x00 },
{ STB0899_SYNCSTR , 0x00 },
{ STB0899_SYNCDSTRM , 0x00 },
{ STB0899_SYNCDSTRL , 0x00 },
{ STB0899_CFGPDELSTATUS1 , 0x10 },
{ STB0899_CFGPDELSTATUS2 , 0x00 },
{ STB0899_BBFERRORM , 0x00 },
{ STB0899_BBFERRORL , 0x00 },
{ STB0899_UPKTERRORM , 0x00 },
{ STB0899_UPKTERRORL , 0x00 },
{ 0xffff , 0xff },
};
static struct stb0899_config tt3200_config = {
.init_dev = tt3200_stb0899_s1_init_1,
.init_s2_demod = stb0899_s2_init_2,
.init_s1_demod = tt3200_stb0899_s1_init_3,
.init_s2_fec = stb0899_s2_init_4,
.init_tst = stb0899_s1_init_5,
.postproc = NULL,
.demod_address = 0x68,
.xtal_freq = 27000000,
.inversion = IQ_SWAP_ON,
.lo_clk = 76500000,
.hi_clk = 99000000,
.esno_ave = STB0899_DVBS2_ESNO_AVE,
.esno_quant = STB0899_DVBS2_ESNO_QUANT,
.avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE,
.avframes_fine = STB0899_DVBS2_AVFRAMES_FINE,
.miss_threshold = STB0899_DVBS2_MISS_THRESHOLD,
.uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ,
.uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK,
.uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF,
.sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT,
.btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS,
.btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET,
.crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS,
.ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER,
.tuner_get_frequency = stb6100_get_frequency,
.tuner_set_frequency = stb6100_set_frequency,
.tuner_set_bandwidth = stb6100_set_bandwidth,
.tuner_get_bandwidth = stb6100_get_bandwidth,
.tuner_set_rfsiggain = NULL
};
static struct stb6100_config tt3200_stb6100_config = {
.tuner_address = 0x60,
.refclock = 27000000,
};
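/*
* frontend_init - pick the demod/tuner combination from the PCI
* subsystem device ID; if any attach step fails, the partially
* attached frontend is detached and the pointer cleared so that
* registration is skipped with a diagnostic.
*/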
static void frontend_init(struct budget_ci *budget_ci)
{
switch (budget_ci->budget.dev->pci->subsystem_device) {
case 0x100c: // Hauppauge/TT Nova-CI budget (stv0299/ALPS BSRU6(tsa5059))
budget_ci->budget.dvb_frontend =
dvb_attach(stv0299_attach, &alps_bsru6_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
budget_ci->budget.dvb_frontend->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params;
budget_ci->budget.dvb_frontend->tuner_priv = &budget_ci->budget.i2c_adap;
break;
}
break;
case 0x100f: // Hauppauge/TT Nova-CI budget (stv0299b/Philips su1278(tsa5059))
budget_ci->budget.dvb_frontend =
dvb_attach(stv0299_attach, &philips_su1278_tt_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
budget_ci->budget.dvb_frontend->ops.tuner_ops.set_params = philips_su1278_tt_tuner_set_params;
break;
}
break;
case 0x1010: // TT DVB-C CI budget (stv0297/Philips tdm1316l(tda6651tt))
budget_ci->tuner_pll_address = 0x61;
budget_ci->budget.dvb_frontend =
dvb_attach(stv0297_attach, &dvbc_philips_tdm1316l_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
budget_ci->budget.dvb_frontend->ops.tuner_ops.set_params = dvbc_philips_tdm1316l_tuner_set_params;
break;
}
break;
case 0x1011: // Hauppauge/TT Nova-T budget (tda10045/Philips tdm1316l(tda6651tt) + TDA9889)
budget_ci->tuner_pll_address = 0x63;
budget_ci->budget.dvb_frontend =
dvb_attach(tda10045_attach, &philips_tdm1316l_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
budget_ci->budget.dvb_frontend->ops.tuner_ops.init = philips_tdm1316l_tuner_init;
budget_ci->budget.dvb_frontend->ops.tuner_ops.set_params = philips_tdm1316l_tuner_set_params;
break;
}
break;
case 0x1012: // TT DVB-T CI budget (tda10046/Philips tdm1316l(tda6651tt))
budget_ci->tuner_pll_address = 0x60;
budget_ci->budget.dvb_frontend =
dvb_attach(tda10046_attach, &philips_tdm1316l_config_invert, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
budget_ci->budget.dvb_frontend->ops.tuner_ops.init = philips_tdm1316l_tuner_init;
budget_ci->budget.dvb_frontend->ops.tuner_ops.set_params = philips_tdm1316l_tuner_set_params;
break;
}
break;
case 0x1017: // TT S-1500 PCI
budget_ci->budget.dvb_frontend = dvb_attach(stv0299_attach, &alps_bsbe1_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
budget_ci->budget.dvb_frontend->ops.tuner_ops.set_params = alps_bsbe1_tuner_set_params;
budget_ci->budget.dvb_frontend->tuner_priv = &budget_ci->budget.i2c_adap;
budget_ci->budget.dvb_frontend->ops.dishnetwork_send_legacy_command = NULL;
if (dvb_attach(lnbp21_attach, budget_ci->budget.dvb_frontend, &budget_ci->budget.i2c_adap, LNBP21_LLC, 0) == NULL) {
printk("%s: No LNBP21 found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
}
break;
case 0x101a: /* TT Budget-C-1501 (philips tda10023/philips tda8274A) */
budget_ci->budget.dvb_frontend = dvb_attach(tda10023_attach, &tda10023_config, &budget_ci->budget.i2c_adap, 0x48);
if (budget_ci->budget.dvb_frontend) {
if (dvb_attach(tda827x_attach, budget_ci->budget.dvb_frontend, 0x61, &budget_ci->budget.i2c_adap, &tda827x_config) == NULL) {
printk(KERN_ERR "%s: No tda827x found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
}
break;
case 0x101b: /* TT S-1500B (BSBE1-D01A - STV0288/STB6000/LNBP21) */
budget_ci->budget.dvb_frontend = dvb_attach(stv0288_attach, &stv0288_bsbe1_d01a_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
if (dvb_attach(stb6000_attach, budget_ci->budget.dvb_frontend, 0x63, &budget_ci->budget.i2c_adap)) {
if (!dvb_attach(lnbp21_attach, budget_ci->budget.dvb_frontend, &budget_ci->budget.i2c_adap, 0, 0)) {
printk(KERN_ERR "%s: No LNBP21 found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
} else {
printk(KERN_ERR "%s: No STB6000 found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
}
break;
case 0x1019: // TT S2-3200 PCI
/*
* NOTE: on some STB0899 revisions the internal PLL takes longer to
* settle (i.e. to LOCK). Older revisions of the chip do not show
* this, but on the newer chips the entire clock tree will not be
* stable immediately after a fresh power-up.
* In that case we must RESET the STB0899 (active LOW) and wait for
* the PLL to stabilize.
*
* On the TT S2-3200 and clones, the STB0899 demodulator's RESETB is
* connected to SAA7146 GPIO2 (pin 142).
*/
/* Reset Demodulator */
saa7146_setgpio(budget_ci->budget.dev, 2, SAA7146_GPIO_OUTLO);
/* Wait for everything to die */
msleep(50);
/* Pull it up out of Reset state */
saa7146_setgpio(budget_ci->budget.dev, 2, SAA7146_GPIO_OUTHI);
/* Wait for PLL to stabilize */
msleep(250);
/*
* PLL state should be stable now. Ideally, we should check
* for PLL LOCK status. But well, never mind!
*/
budget_ci->budget.dvb_frontend = dvb_attach(stb0899_attach, &tt3200_config, &budget_ci->budget.i2c_adap);
if (budget_ci->budget.dvb_frontend) {
if (dvb_attach(stb6100_attach, budget_ci->budget.dvb_frontend, &tt3200_stb6100_config, &budget_ci->budget.i2c_adap)) {
if (!dvb_attach(lnbp21_attach, budget_ci->budget.dvb_frontend, &budget_ci->budget.i2c_adap, 0, 0)) {
printk("%s: No LNBP21 found!\n", __func__);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
} else {
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
}
break;
}
if (budget_ci->budget.dvb_frontend == NULL) {
printk("budget-ci: A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
budget_ci->budget.dev->pci->vendor,
budget_ci->budget.dev->pci->device,
budget_ci->budget.dev->pci->subsystem_vendor,
budget_ci->budget.dev->pci->subsystem_device);
} else {
if (dvb_register_frontend
(&budget_ci->budget.dvb_adapter, budget_ci->budget.dvb_frontend)) {
printk("budget-ci: Frontend registration failed!\n");
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
budget_ci->budget.dvb_frontend = NULL;
}
}
}
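/*
* budget_ci_attach - allocate driver state, bring up the budget
* core, the IR receiver, the (optional) CI interface and the
* frontend; the error labels unwind in reverse order. A CI init
* failure is not fatal: the card then works as a plain budget card.
*/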
static int budget_ci_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
{
struct budget_ci *budget_ci;
int err;
budget_ci = kzalloc(sizeof(struct budget_ci), GFP_KERNEL);
if (!budget_ci) {
err = -ENOMEM;
goto out1;
}
dprintk(2, "budget_ci: %p\n", budget_ci);
dev->ext_priv = budget_ci;
err = ttpci_budget_init(&budget_ci->budget, dev, info, THIS_MODULE,
adapter_nr);
if (err)
goto out2;
err = msp430_ir_init(budget_ci);
if (err)
goto out3;
ciintf_init(budget_ci);
budget_ci->budget.dvb_adapter.priv = budget_ci;
frontend_init(budget_ci);
ttpci_budget_init_hooks(&budget_ci->budget);
return 0;
out3:
ttpci_budget_deinit(&budget_ci->budget);
out2:
kfree(budget_ci);
out1:
return err;
}
static int budget_ci_detach(struct saa7146_dev *dev)
{
struct budget_ci *budget_ci = (struct budget_ci *) dev->ext_priv;
struct saa7146_dev *saa = budget_ci->budget.dev;
int err;
if (budget_ci->budget.ci_present)
ciintf_deinit(budget_ci);
msp430_ir_deinit(budget_ci);
if (budget_ci->budget.dvb_frontend) {
dvb_unregister_frontend(budget_ci->budget.dvb_frontend);
dvb_frontend_detach(budget_ci->budget.dvb_frontend);
}
err = ttpci_budget_deinit(&budget_ci->budget);
// disable frontend and CI interface
saa7146_setgpio(saa, 2, SAA7146_GPIO_INPUT);
kfree(budget_ci);
return err;
}
static struct saa7146_extension budget_extension;
MAKE_BUDGET_INFO(ttbs2, "TT-Budget/S-1500 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbci, "TT-Budget/WinTV-NOVA-CI PCI", BUDGET_TT_HW_DISEQC);
MAKE_BUDGET_INFO(ttbt2, "TT-Budget/WinTV-NOVA-T PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbtci, "TT-Budget-T-CI PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbcci, "TT-Budget-C-CI PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttc1501, "TT-Budget C-1501 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(tt3200, "TT-Budget S2-3200 PCI", BUDGET_TT);
MAKE_BUDGET_INFO(ttbs1500b, "TT-Budget S-1500B PCI", BUDGET_TT);
static struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(ttbci, 0x13c2, 0x100c),
MAKE_EXTENSION_PCI(ttbci, 0x13c2, 0x100f),
MAKE_EXTENSION_PCI(ttbcci, 0x13c2, 0x1010),
MAKE_EXTENSION_PCI(ttbt2, 0x13c2, 0x1011),
MAKE_EXTENSION_PCI(ttbtci, 0x13c2, 0x1012),
MAKE_EXTENSION_PCI(ttbs2, 0x13c2, 0x1017),
MAKE_EXTENSION_PCI(ttc1501, 0x13c2, 0x101a),
MAKE_EXTENSION_PCI(tt3200, 0x13c2, 0x1019),
MAKE_EXTENSION_PCI(ttbs1500b, 0x13c2, 0x101b),
{
.vendor = 0,
}
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
static struct saa7146_extension budget_extension = {
.name = "budget_ci dvb",
.flags = SAA7146_USE_I2C_IRQ,
.module = THIS_MODULE,
.pci_tbl = &pci_tbl[0],
.attach = budget_ci_attach,
.detach = budget_ci_detach,
.irq_mask = MASK_03 | MASK_06 | MASK_10,
.irq_func = budget_ci_irq,
};
static int __init budget_ci_init(void)
{
return saa7146_register_extension(&budget_extension);
}
static void __exit budget_ci_exit(void)
{
saa7146_unregister_extension(&budget_extension);
}
module_init(budget_ci_init);
module_exit(budget_ci_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Hunold, Jack Thomasson, Andrew de Quincey, others");
MODULE_DESCRIPTION("driver for the SAA7146 based so-called "
"budget PCI DVB cards w/ CI-module produced by "
"Siemens, Technotrend, Hauppauge");
| gpl-2.0 |
dh-harald/android_kernel_samsung_codina | drivers/gpu/drm/radeon/radeon_pm.c | 1616 | 26575 | /*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Rafał Miłecki <zajec5@gmail.com>
* Alex Deucher <alexdeucher@gmail.com>
*/
#include "drmP.h"
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#endif
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200
static const char *radeon_pm_state_type_name[5] = {
"Default",
"Powersave",
"Battery",
"Balanced",
"Performance",
};
static void radeon_dynpm_idle_work_handler(struct work_struct *work);
static int radeon_debugfs_pm_init(struct radeon_device *rdev);
static bool radeon_pm_in_vbl(struct radeon_device *rdev);
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
#define ACPI_AC_CLASS "ac_adapter"
#ifdef CONFIG_ACPI
static int radeon_acpi_event(struct notifier_block *nb,
unsigned long val,
void *data)
{
struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
if (power_supply_is_system_supplied() > 0)
DRM_DEBUG_DRIVER("pm: AC\n");
else
DRM_DEBUG_DRIVER("pm: DC\n");
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
radeon_pm_update_profile(rdev);
radeon_pm_set_clocks(rdev);
mutex_unlock(&rdev->pm.mutex);
}
}
}
return NOTIFY_OK;
}
#endif
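/*
* Map the user-selected profile to a profile table index, using the
* AC/DC supply state and the number of active CRTCs for the AUTO
* profile, then pick the power state and clock mode for the DPMS
* on/off case.
*/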
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
switch (rdev->pm.profile) {
case PM_PROFILE_DEFAULT:
rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
break;
case PM_PROFILE_AUTO:
if (power_supply_is_system_supplied() > 0) {
if (rdev->pm.active_crtc_count > 1)
rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
else
rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
} else {
if (rdev->pm.active_crtc_count > 1)
rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
else
rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
}
break;
case PM_PROFILE_LOW:
if (rdev->pm.active_crtc_count > 1)
rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
else
rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
break;
case PM_PROFILE_MID:
if (rdev->pm.active_crtc_count > 1)
rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
else
rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
break;
case PM_PROFILE_HIGH:
if (rdev->pm.active_crtc_count > 1)
rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
else
rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
break;
}
if (rdev->pm.active_crtc_count == 0) {
rdev->pm.requested_power_state_index =
rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
rdev->pm.requested_clock_mode_index =
rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
} else {
rdev->pm.requested_power_state_index =
rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
rdev->pm.requested_clock_mode_index =
rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
}
}
static void radeon_unmap_vram_bos(struct radeon_device *rdev)
{
struct radeon_bo *bo, *n;
if (list_empty(&rdev->gem.objects))
return;
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
ttm_bo_unmap_virtual(&bo->tbo);
}
}
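/*
* Wait (with a timeout) for the next vblank on any active CRTC so
* that reclocking happens inside the blanking interval.
*/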
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
if (rdev->pm.active_crtcs) {
rdev->pm.vblank_sync = false;
wait_event_timeout(
rdev->irq.vblank_queue, rdev->pm.vblank_sync,
msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
}
}
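/*
* Apply the requested power state: clamp sclk/mclk to the defaults,
* reprogram voltage and PCIe lanes before raising clocks or after
* lowering them, and only reclock while the GUI is idle (and, for
* dynpm, inside vblank).
*/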
static void radeon_set_power_state(struct radeon_device *rdev)
{
u32 sclk, mclk;
bool misc_after = false;
if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
(rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
return;
if (radeon_gui_idle(rdev)) {
sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].sclk;
if (sclk > rdev->pm.default_sclk)
sclk = rdev->pm.default_sclk;
mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].mclk;
if (mclk > rdev->pm.default_mclk)
mclk = rdev->pm.default_mclk;
/* upvolt before raising clocks, downvolt after lowering clocks */
if (sclk < rdev->pm.current_sclk)
misc_after = true;
radeon_sync_with_vblank(rdev);
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
if (!radeon_pm_in_vbl(rdev))
return;
}
radeon_pm_prepare(rdev);
if (!misc_after)
/* voltage, pcie lanes, etc.*/
radeon_pm_misc(rdev);
/* set engine clock */
if (sclk != rdev->pm.current_sclk) {
radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_engine_clock(rdev, sclk);
radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.current_sclk = sclk;
DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
}
/* set memory clock */
if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_memory_clock(rdev, mclk);
radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.current_mclk = mclk;
DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
}
if (misc_after)
/* voltage, pcie lanes, etc.*/
radeon_pm_misc(rdev);
radeon_pm_finish(rdev);
rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
} else
DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
static void radeon_pm_set_clocks(struct radeon_device *rdev)
{
int i;
/* no need to take locks, etc. if nothing's going to change */
if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
(rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
return;
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
mutex_lock(&rdev->cp.mutex);
/* the gui idle interrupt seems to have issues on older chips */
if (rdev->family >= CHIP_R600) {
if (rdev->irq.installed) {
/* wait for GPU idle */
rdev->pm.gui_idle = false;
rdev->irq.gui_idle = true;
radeon_irq_set(rdev);
wait_event_interruptible_timeout(
rdev->irq.idle_queue, rdev->pm.gui_idle,
msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT));
rdev->irq.gui_idle = false;
radeon_irq_set(rdev);
}
} else {
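/* no usable gui-idle interrupt: drain the CP by emitting a
* fence and waiting for it instead */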
if (rdev->cp.ready) {
struct radeon_fence *fence;
radeon_ring_alloc(rdev, 64);
radeon_fence_create(rdev, &fence);
radeon_fence_emit(rdev, fence);
radeon_ring_commit(rdev);
radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
}
}
radeon_unmap_vram_bos(rdev);
if (rdev->irq.installed) {
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->pm.active_crtcs & (1 << i)) {
rdev->pm.req_vblank |= (1 << i);
drm_vblank_get(rdev->ddev, i);
}
}
}
radeon_set_power_state(rdev);
if (rdev->irq.installed) {
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->pm.req_vblank & (1 << i)) {
rdev->pm.req_vblank &= ~(1 << i);
drm_vblank_put(rdev->ddev, i);
}
}
}
/* update display watermarks based on new power state */
radeon_update_bandwidth_info(rdev);
if (rdev->pm.active_crtc_count)
radeon_bandwidth_update(rdev);
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
mutex_unlock(&rdev->cp.mutex);
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);
}
static void radeon_pm_print_states(struct radeon_device *rdev)
{
int i, j;
struct radeon_power_state *power_state;
struct radeon_pm_clock_info *clock_info;
DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
for (i = 0; i < rdev->pm.num_power_states; i++) {
power_state = &rdev->pm.power_state[i];
DRM_DEBUG_DRIVER("State %d: %s\n", i,
radeon_pm_state_type_name[power_state->type]);
if (i == rdev->pm.default_power_state_index)
DRM_DEBUG_DRIVER("\tDefault");
if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
DRM_DEBUG_DRIVER("\tSingle display only\n");
DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
for (j = 0; j < power_state->num_clock_modes; j++) {
clock_info = &(power_state->clock_info[j]);
if (rdev->flags & RADEON_IS_IGP)
DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n",
j,
clock_info->sclk * 10,
clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
else
DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n",
j,
clock_info->sclk * 10,
clock_info->mclk * 10,
clock_info->voltage.voltage,
clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
}
}
}
static ssize_t radeon_get_pm_profile(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
int cp = rdev->pm.profile;
return snprintf(buf, PAGE_SIZE, "%s\n",
(cp == PM_PROFILE_AUTO) ? "auto" :
(cp == PM_PROFILE_LOW) ? "low" :
(cp == PM_PROFILE_MID) ? "mid" :
(cp == PM_PROFILE_HIGH) ? "high" : "default");
}
static ssize_t radeon_set_pm_profile(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (strncmp("default", buf, strlen("default")) == 0)
rdev->pm.profile = PM_PROFILE_DEFAULT;
else if (strncmp("auto", buf, strlen("auto")) == 0)
rdev->pm.profile = PM_PROFILE_AUTO;
else if (strncmp("low", buf, strlen("low")) == 0)
rdev->pm.profile = PM_PROFILE_LOW;
else if (strncmp("mid", buf, strlen("mid")) == 0)
rdev->pm.profile = PM_PROFILE_MID;
else if (strncmp("high", buf, strlen("high")) == 0)
rdev->pm.profile = PM_PROFILE_HIGH;
else {
count = -EINVAL;
goto fail;
}
radeon_pm_update_profile(rdev);
radeon_pm_set_clocks(rdev);
} else
count = -EINVAL;
fail:
mutex_unlock(&rdev->pm.mutex);
return count;
}
static ssize_t radeon_get_pm_method(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
int pm = rdev->pm.pm_method;
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
}
static ssize_t radeon_set_pm_method(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
mutex_lock(&rdev->pm.mutex);
rdev->pm.pm_method = PM_METHOD_DYNPM;
rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
mutex_unlock(&rdev->pm.mutex);
} else if (strncmp("profile", buf, strlen("profile")) == 0) {
mutex_lock(&rdev->pm.mutex);
/* disable dynpm */
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
rdev->pm.pm_method = PM_METHOD_PROFILE;
mutex_unlock(&rdev->pm.mutex);
cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
} else {
count = -EINVAL;
goto fail;
}
radeon_pm_compute_clocks(rdev);
fail:
return count;
}
static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
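/*
* The two attributes above appear under the PCI device's sysfs
* directory, e.g. (illustrative path, the card number varies):
* echo dynpm > /sys/class/drm/card0/device/power_method
* echo low > /sys/class/drm/card0/device/power_profile
*/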
static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
int temp;
switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_RV6XX:
temp = rv6xx_get_temp(rdev);
break;
case THERMAL_TYPE_RV770:
temp = rv770_get_temp(rdev);
break;
case THERMAL_TYPE_EVERGREEN:
case THERMAL_TYPE_NI:
temp = evergreen_get_temp(rdev);
break;
case THERMAL_TYPE_SUMO:
temp = sumo_get_temp(rdev);
break;
default:
temp = 0;
break;
}
return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t radeon_hwmon_show_name(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "radeon\n");
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr,
NULL
};
static const struct attribute_group hwmon_attrgroup = {
.attrs = hwmon_attributes,
};
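/*
* Standard hwmon interface: temp1_input reports the internal thermal
* sensor (millidegrees Celsius per the hwmon sysfs ABI) and name
* identifies the driver to userspace tools such as lm-sensors.
*/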
static int radeon_hwmon_init(struct radeon_device *rdev)
{
int err = 0;
rdev->pm.int_hwmon_dev = NULL;
switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_RV6XX:
case THERMAL_TYPE_RV770:
case THERMAL_TYPE_EVERGREEN:
case THERMAL_TYPE_NI:
case THERMAL_TYPE_SUMO:
rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
if (IS_ERR(rdev->pm.int_hwmon_dev)) {
err = PTR_ERR(rdev->pm.int_hwmon_dev);
dev_err(rdev->dev,
"Unable to register hwmon device: %d\n", err);
break;
}
dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
&hwmon_attrgroup);
if (err) {
dev_err(rdev->dev,
"Unable to create hwmon sysfs file: %d\n", err);
hwmon_device_unregister(rdev->dev);
}
break;
default:
break;
}
return err;
}
static void radeon_hwmon_fini(struct radeon_device *rdev)
{
if (rdev->pm.int_hwmon_dev) {
sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
hwmon_device_unregister(rdev->pm.int_hwmon_dev);
}
}
void radeon_pm_suspend(struct radeon_device *rdev)
{
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
}
mutex_unlock(&rdev->pm.mutex);
cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
}
void radeon_pm_resume(struct radeon_device *rdev)
{
/* set up the default clocks if the MC ucode is loaded */
if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
if (rdev->pm.default_vddci)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
SET_VOLTAGE_TYPE_ASIC_VDDCI);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
}
/* asic init will reset the default power state */
mutex_lock(&rdev->pm.mutex);
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
rdev->pm.current_sclk = rdev->pm.default_sclk;
rdev->pm.current_mclk = rdev->pm.default_mclk;
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
schedule_delayed_work(&rdev->pm.dynpm_idle_work,
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
mutex_unlock(&rdev->pm.mutex);
radeon_pm_compute_clocks(rdev);
}
int radeon_pm_init(struct radeon_device *rdev)
{
int ret;
/* default to profile method */
rdev->pm.pm_method = PM_METHOD_PROFILE;
rdev->pm.profile = PM_PROFILE_DEFAULT;
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
rdev->pm.dynpm_can_upclock = true;
rdev->pm.dynpm_can_downclock = true;
rdev->pm.default_sclk = rdev->clock.default_sclk;
rdev->pm.default_mclk = rdev->clock.default_mclk;
rdev->pm.current_sclk = rdev->clock.default_sclk;
rdev->pm.current_mclk = rdev->clock.default_mclk;
rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
if (rdev->bios) {
if (rdev->is_atom_bios)
radeon_atombios_get_power_modes(rdev);
else
radeon_combios_get_power_modes(rdev);
radeon_pm_print_states(rdev);
radeon_pm_init_profile(rdev);
/* set up the default clocks if the MC ucode is loaded */
if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
if (rdev->pm.default_vddci)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
SET_VOLTAGE_TYPE_ASIC_VDDCI);
if (rdev->pm.default_sclk)
radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
if (rdev->pm.default_mclk)
radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
}
}
/* set up the internal thermal sensor if applicable */
ret = radeon_hwmon_init(rdev);
if (ret)
return ret;
INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
if (rdev->pm.num_power_states > 1) {
/* where's the best place to put these? */
ret = device_create_file(rdev->dev, &dev_attr_power_profile);
if (ret)
DRM_ERROR("failed to create device file for power profile\n");
ret = device_create_file(rdev->dev, &dev_attr_power_method);
if (ret)
DRM_ERROR("failed to create device file for power method\n");
#ifdef CONFIG_ACPI
rdev->acpi_nb.notifier_call = radeon_acpi_event;
register_acpi_notifier(&rdev->acpi_nb);
#endif
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for PM!\n");
}
DRM_INFO("radeon: power management initialized\n");
}
return 0;
}
void radeon_pm_fini(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states > 1) {
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
rdev->pm.profile = PM_PROFILE_DEFAULT;
radeon_pm_update_profile(rdev);
radeon_pm_set_clocks(rdev);
} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
/* reset default clocks */
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
radeon_pm_set_clocks(rdev);
}
mutex_unlock(&rdev->pm.mutex);
cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
device_remove_file(rdev->dev, &dev_attr_power_profile);
device_remove_file(rdev->dev, &dev_attr_power_method);
#ifdef CONFIG_ACPI
unregister_acpi_notifier(&rdev->acpi_nb);
#endif
}
if (rdev->pm.power_state)
kfree(rdev->pm.power_state);
radeon_hwmon_fini(rdev);
}
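/*
* Recount active CRTCs and reapply the current PM policy. For dynpm:
* more than one active head pauses reclocking at the default state,
* exactly one head (re)activates the idle work, and zero heads drop
* the GPU to its minimum state.
*/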
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
if (rdev->pm.num_power_states < 2)
return;
mutex_lock(&rdev->pm.mutex);
rdev->pm.active_crtcs = 0;
rdev->pm.active_crtc_count = 0;
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (radeon_crtc->enabled) {
rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
rdev->pm.active_crtc_count++;
}
}
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
radeon_pm_update_profile(rdev);
radeon_pm_set_clocks(rdev);
} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
if (rdev->pm.active_crtc_count > 1) {
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
cancel_delayed_work(&rdev->pm.dynpm_idle_work);
rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
}
} else if (rdev->pm.active_crtc_count == 1) {
/* TODO: Increase clocks if needed for current mode */
if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
schedule_delayed_work(&rdev->pm.dynpm_idle_work,
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
schedule_delayed_work(&rdev->pm.dynpm_idle_work,
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
}
} else { /* count == 0 */
if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
cancel_delayed_work(&rdev->pm.dynpm_idle_work);
rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
}
}
}
}
mutex_unlock(&rdev->pm.mutex);
}
static bool radeon_pm_in_vbl(struct radeon_device *rdev)
{
int crtc, vpos, hpos, vbl_status;
bool in_vbl = true;
/* Iterate over all active crtc's. All crtc's must be in vblank,
* otherwise return in_vbl == false.
*/
for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
if (rdev->pm.active_crtcs & (1 << crtc)) {
vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
!(vbl_status & DRM_SCANOUTPOS_INVBL))
in_vbl = false;
}
}
return in_vbl;
}
static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
{
u32 stat_crtc = 0;
bool in_vbl = radeon_pm_in_vbl(rdev);
if (in_vbl == false)
DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
finish ? "exit" : "entry");
return in_vbl;
}
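/*
* dynpm heuristic: sample the number of emitted-but-unprocessed fences
* (capped at 3). Three or more pending means the GPU is busy, so plan
* an upclock; zero pending means it is idle, so plan a downclock. A
* planned action only fires once RADEON_RECLOCK_DELAY_MS has elapsed,
* which provides some hysteresis.
*/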
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
struct radeon_device *rdev;
int resched;
rdev = container_of(work, struct radeon_device,
pm.dynpm_idle_work.work);
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
unsigned long irq_flags;
int not_processed = 0;
read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
if (!list_empty(&rdev->fence_drv.emited)) {
struct list_head *ptr;
list_for_each(ptr, &rdev->fence_drv.emited) {
/* count up to 3, that's enough info */
if (++not_processed >= 3)
break;
}
}
read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (not_processed >= 3) { /* should upclock */
if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
rdev->pm.dynpm_can_upclock) {
rdev->pm.dynpm_planned_action =
DYNPM_ACTION_UPCLOCK;
rdev->pm.dynpm_action_timeout = jiffies +
msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
}
} else if (not_processed == 0) { /* should downclock */
if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
} else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
rdev->pm.dynpm_can_downclock) {
rdev->pm.dynpm_planned_action =
DYNPM_ACTION_DOWNCLOCK;
rdev->pm.dynpm_action_timeout = jiffies +
msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
}
}
/* Note, radeon_pm_set_clocks is called with static_switch set
* to false since we want to wait for vbl to avoid flicker.
*/
if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
jiffies > rdev->pm.dynpm_action_timeout) {
radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
}
schedule_delayed_work(&rdev->pm.dynpm_idle_work,
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
mutex_unlock(&rdev->pm.mutex);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
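/* clock values are kept in units of 10 kHz; the literal 0 in the
* format strings below scales the printed value to kHz */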
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
if (rdev->asic->get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
if (rdev->pm.current_vddc)
seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
if (rdev->asic->get_pcie_lanes)
seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
return 0;
}
static struct drm_info_list radeon_pm_info_list[] = {
{"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
};
#endif
static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
#else
return 0;
#endif
}
| gpl-2.0 |
cholokei/Qualcomm_MSM_Android_Kernel | arch/arm/kernel/elf.c | 2384 | 2276 | #include <linux/module.h>
#include <linux/sched.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/elf.h>
int elf_check_arch(const struct elf32_hdr *x)
{
unsigned int eflags;
/* Make sure it's an ARM executable */
if (x->e_machine != EM_ARM)
return 0;
/* Make sure the entry address is reasonable: bit 0 set means a
* Thumb entry point (needs HWCAP_THUMB), otherwise an ARM entry
* must be 4-byte aligned */
if (x->e_entry & 1) {
if (!(elf_hwcap & HWCAP_THUMB))
return 0;
} else if (x->e_entry & 3)
return 0;
eflags = x->e_flags;
if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
unsigned int flt_fmt;
/* APCS26 is only allowed if the CPU supports it */
if ((eflags & EF_ARM_APCS_26) && !(elf_hwcap & HWCAP_26BIT))
return 0;
flt_fmt = eflags & (EF_ARM_VFP_FLOAT | EF_ARM_SOFT_FLOAT);
/* VFP requires the supporting code */
if (flt_fmt == EF_ARM_VFP_FLOAT && !(elf_hwcap & HWCAP_VFP))
return 0;
}
return 1;
}
EXPORT_SYMBOL(elf_check_arch);
void elf_set_personality(const struct elf32_hdr *x)
{
unsigned int eflags = x->e_flags;
unsigned int personality = current->personality & ~PER_MASK;
/*
* We only support Linux ELF executables, so always set the
* personality to LINUX.
*/
personality |= PER_LINUX;
/*
* APCS-26 is only valid for OABI executables
*/
if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN &&
(eflags & EF_ARM_APCS_26))
personality &= ~ADDR_LIMIT_32BIT;
else
personality |= ADDR_LIMIT_32BIT;
set_personality(personality);
/*
* Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0
* and CP1, we only enable access to the iWMMXt coprocessor if the
* binary is EABI or softfloat (and thus, guaranteed not to use
* FPA instructions.)
*/
if (elf_hwcap & HWCAP_IWMMXT &&
eflags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) {
set_thread_flag(TIF_USING_IWMMXT);
} else {
clear_thread_flag(TIF_USING_IWMMXT);
}
}
EXPORT_SYMBOL(elf_set_personality);
/*
* Set READ_IMPLIES_EXEC if:
* - the binary requires an executable stack
* - we're running on a CPU which doesn't support NX.
*/
int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack)
{
if (executable_stack != EXSTACK_DISABLE_X)
return 1;
if (cpu_architecture() < CPU_ARCH_ARMv6)
return 1;
return 0;
}
EXPORT_SYMBOL(arm_elf_read_implies_exec);
| gpl-2.0 |
wan5xp/android_kernel_sony_u8500 | drivers/net/ixgbe/ixgbe_fcoe.c | 2384 | 24942 | /*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#ifdef CONFIG_IXGBE_DCB
#include "ixgbe_dcb_82599.h"
#endif /* CONFIG_IXGBE_DCB */
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
/**
* ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
* @rx_desc: advanced rx descriptor
*
* Returns : true if it is FCoE pkt
*/
static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
{
u16 p;
p = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info);
if (p & IXGBE_RXDADV_PKTTYPE_ETQF) {
p &= IXGBE_RXDADV_PKTTYPE_ETQF_MASK;
p >>= IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT;
return p == IXGBE_ETQF_FILTER_FCOE;
}
return false;
}
/**
* ixgbe_fcoe_clear_ddp - clear the given ddp context
* @ddp: ptr to the ixgbe_fcoe_ddp
*
* Returns : none
*
*/
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
ddp->len = 0;
ddp->err = 1;
ddp->udl = NULL;
ddp->udp = 0UL;
ddp->sgl = NULL;
ddp->sgc = 0;
}
/**
* ixgbe_fcoe_ddp_put - free the ddp context for a given xid
* @netdev: the corresponding net_device
* @xid: the xid that corresponding ddp will be freed
*
* This is the implementation of net_device_ops.ndo_fcoe_ddp_done
* and it is expected to be called by ULD, i.e., FCP layer of libfc
* to release the corresponding ddp context when the I/O is done.
*
* Returns : data length already ddp-ed in bytes
*/
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
int len = 0;
struct ixgbe_fcoe *fcoe;
struct ixgbe_adapter *adapter;
struct ixgbe_fcoe_ddp *ddp;
u32 fcbuff;
if (!netdev)
goto out_ddp_put;
if (xid >= IXGBE_FCOE_DDP_MAX)
goto out_ddp_put;
adapter = netdev_priv(netdev);
fcoe = &adapter->fcoe;
ddp = &fcoe->ddp[xid];
if (!ddp->udl)
goto out_ddp_put;
len = ddp->len;
/* if there is an error, force invalidation of the ddp context */
if (ddp->err) {
spin_lock_bh(&fcoe->lock);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
(xid | IXGBE_FCFLTRW_WE));
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
(xid | IXGBE_FCDMARW_WE));
/* guaranteed to be invalidated after 100us */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
(xid | IXGBE_FCDMARW_RE));
fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
spin_unlock_bh(&fcoe->lock);
if (fcbuff & IXGBE_FCBUFF_VALID)
udelay(100);
}
if (ddp->sgl)
pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
DMA_FROM_DEVICE);
pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
ixgbe_fcoe_clear_ddp(ddp);
out_ddp_put:
return len;
}
/**
* ixgbe_fcoe_ddp_setup - called to set up ddp context
* @netdev: the corresponding net_device
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
* @target_mode: non-zero to set up DDP in target mode, zero for initiator
*
* Returns : 1 for success and 0 for no ddp
*/
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc,
int target_mode)
{
struct ixgbe_adapter *adapter;
struct ixgbe_hw *hw;
struct ixgbe_fcoe *fcoe;
struct ixgbe_fcoe_ddp *ddp;
struct scatterlist *sg;
unsigned int i, j, dmacount;
unsigned int len;
static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
unsigned int firstoff = 0;
unsigned int lastsize;
unsigned int thisoff = 0;
unsigned int thislen = 0;
u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
dma_addr_t addr = 0;
if (!netdev || !sgl)
return 0;
adapter = netdev_priv(netdev);
if (xid >= IXGBE_FCOE_DDP_MAX) {
e_warn(drv, "xid=0x%x out-of-range\n", xid);
return 0;
}
/* no DDP if we are already down or resetting */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
return 0;
fcoe = &adapter->fcoe;
if (!fcoe->pool) {
e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
return 0;
}
ddp = &fcoe->ddp[xid];
if (ddp->sgl) {
e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
xid, ddp->sgl, ddp->sgc);
return 0;
}
ixgbe_fcoe_clear_ddp(ddp);
/* setup dma from scsi command sgl */
dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
if (dmacount == 0) {
e_err(drv, "xid 0x%x DMA map error\n", xid);
return 0;
}
/* alloc the udl from our ddp pool */
ddp->udl = pci_pool_alloc(fcoe->pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
}
ddp->sgl = sgl;
ddp->sgc = sgc;
j = 0;
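/*
* Walk the DMA-mapped scatterlist and build the user descriptor list
* (udl), one entry per bufflen-aligned chunk. Worked example with
* illustrative addresses and the 4 KB bufflen used here: an SG element
* at 0x10000100 of length 0x2000 produces entries for 0x10000000,
* 0x10001000 and 0x10002000, with firstoff = 0x100.
*/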
for_each_sg(sgl, sg, dmacount, i) {
addr = sg_dma_address(sg);
len = sg_dma_len(sg);
while (len) {
/* max number of buffers allowed in one DDP context */
if (j >= IXGBE_BUFFCNT_MAX) {
e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
"not enough descriptors\n",
xid, i, j, dmacount, (u64)addr);
goto out_noddp_free;
}
/* get the offset of length of current buffer */
thisoff = addr & ((dma_addr_t)bufflen - 1);
thislen = min((bufflen - thisoff), len);
/*
* all but the 1st buffer (j == 0)
* must be aligned on bufflen
*/
if ((j != 0) && (thisoff))
goto out_noddp_free;
/*
* all but the last buffer
* ((i == (dmacount - 1)) && (thislen == len))
* must end at bufflen
*/
if (((i != (dmacount - 1)) || (thislen != len))
&& ((thislen + thisoff) != bufflen))
goto out_noddp_free;
ddp->udl[j] = (u64)(addr - thisoff);
/* only the first buffer may have a non-zero offset */
if (j == 0)
firstoff = thisoff;
len -= thislen;
addr += thislen;
j++;
}
}
/* only the last buffer may have non-full bufflen */
lastsize = thisoff + thislen;
/*
* lastsize cannot be the full buffer length. If it is, add
* another buffer (the shared extra DDP buffer) with lastsize = 1.
*/
if (lastsize == bufflen) {
if (j >= IXGBE_BUFFCNT_MAX) {
e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
"not enough user buffers. We need an extra "
"buffer because lastsize is bufflen.\n",
xid, i, j, dmacount, (u64)addr);
goto out_noddp_free;
}
ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
j++;
lastsize = 1;
}
fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
/* Set WRCONTX bit to allow DDP for target */
if (target_mode)
fcbuff |= (IXGBE_FCBUFF_WRCONTX);
fcbuff |= (IXGBE_FCBUFF_VALID);
fcdmarw = xid;
fcdmarw |= IXGBE_FCDMARW_WE;
fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);
fcfltrw = xid;
fcfltrw |= IXGBE_FCFLTRW_WE;
/* program DMA context */
hw = &adapter->hw;
spin_lock_bh(&fcoe->lock);
/* turn on last frame indication for target mode, as the target is
* supposed to send FCP_RSP when it is done. */
if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
}
IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
/* program filter context */
IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);
spin_unlock_bh(&fcoe->lock);
return 1;
out_noddp_free:
pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
ixgbe_fcoe_clear_ddp(ddp);
out_noddp_unmap:
pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
return 0;
}
/**
* ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
* @netdev: the corresponding net_device
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
*
* This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
* and is expected to be called from ULD, e.g., FCP layer of libfc
* to set up ddp for the corresponding xid of the given sglist for
* the corresponding I/O.
*
* Returns : 1 for success and 0 for no ddp
*/
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc)
{
return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}
/**
* ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
* @netdev: the corresponding net_device
* @xid: the exchange id requesting ddp
* @sgl: the scatter-gather list for this request
* @sgc: the number of scatter-gather items
*
* This is the implementation of net_device_ops.ndo_fcoe_ddp_target
* and is expected to be called from ULD, e.g., FCP layer of libfc
* to set up ddp for the corresponding xid of the given sglist for
* the corresponding I/O. The DDP in target mode is a write I/O request
* from the initiator.
*
* Returns : 1 for success and 0 for no ddp
*/
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc)
{
return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
/**
* ixgbe_fcoe_ddp - check ddp status and mark it done
* @adapter: ixgbe adapter
* @rx_desc: advanced rx descriptor
* @skb: the skb holding the received data
*
* This checks ddp status.
*
* Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates
* not passing the skb to ULD, > 0 indicates is the length of data
* being ddped.
*/
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
u16 xid;
u32 fctl;
u32 sterr, fceofe, fcerr, fcstat;
int rc = -EINVAL;
struct ixgbe_fcoe *fcoe;
struct ixgbe_fcoe_ddp *ddp;
struct fc_frame_header *fh;
struct fcoe_crc_eof *crc;
if (!ixgbe_rx_is_fcoe(rx_desc))
goto ddp_out;
sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
if (fcerr == IXGBE_FCERR_BADCRC)
skb_checksum_none_assert(skb);
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
fh = (struct fc_frame_header *)(skb->data +
sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
else
fh = (struct fc_frame_header *)(skb->data +
sizeof(struct fcoe_hdr));
fctl = ntoh24(fh->fh_f_ctl);
if (fctl & FC_FC_EX_CTX)
xid = be16_to_cpu(fh->fh_ox_id);
else
xid = be16_to_cpu(fh->fh_rx_id);
if (xid >= IXGBE_FCOE_DDP_MAX)
goto ddp_out;
fcoe = &adapter->fcoe;
ddp = &fcoe->ddp[xid];
if (!ddp->udl)
goto ddp_out;
if (fcerr | fceofe)
goto ddp_out;
fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
if (fcstat) {
/* update length of DDPed data */
ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
/* unmap the sg list when FCP_RSP is received */
if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
pci_unmap_sg(adapter->pdev, ddp->sgl,
ddp->sgc, DMA_FROM_DEVICE);
ddp->err = (fcerr | fceofe);
ddp->sgl = NULL;
ddp->sgc = 0;
}
/* return 0 to bypass going to ULD for DDPed data */
if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
rc = 0;
else if (ddp->len)
rc = ddp->len;
}
/* In target mode, check the last data frame of the sequence.
* For DDP in target mode, data is already DDPed but the header
* indication of the last data frame would allow us to tell if we
* got all the data and the ULP can send FCP_RSP back. As this is
* not a full fcoe frame, we fill in the trailer here so it won't be
* dropped by the ULP stack.
*/
if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
(fctl & FC_FC_END_SEQ)) {
crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
crc->fcoe_eof = FC_EOF_T;
}
ddp_out:
return rc;
}
/**
* ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
* @adapter: ixgbe adapter
* @tx_ring: tx desc ring
* @skb: associated skb
* @tx_flags: tx flags
* @hdr_len: hdr_len to be returned
*
* This sets up large send offload for FCoE
*
* Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
*/
int ixgbe_fso(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, u8 *hdr_len)
{
u8 sof, eof;
u32 vlan_macip_lens;
u32 fcoe_sof_eof;
u32 type_tucmd;
u32 mss_l4len_idx;
int mss = 0;
unsigned int i;
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_adv_tx_context_desc *context_desc;
struct fc_frame_header *fh;
if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
e_err(drv, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
skb_shinfo(skb)->gso_type);
return -EINVAL;
}
/* resets the header to point fcoe/fc */
skb_set_network_header(skb, skb->mac_len);
skb_set_transport_header(skb, skb->mac_len +
sizeof(struct fcoe_hdr));
/* sets up SOF and ORIS */
fcoe_sof_eof = 0;
sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
switch (sof) {
case FC_SOF_I2:
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
break;
case FC_SOF_I3:
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
break;
case FC_SOF_N2:
break;
case FC_SOF_N3:
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
break;
default:
e_warn(drv, "unknown sof = 0x%x\n", sof);
return -EINVAL;
}
/* the first byte of the last dword is EOF */
skb_copy_bits(skb, skb->len - 4, &eof, 1);
/* sets up EOF and ORIE */
switch (eof) {
case FC_EOF_N:
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
break;
case FC_EOF_T:
/* lso needs ORIE */
if (skb_is_gso(skb)) {
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIE;
} else {
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
}
break;
case FC_EOF_NI:
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
break;
case FC_EOF_A:
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
break;
default:
e_warn(drv, "unknown eof = 0x%x\n", eof);
return -EINVAL;
}
/* sets up PARINC indicating data offset */
fh = (struct fc_frame_header *)skb_transport_header(skb);
if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
/* hdr_len includes fc_hdr if FCoE lso is enabled */
*hdr_len = sizeof(struct fcoe_crc_eof);
if (skb_is_gso(skb))
*hdr_len += (skb_transport_offset(skb) +
sizeof(struct fc_frame_header));
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = (skb_transport_offset(skb) +
sizeof(struct fc_frame_header));
vlan_macip_lens |= ((skb_transport_offset(skb) - 4)
<< IXGBE_ADVTXD_MACLEN_SHIFT);
vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
/* type_tucmd and mss: set TUCMD.FCoE to enable offload */
type_tucmd = IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
IXGBE_ADVTXT_TUCMD_FCOE;
if (skb_is_gso(skb))
mss = skb_shinfo(skb)->gso_size;
/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
mss_l4len_idx = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
(1 << IXGBE_ADVTXD_IDX_SHIFT);
/* write context desc */
i = tx_ring->next_to_use;
context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
i++;
if (i == tx_ring->count)
i = 0;
tx_ring->next_to_use = i;
return skb_is_gso(skb);
}
/**
* ixgbe_configure_fcoe - configures registers for fcoe at start
* @adapter: ptr to ixgbe adapter
*
* This sets up FCoE related registers
*
* Returns : none
*/
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
int i, fcoe_q, fcoe_i;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
#ifdef CONFIG_IXGBE_DCB
u8 tc;
u32 up2tc;
#endif
/* create the pool for ddp if not created yet */
if (!fcoe->pool) {
/* allocate ddp pool */
fcoe->pool = pci_pool_create("ixgbe_fcoe_ddp",
adapter->pdev, IXGBE_FCPTR_MAX,
IXGBE_FCPTR_ALIGN, PAGE_SIZE);
if (!fcoe->pool)
e_err(drv, "failed to allocated FCoE DDP pool\n");
spin_lock_init(&fcoe->lock);
/* Extra buffer to be shared by all DDPs for HW work around */
fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
if (fcoe->extra_ddp_buffer == NULL) {
e_err(drv, "failed to allocated extra DDP buffer\n");
goto out_extra_ddp_buffer_alloc;
}
fcoe->extra_ddp_buffer_dma =
dma_map_single(&adapter->pdev->dev,
fcoe->extra_ddp_buffer,
IXGBE_FCBUFF_MIN,
DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev,
fcoe->extra_ddp_buffer_dma)) {
e_err(drv, "failed to map extra DDP buffer\n");
goto out_extra_ddp_buffer_dma;
}
}
/* Enable L2 eth type filter for FCoE */
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
/* Enable L2 eth type filter for FIP */
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
if (adapter->ring_feature[RING_F_FCOE].indices) {
/* Use multiple rx queues for FCoE by redirection table */
for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
fcoe_i = f->mask + i % f->indices;
fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
}
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
} else {
/* Use single rx queue for FCoE */
fcoe_i = f->mask;
fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
IXGBE_ETQS_QUEUE_EN |
(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
}
/* send FIP frames to the first FCoE queue */
fcoe_i = f->mask;
fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
IXGBE_ETQS_QUEUE_EN |
(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
IXGBE_FCRXCTRL_FCOELLI |
IXGBE_FCRXCTRL_FCCRCBO |
(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
#ifdef CONFIG_IXGBE_DCB
up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
for (i = 0; i < MAX_USER_PRIORITY; i++) {
tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
tc &= (MAX_TRAFFIC_CLASS - 1);
if (fcoe->tc == tc) {
fcoe->up = i;
break;
}
}
#endif
return;
out_extra_ddp_buffer_dma:
kfree(fcoe->extra_ddp_buffer);
out_extra_ddp_buffer_alloc:
pci_pool_destroy(fcoe->pool);
fcoe->pool = NULL;
}
/**
* ixgbe_cleanup_fcoe - release all fcoe ddp context resources
* @adapter : ixgbe adapter
*
* Cleans up outstanding ddp context resources
*
* Returns : none
*/
void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
{
int i;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
/* release ddp resource */
if (fcoe->pool) {
for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
ixgbe_fcoe_ddp_put(adapter->netdev, i);
dma_unmap_single(&adapter->pdev->dev,
fcoe->extra_ddp_buffer_dma,
IXGBE_FCBUFF_MIN,
DMA_FROM_DEVICE);
kfree(fcoe->extra_ddp_buffer);
pci_pool_destroy(fcoe->pool);
fcoe->pool = NULL;
}
}
/**
* ixgbe_fcoe_enable - turn on FCoE offload feature
* @netdev: the corresponding netdev
*
* Turns on FCoE offload feature in 82599.
*
* Returns : 0 indicates success or -EINVAL on failure
*/
int ixgbe_fcoe_enable(struct net_device *netdev)
{
int rc = -EINVAL;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
goto out_enable;
atomic_inc(&fcoe->refcnt);
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
goto out_enable;
e_info(drv, "Enabling FCoE offload features.\n");
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
ixgbe_clear_interrupt_scheme(adapter);
adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
netdev->features |= NETIF_F_FCOE_CRC;
netdev->features |= NETIF_F_FSO;
netdev->features |= NETIF_F_FCOE_MTU;
netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
ixgbe_init_interrupt_scheme(adapter);
netdev_features_change(netdev);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
rc = 0;
out_enable:
return rc;
}
/**
* ixgbe_fcoe_disable - turn off FCoE offload feature
* @netdev: the corresponding netdev
*
* Turns off FCoE offload feature in 82599.
*
* Returns : 0 indicates success or -EINVAL on failure
*/
int ixgbe_fcoe_disable(struct net_device *netdev)
{
int rc = -EINVAL;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
goto out_disable;
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
goto out_disable;
if (!atomic_dec_and_test(&fcoe->refcnt))
goto out_disable;
e_info(drv, "Disabling FCoE offload features.\n");
netdev->features &= ~NETIF_F_FCOE_CRC;
netdev->features &= ~NETIF_F_FSO;
netdev->features &= ~NETIF_F_FCOE_MTU;
netdev->fcoe_ddp_xid = 0;
netdev_features_change(netdev);
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
ixgbe_clear_interrupt_scheme(adapter);
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
adapter->ring_feature[RING_F_FCOE].indices = 0;
ixgbe_cleanup_fcoe(adapter);
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
rc = 0;
out_disable:
return rc;
}
#ifdef CONFIG_IXGBE_DCB
/**
* ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
* @adapter : ixgbe adapter
* @up : 802.1p user priority bitmap
*
* Finds out the traffic class from the input user priority
* bitmap for FCoE.
*
* Returns : 0 on success, otherwise 1 on error
*/
u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
{
int i;
u32 up2tc;
/* valid user priority bitmap must not be 0 */
if (up) {
/* from user priority to the corresponding traffic class */
up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
for (i = 0; i < MAX_USER_PRIORITY; i++) {
if (up & (1 << i)) {
up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
up2tc &= (MAX_TRAFFIC_CLASS - 1);
adapter->fcoe.tc = (u8)up2tc;
adapter->fcoe.up = i;
return 0;
}
}
}
return 1;
}
#endif /* CONFIG_IXGBE_DCB */
/**
* ixgbe_fcoe_get_wwn - get world wide name for the node or the port
* @netdev : ixgbe adapter
* @wwn : the world wide name
* @type: the type of world wide name
*
* Returns the node or port world wide name if both the prefix and the SAN
* MAC address are valid; the wwn is formed based on the NAA-2 format for
* an IEEE Extended name identifier (ref. T10 FC-LS Spec., Sec. 15.3).
*
* Returns : 0 on success
*/
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
int rc = -EINVAL;
u16 prefix = 0xffff;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_mac_info *mac = &adapter->hw.mac;
switch (type) {
case NETDEV_FCOE_WWNN:
prefix = mac->wwnn_prefix;
break;
case NETDEV_FCOE_WWPN:
prefix = mac->wwpn_prefix;
break;
default:
break;
}
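/* WWN layout: 2-byte NAA prefix followed by the 6-byte SAN MAC.
* Illustrative example: prefix 0x2000 and MAC 00:1b:21:aa:bb:cc
* yield the wwn 0x2000001b21aabbcc.
*/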
if ((prefix != 0xffff) &&
is_valid_ether_addr(mac->san_addr)) {
*wwn = ((u64) prefix << 48) |
((u64) mac->san_addr[0] << 40) |
((u64) mac->san_addr[1] << 32) |
((u64) mac->san_addr[2] << 24) |
((u64) mac->san_addr[3] << 16) |
((u64) mac->san_addr[4] << 8) |
((u64) mac->san_addr[5]);
rc = 0;
}
return rc;
}
| gpl-2.0 |
deepongi/android_kernel_sony_msm | arch/arm/mach-omap2/cclock2420_data.c | 2640 | 52437 | /*
* OMAP2420 clock data
*
* Copyright (C) 2005-2012 Texas Instruments, Inc.
* Copyright (C) 2004-2011 Nokia Corporation
*
* Contacts:
* Richard Woodruff <r-woodruff2@ti.com>
* Paul Walmsley
* Updated to COMMON clk format by Rajendra Nayak <rnayak@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-private.h>
#include <linux/list.h>
#include "soc.h"
#include "iomap.h"
#include "clock.h"
#include "clock2xxx.h"
#include "opp2xxx.h"
#include "cm2xxx.h"
#include "prm2xxx.h"
#include "prm-regbits-24xx.h"
#include "cm-regbits-24xx.h"
#include "sdrc.h"
#include "control.h"
#define OMAP_CM_REGADDR OMAP2420_CM_REGADDR
/*
* 2420 clock tree.
*
* NOTE: In many cases here we are assigning a 'default' parent. In
* many cases the parent is selectable. The set parent calls will
* also switch sources.
*
* Several sources are given initial rates which may be wrong, this will
* be fixed up in the init func.
*
* Things are broadly separated below by clock domains. It is
* noteworthy that most peripherals have dependencies on multiple clock
* domains. Many get their interface clocks from the L4 domain, but get
* functional clocks from fixed sources or other core domain derived
* clocks.
*/
DEFINE_CLK_FIXED_RATE(alt_ck, CLK_IS_ROOT, 54000000, 0x0);
DEFINE_CLK_FIXED_RATE(func_32k_ck, CLK_IS_ROOT, 32768, 0x0);
DEFINE_CLK_FIXED_RATE(mcbsp_clks, CLK_IS_ROOT, 0x0, 0x0);
static struct clk osc_ck;
static const struct clk_ops osc_ck_ops = {
.recalc_rate = &omap2_osc_clk_recalc,
};
static struct clk_hw_omap osc_ck_hw = {
.hw = {
.clk = &osc_ck,
},
};
static struct clk osc_ck = {
.name = "osc_ck",
.ops = &osc_ck_ops,
.hw = &osc_ck_hw.hw,
.flags = CLK_IS_ROOT,
};
DEFINE_CLK_FIXED_RATE(secure_32k_ck, CLK_IS_ROOT, 32768, 0x0);
static struct clk sys_ck;
static const char *sys_ck_parent_names[] = {
"osc_ck",
};
static const struct clk_ops sys_ck_ops = {
.init = &omap2_init_clk_clkdm,
.recalc_rate = &omap2xxx_sys_clk_recalc,
};
DEFINE_STRUCT_CLK_HW_OMAP(sys_ck, "wkup_clkdm");
DEFINE_STRUCT_CLK(sys_ck, sys_ck_parent_names, sys_ck_ops);
static struct dpll_data dpll_dd = {
.mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
.mult_mask = OMAP24XX_DPLL_MULT_MASK,
.div1_mask = OMAP24XX_DPLL_DIV_MASK,
.clk_bypass = &sys_ck,
.clk_ref = &sys_ck,
.control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
.enable_mask = OMAP24XX_EN_DPLL_MASK,
.max_multiplier = 1023,
.min_divider = 1,
.max_divider = 16,
};
static struct clk dpll_ck;
static const char *dpll_ck_parent_names[] = {
"sys_ck",
};
static const struct clk_ops dpll_ck_ops = {
.init = &omap2_init_clk_clkdm,
.get_parent = &omap2_init_dpll_parent,
.recalc_rate = &omap2_dpllcore_recalc,
.round_rate = &omap2_dpll_round_rate,
.set_rate = &omap2_reprogram_dpllcore,
};
static struct clk_hw_omap dpll_ck_hw = {
.hw = {
.clk = &dpll_ck,
},
.ops = &clkhwops_omap2xxx_dpll,
.dpll_data = &dpll_dd,
.clkdm_name = "wkup_clkdm",
};
DEFINE_STRUCT_CLK(dpll_ck, dpll_ck_parent_names, dpll_ck_ops);
static struct clk core_ck;
static const char *core_ck_parent_names[] = {
"dpll_ck",
};
static const struct clk_ops core_ck_ops = {
.init = &omap2_init_clk_clkdm,
};
DEFINE_STRUCT_CLK_HW_OMAP(core_ck, "wkup_clkdm");
DEFINE_STRUCT_CLK(core_ck, core_ck_parent_names, core_ck_ops);
DEFINE_CLK_DIVIDER(core_l3_ck, "core_ck", &core_ck, 0x0,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
OMAP24XX_CLKSEL_L3_SHIFT, OMAP24XX_CLKSEL_L3_WIDTH,
CLK_DIVIDER_ONE_BASED, NULL);
DEFINE_CLK_DIVIDER(l4_ck, "core_l3_ck", &core_l3_ck, 0x0,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
OMAP24XX_CLKSEL_L4_SHIFT, OMAP24XX_CLKSEL_L4_WIDTH,
CLK_DIVIDER_ONE_BASED, NULL);
static struct clk aes_ick;
static const char *aes_ick_parent_names[] = {
"l4_ck",
};
static const struct clk_ops aes_ick_ops = {
.init = &omap2_init_clk_clkdm,
.enable = &omap2_dflt_clk_enable,
.disable = &omap2_dflt_clk_disable,
.is_enabled = &omap2_dflt_clk_is_enabled,
};
static struct clk_hw_omap aes_ick_hw = {
.hw = {
.clk = &aes_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
.enable_bit = OMAP24XX_EN_AES_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(aes_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk apll54_ck;
static const struct clk_ops apll54_ck_ops = {
.init = &omap2_init_clk_clkdm,
.enable = &omap2_clk_apll54_enable,
.disable = &omap2_clk_apll54_disable,
.recalc_rate = &omap2_clk_apll54_recalc,
};
static struct clk_hw_omap apll54_ck_hw = {
.hw = {
.clk = &apll54_ck,
},
.ops = &clkhwops_apll54,
.enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
.enable_bit = OMAP24XX_EN_54M_PLL_SHIFT,
.flags = ENABLE_ON_INIT,
.clkdm_name = "wkup_clkdm",
};
DEFINE_STRUCT_CLK(apll54_ck, dpll_ck_parent_names, apll54_ck_ops);
static struct clk apll96_ck;
static const struct clk_ops apll96_ck_ops = {
.init = &omap2_init_clk_clkdm,
.enable = &omap2_clk_apll96_enable,
.disable = &omap2_clk_apll96_disable,
.recalc_rate = &omap2_clk_apll96_recalc,
};
static struct clk_hw_omap apll96_ck_hw = {
.hw = {
.clk = &apll96_ck,
},
.ops = &clkhwops_apll96,
.enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
.enable_bit = OMAP24XX_EN_96M_PLL_SHIFT,
.flags = ENABLE_ON_INIT,
.clkdm_name = "wkup_clkdm",
};
DEFINE_STRUCT_CLK(apll96_ck, dpll_ck_parent_names, apll96_ck_ops);
static struct clk func_96m_ck;
static const char *func_96m_ck_parent_names[] = {
"apll96_ck",
};
DEFINE_STRUCT_CLK_HW_OMAP(func_96m_ck, "wkup_clkdm");
DEFINE_STRUCT_CLK(func_96m_ck, func_96m_ck_parent_names, core_ck_ops);
static struct clk cam_fck;
static const char *cam_fck_parent_names[] = {
"func_96m_ck",
};
static struct clk_hw_omap cam_fck_hw = {
.hw = {
.clk = &cam_fck,
},
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_CAM_SHIFT,
.clkdm_name = "core_l3_clkdm",
};
DEFINE_STRUCT_CLK(cam_fck, cam_fck_parent_names, aes_ick_ops);
static struct clk cam_ick;
static struct clk_hw_omap cam_ick_hw = {
.hw = {
.clk = &cam_ick,
},
.ops = &clkhwops_iclk,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_CAM_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(cam_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk des_ick;
static struct clk_hw_omap des_ick_hw = {
.hw = {
.clk = &des_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
.enable_bit = OMAP24XX_EN_DES_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(des_ick, aes_ick_parent_names, aes_ick_ops);
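/*
* Each clksel_rate entry maps a divider (.div) to the register field
* value (.val) that selects it, with .flags gating the SoC revisions
* on which that divider is valid.
*/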
static const struct clksel_rate dsp_fck_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 6, .val = 6, .flags = RATE_IN_242X },
{ .div = 8, .val = 8, .flags = RATE_IN_242X },
{ .div = 12, .val = 12, .flags = RATE_IN_242X },
{ .div = 0 }
};
static const struct clksel dsp_fck_clksel[] = {
{ .parent = &core_ck, .rates = dsp_fck_core_rates },
{ .parent = NULL },
};
static const char *dsp_fck_parent_names[] = {
"core_ck",
};
static const struct clk_ops dsp_fck_ops = {
.init = &omap2_init_clk_clkdm,
.enable = &omap2_dflt_clk_enable,
.disable = &omap2_dflt_clk_disable,
.is_enabled = &omap2_dflt_clk_is_enabled,
.recalc_rate = &omap2_clksel_recalc,
.set_rate = &omap2_clksel_set_rate,
.round_rate = &omap2_clksel_round_rate,
};
DEFINE_CLK_OMAP_MUX_GATE(dsp_fck, "dsp_clkdm", dsp_fck_clksel,
OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
OMAP24XX_CLKSEL_DSP_MASK,
OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
OMAP24XX_CM_FCLKEN_DSP_EN_DSP_SHIFT, &clkhwops_wait,
dsp_fck_parent_names, dsp_fck_ops);
static const struct clksel dsp_ick_clksel[] = {
{ .parent = &dsp_fck, .rates = dsp_ick_rates },
{ .parent = NULL },
};
static const char *dsp_ick_parent_names[] = {
"dsp_fck",
};
DEFINE_CLK_OMAP_MUX_GATE(dsp_ick, "dsp_clkdm", dsp_ick_clksel,
OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
OMAP24XX_CLKSEL_DSP_IF_MASK,
OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_ICLKEN),
OMAP2420_EN_DSP_IPI_SHIFT, &clkhwops_iclk_wait,
dsp_ick_parent_names, dsp_fck_ops);
static const struct clksel_rate dss1_fck_sys_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate dss1_fck_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 5, .val = 5, .flags = RATE_IN_24XX },
{ .div = 6, .val = 6, .flags = RATE_IN_24XX },
{ .div = 8, .val = 8, .flags = RATE_IN_24XX },
{ .div = 9, .val = 9, .flags = RATE_IN_24XX },
{ .div = 12, .val = 12, .flags = RATE_IN_24XX },
{ .div = 16, .val = 16, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel dss1_fck_clksel[] = {
{ .parent = &sys_ck, .rates = dss1_fck_sys_rates },
{ .parent = &core_ck, .rates = dss1_fck_core_rates },
{ .parent = NULL },
};
static const char *dss1_fck_parent_names[] = {
"sys_ck", "core_ck",
};
static struct clk dss1_fck;
static const struct clk_ops dss1_fck_ops = {
.init = &omap2_init_clk_clkdm,
.enable = &omap2_dflt_clk_enable,
.disable = &omap2_dflt_clk_disable,
.is_enabled = &omap2_dflt_clk_is_enabled,
.recalc_rate = &omap2_clksel_recalc,
.get_parent = &omap2_clksel_find_parent_index,
.set_parent = &omap2_clksel_set_parent,
};
DEFINE_CLK_OMAP_MUX_GATE(dss1_fck, "dss_clkdm", dss1_fck_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
OMAP24XX_CLKSEL_DSS1_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_DSS1_SHIFT, NULL,
dss1_fck_parent_names, dss1_fck_ops);
static const struct clksel_rate dss2_fck_sys_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate dss2_fck_48m_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate func_48m_apll96_rates[] = {
{ .div = 2, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate func_48m_alt_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel func_48m_clksel[] = {
{ .parent = &apll96_ck, .rates = func_48m_apll96_rates },
{ .parent = &alt_ck, .rates = func_48m_alt_rates },
{ .parent = NULL },
};
static const char *func_48m_ck_parent_names[] = {
"apll96_ck", "alt_ck",
};
static struct clk func_48m_ck;
static const struct clk_ops func_48m_ck_ops = {
.init = &omap2_init_clk_clkdm,
.recalc_rate = &omap2_clksel_recalc,
.set_rate = &omap2_clksel_set_rate,
.round_rate = &omap2_clksel_round_rate,
.get_parent = &omap2_clksel_find_parent_index,
.set_parent = &omap2_clksel_set_parent,
};
static struct clk_hw_omap func_48m_ck_hw = {
.hw = {
.clk = &func_48m_ck,
},
.clksel = func_48m_clksel,
.clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
.clksel_mask = OMAP24XX_48M_SOURCE_MASK,
.clkdm_name = "wkup_clkdm",
};
DEFINE_STRUCT_CLK(func_48m_ck, func_48m_ck_parent_names, func_48m_ck_ops);
static const struct clksel dss2_fck_clksel[] = {
{ .parent = &sys_ck, .rates = dss2_fck_sys_rates },
{ .parent = &func_48m_ck, .rates = dss2_fck_48m_rates },
{ .parent = NULL },
};
static const char *dss2_fck_parent_names[] = {
"sys_ck", "func_48m_ck",
};
DEFINE_CLK_OMAP_MUX_GATE(dss2_fck, "dss_clkdm", dss2_fck_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
OMAP24XX_CLKSEL_DSS2_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_DSS2_SHIFT, NULL,
dss2_fck_parent_names, dss1_fck_ops);
static const char *func_54m_ck_parent_names[] = {
"apll54_ck", "alt_ck",
};
DEFINE_CLK_MUX(func_54m_ck, func_54m_ck_parent_names, NULL, 0x0,
OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
OMAP24XX_54M_SOURCE_SHIFT, OMAP24XX_54M_SOURCE_WIDTH,
0x0, NULL);
static struct clk dss_54m_fck;
static const char *dss_54m_fck_parent_names[] = {
"func_54m_ck",
};
static struct clk_hw_omap dss_54m_fck_hw = {
.hw = {
.clk = &dss_54m_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_TV_SHIFT,
.clkdm_name = "dss_clkdm",
};
DEFINE_STRUCT_CLK(dss_54m_fck, dss_54m_fck_parent_names, aes_ick_ops);
static struct clk dss_ick;
static struct clk_hw_omap dss_ick_hw = {
.hw = {
.clk = &dss_ick,
},
.ops = &clkhwops_iclk,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_DSS1_SHIFT,
.clkdm_name = "dss_clkdm",
};
DEFINE_STRUCT_CLK(dss_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk eac_fck;
static struct clk_hw_omap eac_fck_hw = {
.hw = {
.clk = &eac_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP2420_EN_EAC_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(eac_fck, cam_fck_parent_names, aes_ick_ops);
static struct clk eac_ick;
static struct clk_hw_omap eac_ick_hw = {
.hw = {
.clk = &eac_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP2420_EN_EAC_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(eac_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk emul_ck;
static struct clk_hw_omap emul_ck_hw = {
.hw = {
.clk = &emul_ck,
},
.enable_reg = OMAP2420_PRCM_CLKEMUL_CTRL,
.enable_bit = OMAP24XX_EMULATION_EN_SHIFT,
.clkdm_name = "wkup_clkdm",
};
DEFINE_STRUCT_CLK(emul_ck, dss_54m_fck_parent_names, aes_ick_ops);
DEFINE_CLK_FIXED_FACTOR(func_12m_ck, "func_48m_ck", &func_48m_ck, 0x0, 1, 4);
static struct clk fac_fck;
static const char *fac_fck_parent_names[] = {
"func_12m_ck",
};
static struct clk_hw_omap fac_fck_hw = {
.hw = {
.clk = &fac_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_FAC_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(fac_fck, fac_fck_parent_names, aes_ick_ops);
static struct clk fac_ick;
static struct clk_hw_omap fac_ick_hw = {
.hw = {
.clk = &fac_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_FAC_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(fac_ick, aes_ick_parent_names, aes_ick_ops);
static const struct clksel gfx_fck_clksel[] = {
{ .parent = &core_l3_ck, .rates = gfx_l3_rates },
{ .parent = NULL },
};
static const char *gfx_2d_fck_parent_names[] = {
"core_l3_ck",
};
DEFINE_CLK_OMAP_MUX_GATE(gfx_2d_fck, "gfx_clkdm", gfx_fck_clksel,
OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
OMAP_CLKSEL_GFX_MASK,
OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
OMAP24XX_EN_2D_SHIFT, &clkhwops_wait,
gfx_2d_fck_parent_names, dsp_fck_ops);
DEFINE_CLK_OMAP_MUX_GATE(gfx_3d_fck, "gfx_clkdm", gfx_fck_clksel,
OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
OMAP_CLKSEL_GFX_MASK,
OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
OMAP24XX_EN_3D_SHIFT, &clkhwops_wait,
gfx_2d_fck_parent_names, dsp_fck_ops);
static struct clk gfx_ick;
static const char *gfx_ick_parent_names[] = {
"core_l3_ck",
};
static struct clk_hw_omap gfx_ick_hw = {
.hw = {
.clk = &gfx_ick,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
.enable_bit = OMAP_EN_GFX_SHIFT,
.clkdm_name = "gfx_clkdm",
};
DEFINE_STRUCT_CLK(gfx_ick, gfx_ick_parent_names, aes_ick_ops);
static struct clk gpios_fck;
static const char *gpios_fck_parent_names[] = {
"func_32k_ck",
};
static struct clk_hw_omap gpios_fck_hw = {
.hw = {
.clk = &gpios_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
.enable_bit = OMAP24XX_EN_GPIOS_SHIFT,
.clkdm_name = "wkup_clkdm",
};
DEFINE_STRUCT_CLK(gpios_fck, gpios_fck_parent_names, aes_ick_ops);
static struct clk gpios_ick;
static const char *gpios_ick_parent_names[] = {
"sys_ck",
};
static struct clk_hw_omap gpios_ick_hw = {
.hw = {
.clk = &gpios_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
.enable_bit = OMAP24XX_EN_GPIOS_SHIFT,
.clkdm_name = "wkup_clkdm",
};
DEFINE_STRUCT_CLK(gpios_ick, gpios_ick_parent_names, aes_ick_ops);
static struct clk gpmc_fck;
static struct clk_hw_omap gpmc_fck_hw = {
.hw = {
.clk = &gpmc_fck,
},
.ops = &clkhwops_iclk,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
.enable_bit = OMAP24XX_AUTO_GPMC_SHIFT,
.flags = ENABLE_ON_INIT,
.clkdm_name = "core_l3_clkdm",
};
DEFINE_STRUCT_CLK(gpmc_fck, gfx_ick_parent_names, core_ck_ops);
static const struct clksel_rate gpt_alt_rates[] = {
{ .div = 1, .val = 2, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel omap24xx_gpt_clksel[] = {
{ .parent = &func_32k_ck, .rates = gpt_32k_rates },
{ .parent = &sys_ck, .rates = gpt_sys_rates },
{ .parent = &alt_ck, .rates = gpt_alt_rates },
{ .parent = NULL },
};
static const char *gpt10_fck_parent_names[] = {
"func_32k_ck", "sys_ck", "alt_ck",
};
DEFINE_CLK_OMAP_MUX_GATE(gpt10_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
OMAP24XX_CLKSEL_GPT10_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_GPT10_SHIFT, &clkhwops_wait,
gpt10_fck_parent_names, dss1_fck_ops);
static struct clk gpt10_ick;
static struct clk_hw_omap gpt10_ick_hw = {
.hw = {
.clk = &gpt10_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT10_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(gpt10_ick, aes_ick_parent_names, aes_ick_ops);
DEFINE_CLK_OMAP_MUX_GATE(gpt11_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
OMAP24XX_CLKSEL_GPT11_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_GPT11_SHIFT, &clkhwops_wait,
gpt10_fck_parent_names, dss1_fck_ops);
static struct clk gpt11_ick;
static struct clk_hw_omap gpt11_ick_hw = {
.hw = {
.clk = &gpt11_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT11_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(gpt11_ick, aes_ick_parent_names, aes_ick_ops);
DEFINE_CLK_OMAP_MUX_GATE(gpt12_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
OMAP24XX_CLKSEL_GPT12_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_GPT12_SHIFT, &clkhwops_wait,
gpt10_fck_parent_names, dss1_fck_ops);
static struct clk gpt12_ick;
static struct clk_hw_omap gpt12_ick_hw = {
.hw = {
.clk = &gpt12_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT12_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(gpt12_ick, aes_ick_parent_names, aes_ick_ops);
static const struct clk_ops gpt1_fck_ops = {
.init = &omap2_init_clk_clkdm,
.enable = &omap2_dflt_clk_enable,
.disable = &omap2_dflt_clk_disable,
.is_enabled = &omap2_dflt_clk_is_enabled,
.recalc_rate = &omap2_clksel_recalc,
.set_rate = &omap2_clksel_set_rate,
.round_rate = &omap2_clksel_round_rate,
.get_parent = &omap2_clksel_find_parent_index,
.set_parent = &omap2_clksel_set_parent,
};
DEFINE_CLK_OMAP_MUX_GATE(gpt1_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL1),
OMAP24XX_CLKSEL_GPT1_MASK,
OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
OMAP24XX_EN_GPT1_SHIFT, &clkhwops_wait,
gpt10_fck_parent_names, gpt1_fck_ops);
static struct clk gpt1_ick;
static struct clk_hw_omap gpt1_ick_hw = {
.hw = {
.clk = &gpt1_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
.enable_bit = OMAP24XX_EN_GPT1_SHIFT,
.clkdm_name = "wkup_clkdm",
};
DEFINE_STRUCT_CLK(gpt1_ick, gpios_ick_parent_names, aes_ick_ops);
DEFINE_CLK_OMAP_MUX_GATE(gpt2_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
OMAP24XX_CLKSEL_GPT2_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_GPT2_SHIFT, &clkhwops_wait,
gpt10_fck_parent_names, dss1_fck_ops);
static struct clk gpt2_ick;
static struct clk_hw_omap gpt2_ick_hw = {
.hw = {
.clk = &gpt2_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT2_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(gpt2_ick, aes_ick_parent_names, aes_ick_ops);
DEFINE_CLK_OMAP_MUX_GATE(gpt3_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
OMAP24XX_CLKSEL_GPT3_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_GPT3_SHIFT, &clkhwops_wait,
gpt10_fck_parent_names, dss1_fck_ops);
static struct clk gpt3_ick;
static struct clk_hw_omap gpt3_ick_hw = {
.hw = {
.clk = &gpt3_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT3_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(gpt3_ick, aes_ick_parent_names, aes_ick_ops);
DEFINE_CLK_OMAP_MUX_GATE(gpt4_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
OMAP24XX_CLKSEL_GPT4_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_GPT4_SHIFT, &clkhwops_wait,
gpt10_fck_parent_names, dss1_fck_ops);
static struct clk gpt4_ick;
static struct clk_hw_omap gpt4_ick_hw = {
.hw = {
.clk = &gpt4_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT4_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(gpt4_ick, aes_ick_parent_names, aes_ick_ops);
DEFINE_CLK_OMAP_MUX_GATE(gpt5_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
OMAP24XX_CLKSEL_GPT5_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_GPT5_SHIFT, &clkhwops_wait,
gpt10_fck_parent_names, dss1_fck_ops);
static struct clk gpt5_ick;
static struct clk_hw_omap gpt5_ick_hw = {
.hw = {
.clk = &gpt5_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT5_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(gpt5_ick, aes_ick_parent_names, aes_ick_ops);
DEFINE_CLK_OMAP_MUX_GATE(gpt6_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
OMAP24XX_CLKSEL_GPT6_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_GPT6_SHIFT, &clkhwops_wait,
gpt10_fck_parent_names, dss1_fck_ops);
static struct clk gpt6_ick;
static struct clk_hw_omap gpt6_ick_hw = {
.hw = {
.clk = &gpt6_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT6_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(gpt6_ick, aes_ick_parent_names, aes_ick_ops);
DEFINE_CLK_OMAP_MUX_GATE(gpt7_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
OMAP24XX_CLKSEL_GPT7_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_GPT7_SHIFT, &clkhwops_wait,
gpt10_fck_parent_names, dss1_fck_ops);
static struct clk gpt7_ick;
static struct clk_hw_omap gpt7_ick_hw = {
.hw = {
.clk = &gpt7_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT7_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(gpt7_ick, aes_ick_parent_names, aes_ick_ops);
DEFINE_CLK_OMAP_MUX_GATE(gpt8_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
OMAP24XX_CLKSEL_GPT8_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_GPT8_SHIFT, &clkhwops_wait,
gpt10_fck_parent_names, dss1_fck_ops);
static struct clk gpt8_ick;
static struct clk_hw_omap gpt8_ick_hw = {
.hw = {
.clk = &gpt8_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT8_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(gpt8_ick, aes_ick_parent_names, aes_ick_ops);
DEFINE_CLK_OMAP_MUX_GATE(gpt9_fck, "core_l4_clkdm", omap24xx_gpt_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
OMAP24XX_CLKSEL_GPT9_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_GPT9_SHIFT, &clkhwops_wait,
gpt10_fck_parent_names, dss1_fck_ops);
static struct clk gpt9_ick;
static struct clk_hw_omap gpt9_ick_hw = {
.hw = {
.clk = &gpt9_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT9_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(gpt9_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk hdq_fck;
static struct clk_hw_omap hdq_fck_hw = {
.hw = {
.clk = &hdq_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_HDQ_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(hdq_fck, fac_fck_parent_names, aes_ick_ops);
static struct clk hdq_ick;
static struct clk_hw_omap hdq_ick_hw = {
.hw = {
.clk = &hdq_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_HDQ_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(hdq_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk i2c1_fck;
static struct clk_hw_omap i2c1_fck_hw = {
.hw = {
.clk = &i2c1_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP2420_EN_I2C1_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(i2c1_fck, fac_fck_parent_names, aes_ick_ops);
static struct clk i2c1_ick;
static struct clk_hw_omap i2c1_ick_hw = {
.hw = {
.clk = &i2c1_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP2420_EN_I2C1_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(i2c1_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk i2c2_fck;
static struct clk_hw_omap i2c2_fck_hw = {
.hw = {
.clk = &i2c2_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP2420_EN_I2C2_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(i2c2_fck, fac_fck_parent_names, aes_ick_ops);
static struct clk i2c2_ick;
static struct clk_hw_omap i2c2_ick_hw = {
.hw = {
.clk = &i2c2_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP2420_EN_I2C2_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(i2c2_ick, aes_ick_parent_names, aes_ick_ops);
DEFINE_CLK_OMAP_MUX_GATE(iva1_ifck, "iva1_clkdm", dsp_fck_clksel,
OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
OMAP2420_CLKSEL_IVA_MASK,
OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
OMAP2420_EN_IVA_COP_SHIFT, &clkhwops_wait,
dsp_fck_parent_names, dsp_fck_ops);
static struct clk iva1_mpu_int_ifck;
static const char *iva1_mpu_int_ifck_parent_names[] = {
"iva1_ifck",
};
static const struct clk_ops iva1_mpu_int_ifck_ops = {
.init = &omap2_init_clk_clkdm,
.enable = &omap2_dflt_clk_enable,
.disable = &omap2_dflt_clk_disable,
.is_enabled = &omap2_dflt_clk_is_enabled,
.recalc_rate = &omap_fixed_divisor_recalc,
};
static struct clk_hw_omap iva1_mpu_int_ifck_hw = {
.hw = {
.clk = &iva1_mpu_int_ifck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
.enable_bit = OMAP2420_EN_IVA_MPU_SHIFT,
.clkdm_name = "iva1_clkdm",
.fixed_div = 2,
};
DEFINE_STRUCT_CLK(iva1_mpu_int_ifck, iva1_mpu_int_ifck_parent_names,
iva1_mpu_int_ifck_ops);
static struct clk mailboxes_ick;
static struct clk_hw_omap mailboxes_ick_hw = {
.hw = {
.clk = &mailboxes_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_MAILBOXES_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(mailboxes_ick, aes_ick_parent_names, aes_ick_ops);
static const struct clksel_rate common_mcbsp_96m_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel mcbsp_fck_clksel[] = {
{ .parent = &func_96m_ck, .rates = common_mcbsp_96m_rates },
{ .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
{ .parent = NULL },
};
static const char *mcbsp1_fck_parent_names[] = {
"func_96m_ck", "mcbsp_clks",
};
DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "core_l4_clkdm", mcbsp_fck_clksel,
OMAP242X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
OMAP2_MCBSP1_CLKS_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_MCBSP1_SHIFT, &clkhwops_wait,
mcbsp1_fck_parent_names, dss1_fck_ops);
static struct clk mcbsp1_ick;
static struct clk_hw_omap mcbsp1_ick_hw = {
.hw = {
.clk = &mcbsp1_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_MCBSP1_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(mcbsp1_ick, aes_ick_parent_names, aes_ick_ops);
DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "core_l4_clkdm", mcbsp_fck_clksel,
OMAP242X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
OMAP2_MCBSP2_CLKS_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP24XX_EN_MCBSP2_SHIFT, &clkhwops_wait,
mcbsp1_fck_parent_names, dss1_fck_ops);
static struct clk mcbsp2_ick;
static struct clk_hw_omap mcbsp2_ick_hw = {
.hw = {
.clk = &mcbsp2_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_MCBSP2_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(mcbsp2_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk mcspi1_fck;
static const char *mcspi1_fck_parent_names[] = {
"func_48m_ck",
};
static struct clk_hw_omap mcspi1_fck_hw = {
.hw = {
.clk = &mcspi1_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_MCSPI1_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(mcspi1_fck, mcspi1_fck_parent_names, aes_ick_ops);
static struct clk mcspi1_ick;
static struct clk_hw_omap mcspi1_ick_hw = {
.hw = {
.clk = &mcspi1_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_MCSPI1_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(mcspi1_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk mcspi2_fck;
static struct clk_hw_omap mcspi2_fck_hw = {
.hw = {
.clk = &mcspi2_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_MCSPI2_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(mcspi2_fck, mcspi1_fck_parent_names, aes_ick_ops);
static struct clk mcspi2_ick;
static struct clk_hw_omap mcspi2_ick_hw = {
.hw = {
.clk = &mcspi2_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_MCSPI2_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(mcspi2_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk mmc_fck;
static struct clk_hw_omap mmc_fck_hw = {
.hw = {
.clk = &mmc_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP2420_EN_MMC_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(mmc_fck, cam_fck_parent_names, aes_ick_ops);
static struct clk mmc_ick;
static struct clk_hw_omap mmc_ick_hw = {
.hw = {
.clk = &mmc_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP2420_EN_MMC_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(mmc_ick, aes_ick_parent_names, aes_ick_ops);
DEFINE_CLK_DIVIDER(mpu_ck, "core_ck", &core_ck, 0x0,
OMAP_CM_REGADDR(MPU_MOD, CM_CLKSEL),
OMAP24XX_CLKSEL_MPU_SHIFT, OMAP24XX_CLKSEL_MPU_WIDTH,
CLK_DIVIDER_ONE_BASED, NULL);
static struct clk mpu_wdt_fck;
static struct clk_hw_omap mpu_wdt_fck_hw = {
.hw = {
.clk = &mpu_wdt_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
.enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
.clkdm_name = "wkup_clkdm",
};
DEFINE_STRUCT_CLK(mpu_wdt_fck, gpios_fck_parent_names, aes_ick_ops);
static struct clk mpu_wdt_ick;
static struct clk_hw_omap mpu_wdt_ick_hw = {
.hw = {
.clk = &mpu_wdt_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
.enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
.clkdm_name = "wkup_clkdm",
};
DEFINE_STRUCT_CLK(mpu_wdt_ick, gpios_ick_parent_names, aes_ick_ops);
static struct clk mspro_fck;
static struct clk_hw_omap mspro_fck_hw = {
.hw = {
.clk = &mspro_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_MSPRO_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(mspro_fck, cam_fck_parent_names, aes_ick_ops);
static struct clk mspro_ick;
static struct clk_hw_omap mspro_ick_hw = {
.hw = {
.clk = &mspro_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_MSPRO_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(mspro_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk omapctrl_ick;
static struct clk_hw_omap omapctrl_ick_hw = {
.hw = {
.clk = &omapctrl_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
.enable_bit = OMAP24XX_EN_OMAPCTRL_SHIFT,
.flags = ENABLE_ON_INIT,
.clkdm_name = "wkup_clkdm",
};
DEFINE_STRUCT_CLK(omapctrl_ick, gpios_ick_parent_names, aes_ick_ops);
static struct clk pka_ick;
static struct clk_hw_omap pka_ick_hw = {
.hw = {
.clk = &pka_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
.enable_bit = OMAP24XX_EN_PKA_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(pka_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk rng_ick;
static struct clk_hw_omap rng_ick_hw = {
.hw = {
.clk = &rng_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
.enable_bit = OMAP24XX_EN_RNG_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(rng_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk sdma_fck;
DEFINE_STRUCT_CLK_HW_OMAP(sdma_fck, "core_l3_clkdm");
DEFINE_STRUCT_CLK(sdma_fck, gfx_ick_parent_names, core_ck_ops);
static struct clk sdma_ick;
static struct clk_hw_omap sdma_ick_hw = {
.hw = {
.clk = &sdma_ick,
},
.ops = &clkhwops_iclk,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
.enable_bit = OMAP24XX_AUTO_SDMA_SHIFT,
.clkdm_name = "core_l3_clkdm",
};
DEFINE_STRUCT_CLK(sdma_ick, gfx_ick_parent_names, core_ck_ops);
static struct clk sdrc_ick;
static struct clk_hw_omap sdrc_ick_hw = {
.hw = {
.clk = &sdrc_ick,
},
.ops = &clkhwops_iclk,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
.enable_bit = OMAP24XX_AUTO_SDRC_SHIFT,
.flags = ENABLE_ON_INIT,
.clkdm_name = "core_l3_clkdm",
};
DEFINE_STRUCT_CLK(sdrc_ick, gfx_ick_parent_names, core_ck_ops);
static struct clk sha_ick;
static struct clk_hw_omap sha_ick_hw = {
.hw = {
.clk = &sha_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
.enable_bit = OMAP24XX_EN_SHA_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(sha_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk ssi_l4_ick;
static struct clk_hw_omap ssi_l4_ick_hw = {
.hw = {
.clk = &ssi_l4_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
.enable_bit = OMAP24XX_EN_SSI_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(ssi_l4_ick, aes_ick_parent_names, aes_ick_ops);
static const struct clksel_rate ssi_ssr_sst_fck_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 6, .val = 6, .flags = RATE_IN_242X },
{ .div = 8, .val = 8, .flags = RATE_IN_242X },
{ .div = 0 }
};
static const struct clksel ssi_ssr_sst_fck_clksel[] = {
{ .parent = &core_ck, .rates = ssi_ssr_sst_fck_core_rates },
{ .parent = NULL },
};
static const char *ssi_ssr_sst_fck_parent_names[] = {
"core_ck",
};
DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_sst_fck, "core_l3_clkdm",
ssi_ssr_sst_fck_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
OMAP24XX_CLKSEL_SSI_MASK,
OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
OMAP24XX_EN_SSI_SHIFT, &clkhwops_wait,
ssi_ssr_sst_fck_parent_names, dsp_fck_ops);
static struct clk sync_32k_ick;
static struct clk_hw_omap sync_32k_ick_hw = {
.hw = {
.clk = &sync_32k_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
.enable_bit = OMAP24XX_EN_32KSYNC_SHIFT,
.flags = ENABLE_ON_INIT,
.clkdm_name = "wkup_clkdm",
};
DEFINE_STRUCT_CLK(sync_32k_ick, gpios_ick_parent_names, aes_ick_ops);
static const struct clksel_rate common_clkout_src_core_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_clkout_src_sys_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_clkout_src_96m_rates[] = {
{ .div = 1, .val = 2, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_clkout_src_54m_rates[] = {
{ .div = 1, .val = 3, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel common_clkout_src_clksel[] = {
{ .parent = &core_ck, .rates = common_clkout_src_core_rates },
{ .parent = &sys_ck, .rates = common_clkout_src_sys_rates },
{ .parent = &func_96m_ck, .rates = common_clkout_src_96m_rates },
{ .parent = &func_54m_ck, .rates = common_clkout_src_54m_rates },
{ .parent = NULL },
};
static const char *sys_clkout_src_parent_names[] = {
"core_ck", "sys_ck", "func_96m_ck", "func_54m_ck",
};
DEFINE_CLK_OMAP_MUX_GATE(sys_clkout_src, "wkup_clkdm", common_clkout_src_clksel,
OMAP2420_PRCM_CLKOUT_CTRL, OMAP24XX_CLKOUT_SOURCE_MASK,
OMAP2420_PRCM_CLKOUT_CTRL, OMAP24XX_CLKOUT_EN_SHIFT,
NULL, sys_clkout_src_parent_names, gpt1_fck_ops);
DEFINE_CLK_DIVIDER(sys_clkout, "sys_clkout_src", &sys_clkout_src, 0x0,
OMAP2420_PRCM_CLKOUT_CTRL, OMAP24XX_CLKOUT_DIV_SHIFT,
OMAP24XX_CLKOUT_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
DEFINE_CLK_OMAP_MUX_GATE(sys_clkout2_src, "wkup_clkdm",
common_clkout_src_clksel, OMAP2420_PRCM_CLKOUT_CTRL,
OMAP2420_CLKOUT2_SOURCE_MASK,
OMAP2420_PRCM_CLKOUT_CTRL, OMAP2420_CLKOUT2_EN_SHIFT,
NULL, sys_clkout_src_parent_names, gpt1_fck_ops);
DEFINE_CLK_DIVIDER(sys_clkout2, "sys_clkout2_src", &sys_clkout2_src, 0x0,
OMAP2420_PRCM_CLKOUT_CTRL, OMAP2420_CLKOUT2_DIV_SHIFT,
OMAP2420_CLKOUT2_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
static struct clk uart1_fck;
static struct clk_hw_omap uart1_fck_hw = {
.hw = {
.clk = &uart1_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_UART1_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(uart1_fck, mcspi1_fck_parent_names, aes_ick_ops);
static struct clk uart1_ick;
static struct clk_hw_omap uart1_ick_hw = {
.hw = {
.clk = &uart1_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_UART1_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(uart1_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk uart2_fck;
static struct clk_hw_omap uart2_fck_hw = {
.hw = {
.clk = &uart2_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_UART2_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(uart2_fck, mcspi1_fck_parent_names, aes_ick_ops);
static struct clk uart2_ick;
static struct clk_hw_omap uart2_ick_hw = {
.hw = {
.clk = &uart2_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_UART2_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(uart2_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk uart3_fck;
static struct clk_hw_omap uart3_fck_hw = {
.hw = {
.clk = &uart3_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
.enable_bit = OMAP24XX_EN_UART3_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(uart3_fck, mcspi1_fck_parent_names, aes_ick_ops);
static struct clk uart3_ick;
static struct clk_hw_omap uart3_ick_hw = {
.hw = {
.clk = &uart3_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
.enable_bit = OMAP24XX_EN_UART3_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(uart3_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk usb_fck;
static struct clk_hw_omap usb_fck_hw = {
.hw = {
.clk = &usb_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
.enable_bit = OMAP24XX_EN_USB_SHIFT,
.clkdm_name = "core_l3_clkdm",
};
DEFINE_STRUCT_CLK(usb_fck, mcspi1_fck_parent_names, aes_ick_ops);
static const struct clksel_rate usb_l4_ick_core_l3_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel usb_l4_ick_clksel[] = {
{ .parent = &core_l3_ck, .rates = usb_l4_ick_core_l3_rates },
{ .parent = NULL },
};
static const char *usb_l4_ick_parent_names[] = {
"core_l3_ck",
};
DEFINE_CLK_OMAP_MUX_GATE(usb_l4_ick, "core_l4_clkdm", usb_l4_ick_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
OMAP24XX_CLKSEL_USB_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
OMAP24XX_EN_USB_SHIFT, &clkhwops_iclk_wait,
usb_l4_ick_parent_names, dsp_fck_ops);
static struct clk virt_prcm_set;
static const char *virt_prcm_set_parent_names[] = {
"mpu_ck",
};
static const struct clk_ops virt_prcm_set_ops = {
.recalc_rate = &omap2_table_mpu_recalc,
.set_rate = &omap2_select_table_rate,
.round_rate = &omap2_round_to_table_rate,
};
DEFINE_STRUCT_CLK_HW_OMAP(virt_prcm_set, NULL);
DEFINE_STRUCT_CLK(virt_prcm_set, virt_prcm_set_parent_names, virt_prcm_set_ops);
static const struct clksel_rate vlynq_fck_96m_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_242X },
{ .div = 0 }
};
static const struct clksel_rate vlynq_fck_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_242X },
{ .div = 2, .val = 2, .flags = RATE_IN_242X },
{ .div = 3, .val = 3, .flags = RATE_IN_242X },
{ .div = 4, .val = 4, .flags = RATE_IN_242X },
{ .div = 6, .val = 6, .flags = RATE_IN_242X },
{ .div = 8, .val = 8, .flags = RATE_IN_242X },
{ .div = 9, .val = 9, .flags = RATE_IN_242X },
{ .div = 12, .val = 12, .flags = RATE_IN_242X },
{ .div = 16, .val = 16, .flags = RATE_IN_242X },
{ .div = 18, .val = 18, .flags = RATE_IN_242X },
{ .div = 0 }
};
static const struct clksel vlynq_fck_clksel[] = {
{ .parent = &func_96m_ck, .rates = vlynq_fck_96m_rates },
{ .parent = &core_ck, .rates = vlynq_fck_core_rates },
{ .parent = NULL },
};
static const char *vlynq_fck_parent_names[] = {
"func_96m_ck", "core_ck",
};
DEFINE_CLK_OMAP_MUX_GATE(vlynq_fck, "core_l3_clkdm", vlynq_fck_clksel,
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
OMAP2420_CLKSEL_VLYNQ_MASK,
OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
OMAP2420_EN_VLYNQ_SHIFT, &clkhwops_wait,
vlynq_fck_parent_names, dss1_fck_ops);
static struct clk vlynq_ick;
static struct clk_hw_omap vlynq_ick_hw = {
.hw = {
.clk = &vlynq_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP2420_EN_VLYNQ_SHIFT,
.clkdm_name = "core_l3_clkdm",
};
DEFINE_STRUCT_CLK(vlynq_ick, gfx_ick_parent_names, aes_ick_ops);
static struct clk wdt1_ick;
static struct clk_hw_omap wdt1_ick_hw = {
.hw = {
.clk = &wdt1_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
.enable_bit = OMAP24XX_EN_WDT1_SHIFT,
.clkdm_name = "wkup_clkdm",
};
DEFINE_STRUCT_CLK(wdt1_ick, gpios_ick_parent_names, aes_ick_ops);
static struct clk wdt3_fck;
static struct clk_hw_omap wdt3_fck_hw = {
.hw = {
.clk = &wdt3_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP2420_EN_WDT3_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(wdt3_fck, gpios_fck_parent_names, aes_ick_ops);
static struct clk wdt3_ick;
static struct clk_hw_omap wdt3_ick_hw = {
.hw = {
.clk = &wdt3_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP2420_EN_WDT3_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(wdt3_ick, aes_ick_parent_names, aes_ick_ops);
static struct clk wdt4_fck;
static struct clk_hw_omap wdt4_fck_hw = {
.hw = {
.clk = &wdt4_fck,
},
.ops = &clkhwops_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_WDT4_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(wdt4_fck, gpios_fck_parent_names, aes_ick_ops);
static struct clk wdt4_ick;
static struct clk_hw_omap wdt4_ick_hw = {
.hw = {
.clk = &wdt4_ick,
},
.ops = &clkhwops_iclk_wait,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_WDT4_SHIFT,
.clkdm_name = "core_l4_clkdm",
};
DEFINE_STRUCT_CLK(wdt4_ick, aes_ick_parent_names, aes_ick_ops);
/*
* clkdev integration
*/
static struct omap_clk omap2420_clks[] = {
/* external root sources */
CLK(NULL, "func_32k_ck", &func_32k_ck),
CLK(NULL, "secure_32k_ck", &secure_32k_ck),
CLK(NULL, "osc_ck", &osc_ck),
CLK(NULL, "sys_ck", &sys_ck),
CLK(NULL, "alt_ck", &alt_ck),
CLK(NULL, "mcbsp_clks", &mcbsp_clks),
/* internal analog sources */
CLK(NULL, "dpll_ck", &dpll_ck),
CLK(NULL, "apll96_ck", &apll96_ck),
CLK(NULL, "apll54_ck", &apll54_ck),
/* internal prcm root sources */
CLK(NULL, "func_54m_ck", &func_54m_ck),
CLK(NULL, "core_ck", &core_ck),
CLK(NULL, "func_96m_ck", &func_96m_ck),
CLK(NULL, "func_48m_ck", &func_48m_ck),
CLK(NULL, "func_12m_ck", &func_12m_ck),
CLK(NULL, "sys_clkout_src", &sys_clkout_src),
CLK(NULL, "sys_clkout", &sys_clkout),
CLK(NULL, "sys_clkout2_src", &sys_clkout2_src),
CLK(NULL, "sys_clkout2", &sys_clkout2),
CLK(NULL, "emul_ck", &emul_ck),
/* mpu domain clocks */
CLK(NULL, "mpu_ck", &mpu_ck),
/* dsp domain clocks */
CLK(NULL, "dsp_fck", &dsp_fck),
CLK(NULL, "dsp_ick", &dsp_ick),
CLK(NULL, "iva1_ifck", &iva1_ifck),
CLK(NULL, "iva1_mpu_int_ifck", &iva1_mpu_int_ifck),
/* GFX domain clocks */
CLK(NULL, "gfx_3d_fck", &gfx_3d_fck),
CLK(NULL, "gfx_2d_fck", &gfx_2d_fck),
CLK(NULL, "gfx_ick", &gfx_ick),
/* DSS domain clocks */
CLK("omapdss_dss", "ick", &dss_ick),
CLK(NULL, "dss_ick", &dss_ick),
CLK(NULL, "dss1_fck", &dss1_fck),
CLK(NULL, "dss2_fck", &dss2_fck),
CLK(NULL, "dss_54m_fck", &dss_54m_fck),
/* L3 domain clocks */
CLK(NULL, "core_l3_ck", &core_l3_ck),
CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck),
CLK(NULL, "usb_l4_ick", &usb_l4_ick),
/* L4 domain clocks */
CLK(NULL, "l4_ck", &l4_ck),
CLK(NULL, "ssi_l4_ick", &ssi_l4_ick),
/* virtual meta-group clock */
CLK(NULL, "virt_prcm_set", &virt_prcm_set),
/* general l4 interface ck, multi-parent functional clk */
CLK(NULL, "gpt1_ick", &gpt1_ick),
CLK(NULL, "gpt1_fck", &gpt1_fck),
CLK(NULL, "gpt2_ick", &gpt2_ick),
CLK(NULL, "gpt2_fck", &gpt2_fck),
CLK(NULL, "gpt3_ick", &gpt3_ick),
CLK(NULL, "gpt3_fck", &gpt3_fck),
CLK(NULL, "gpt4_ick", &gpt4_ick),
CLK(NULL, "gpt4_fck", &gpt4_fck),
CLK(NULL, "gpt5_ick", &gpt5_ick),
CLK(NULL, "gpt5_fck", &gpt5_fck),
CLK(NULL, "gpt6_ick", &gpt6_ick),
CLK(NULL, "gpt6_fck", &gpt6_fck),
CLK(NULL, "gpt7_ick", &gpt7_ick),
CLK(NULL, "gpt7_fck", &gpt7_fck),
CLK(NULL, "gpt8_ick", &gpt8_ick),
CLK(NULL, "gpt8_fck", &gpt8_fck),
CLK(NULL, "gpt9_ick", &gpt9_ick),
CLK(NULL, "gpt9_fck", &gpt9_fck),
CLK(NULL, "gpt10_ick", &gpt10_ick),
CLK(NULL, "gpt10_fck", &gpt10_fck),
CLK(NULL, "gpt11_ick", &gpt11_ick),
CLK(NULL, "gpt11_fck", &gpt11_fck),
CLK(NULL, "gpt12_ick", &gpt12_ick),
CLK(NULL, "gpt12_fck", &gpt12_fck),
CLK("omap-mcbsp.1", "ick", &mcbsp1_ick),
CLK(NULL, "mcbsp1_ick", &mcbsp1_ick),
CLK(NULL, "mcbsp1_fck", &mcbsp1_fck),
CLK("omap-mcbsp.2", "ick", &mcbsp2_ick),
CLK(NULL, "mcbsp2_ick", &mcbsp2_ick),
CLK(NULL, "mcbsp2_fck", &mcbsp2_fck),
CLK("omap2_mcspi.1", "ick", &mcspi1_ick),
CLK(NULL, "mcspi1_ick", &mcspi1_ick),
CLK(NULL, "mcspi1_fck", &mcspi1_fck),
CLK("omap2_mcspi.2", "ick", &mcspi2_ick),
CLK(NULL, "mcspi2_ick", &mcspi2_ick),
CLK(NULL, "mcspi2_fck", &mcspi2_fck),
CLK(NULL, "uart1_ick", &uart1_ick),
CLK(NULL, "uart1_fck", &uart1_fck),
CLK(NULL, "uart2_ick", &uart2_ick),
CLK(NULL, "uart2_fck", &uart2_fck),
CLK(NULL, "uart3_ick", &uart3_ick),
CLK(NULL, "uart3_fck", &uart3_fck),
CLK(NULL, "gpios_ick", &gpios_ick),
CLK(NULL, "gpios_fck", &gpios_fck),
CLK("omap_wdt", "ick", &mpu_wdt_ick),
CLK(NULL, "mpu_wdt_ick", &mpu_wdt_ick),
CLK(NULL, "mpu_wdt_fck", &mpu_wdt_fck),
CLK(NULL, "sync_32k_ick", &sync_32k_ick),
CLK(NULL, "wdt1_ick", &wdt1_ick),
CLK(NULL, "omapctrl_ick", &omapctrl_ick),
CLK("omap24xxcam", "fck", &cam_fck),
CLK(NULL, "cam_fck", &cam_fck),
CLK("omap24xxcam", "ick", &cam_ick),
CLK(NULL, "cam_ick", &cam_ick),
CLK(NULL, "mailboxes_ick", &mailboxes_ick),
CLK(NULL, "wdt4_ick", &wdt4_ick),
CLK(NULL, "wdt4_fck", &wdt4_fck),
CLK(NULL, "wdt3_ick", &wdt3_ick),
CLK(NULL, "wdt3_fck", &wdt3_fck),
CLK(NULL, "mspro_ick", &mspro_ick),
CLK(NULL, "mspro_fck", &mspro_fck),
CLK("mmci-omap.0", "ick", &mmc_ick),
CLK(NULL, "mmc_ick", &mmc_ick),
CLK("mmci-omap.0", "fck", &mmc_fck),
CLK(NULL, "mmc_fck", &mmc_fck),
CLK(NULL, "fac_ick", &fac_ick),
CLK(NULL, "fac_fck", &fac_fck),
CLK(NULL, "eac_ick", &eac_ick),
CLK(NULL, "eac_fck", &eac_fck),
CLK("omap_hdq.0", "ick", &hdq_ick),
CLK(NULL, "hdq_ick", &hdq_ick),
CLK("omap_hdq.0", "fck", &hdq_fck),
CLK(NULL, "hdq_fck", &hdq_fck),
CLK("omap_i2c.1", "ick", &i2c1_ick),
CLK(NULL, "i2c1_ick", &i2c1_ick),
CLK(NULL, "i2c1_fck", &i2c1_fck),
CLK("omap_i2c.2", "ick", &i2c2_ick),
CLK(NULL, "i2c2_ick", &i2c2_ick),
CLK(NULL, "i2c2_fck", &i2c2_fck),
CLK(NULL, "gpmc_fck", &gpmc_fck),
CLK(NULL, "sdma_fck", &sdma_fck),
CLK(NULL, "sdma_ick", &sdma_ick),
CLK(NULL, "sdrc_ick", &sdrc_ick),
CLK(NULL, "vlynq_ick", &vlynq_ick),
CLK(NULL, "vlynq_fck", &vlynq_fck),
CLK(NULL, "des_ick", &des_ick),
CLK("omap-sham", "ick", &sha_ick),
CLK(NULL, "sha_ick", &sha_ick),
CLK("omap_rng", "ick", &rng_ick),
CLK(NULL, "rng_ick", &rng_ick),
CLK("omap-aes", "ick", &aes_ick),
CLK(NULL, "aes_ick", &aes_ick),
CLK(NULL, "pka_ick", &pka_ick),
CLK(NULL, "usb_fck", &usb_fck),
CLK("musb-hdrc", "fck", &osc_ck),
CLK(NULL, "timer_32k_ck", &func_32k_ck),
CLK(NULL, "timer_sys_ck", &sys_ck),
CLK(NULL, "timer_ext_ck", &alt_ck),
CLK(NULL, "cpufreq_ck", &virt_prcm_set),
};
static const char *enable_init_clks[] = {
"apll96_ck",
"apll54_ck",
"sync_32k_ick",
"omapctrl_ick",
"gpmc_fck",
"sdrc_ick",
};
/*
* init code
*/
int __init omap2420_clk_init(void)
{
prcm_clksrc_ctrl = OMAP2420_PRCM_CLKSRC_CTRL;
cpu_mask = RATE_IN_242X;
rate_table = omap2420_rate_table;
omap2xxx_clkt_dpllcore_init(&dpll_ck_hw.hw);
omap2xxx_clkt_vps_check_bootloader_rates();
omap_clocks_register(omap2420_clks, ARRAY_SIZE(omap2420_clks));
omap2xxx_clkt_vps_late_init();
omap2_clk_disable_autoidle_all();
omap2_clk_enable_init_clocks(enable_init_clks,
ARRAY_SIZE(enable_init_clks));
pr_info("Clocking rate (Crystal/DPLL/MPU): %ld.%01ld/%ld/%ld MHz\n",
(clk_get_rate(&sys_ck) / 1000000),
(clk_get_rate(&sys_ck) / 100000) % 10,
(clk_get_rate(&dpll_ck) / 1000000),
(clk_get_rate(&mpu_ck) / 1000000));
return 0;
}
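/*
 * Example (illustrative sketch, not part of the original file): once
 * omap2420_clk_init() has registered the omap2420_clks table above,
 * drivers can look clocks up through clkdev by their aliases, e.g.:
 *
 *	struct clk *ick = clk_get_sys("omap_i2c.1", "ick");
 *	if (!IS_ERR(ick))
 *		clk_enable(ick);
 */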
| gpl-2.0 |
eklovya/android_kernel_lge_p725 | drivers/net/e2100.c | 2640 | 14537 | /* e2100.c: A Cabletron E2100 series ethernet driver for linux. */
/*
Written 1993-1994 by Donald Becker.
Copyright 1994 by Donald Becker.
Copyright 1993 United States Government as represented by the
Director, National Security Agency. This software may be used and
distributed according to the terms of the GNU General Public License,
incorporated herein by reference.
This is a driver for the Cabletron E2100 series ethercards.
The Author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
Annapolis MD 21403
The E2100 series ethercard is a fairly generic shared memory 8390
implementation. The only unusual aspect is the way the shared memory
registers are set: first you do an inb() in what is normally the
station address region, and the low three bits of the next outb() *address*
are used as the write value for that register. Either someone wasn't
too used to dem bit en bites, or they were trying to obfuscate the
programming interface.
There is an additional complication when setting the window on the packet
buffer. You must first do a read into the packet buffer region with the
low 8 address bits of the read address setting the page for the start of the packet
buffer window, and then do the above operation. See mem_on() for details.
One bug on the chip is that even a hard reset won't disable the memory
window, usually resulting in a hung machine if mem_off() isn't called.
If this happens, you must power down the machine for about 30 seconds.
*/
static const char version[] =
"e2100.c:v1.01 7/21/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/system.h>
#include "8390.h"
#define DRV_NAME "e2100"
static int e21_probe_list[] = {0x300, 0x280, 0x380, 0x220, 0};
/* Offsets from the base_addr.
Read from the ASIC register, and the low three bits of the next outb()
address are used to set the corresponding register. */
#define E21_NIC_OFFSET 0 /* Offset to the 8390 NIC. */
#define E21_ASIC 0x10
#define E21_MEM_ENABLE 0x10
#define E21_MEM_ON 0x05 /* Enable memory in 16 bit mode. */
#define E21_MEM_ON_8 0x07 /* Enable memory in 8 bit mode. */
#define E21_MEM_BASE 0x11
#define E21_IRQ_LOW 0x12 /* The low three bits of the IRQ number. */
#define E21_IRQ_HIGH 0x14 /* The high IRQ bit and media select ... */
#define E21_MEDIA 0x14 /* (alias). */
#define E21_ALT_IFPORT 0x02 /* Set to use the other (BNC,AUI) port. */
#define E21_BIG_MEM 0x04 /* Use a bigger (64K) buffer (we don't) */
#define E21_SAPROM 0x10 /* Offset to station address data. */
#define E21_IO_EXTENT 0x20
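/*
 * Illustrative sketch (restating the scheme above, not part of the
 * original driver): to write a 3-bit VALUE into an ASIC register you
 * first read the register to select it, then let the low three bits of
 * the next outb() address carry the value:
 *
 *	inb(port + REG_OFFSET);			(select the register)
 *	outb(0, port + E21_ASIC + VALUE);	(address bits carry VALUE)
 *
 * mem_on()/mem_off() below and e21_open() follow exactly this pattern.
 */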
static inline void mem_on(short port, volatile char __iomem *mem_base,
unsigned char start_page )
{
/* This is a little weird: set the shared memory window by doing a
read. The low address bits specify the starting page. */
readb(mem_base+start_page);
inb(port + E21_MEM_ENABLE);
outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON);
}
static inline void mem_off(short port)
{
inb(port + E21_MEM_ENABLE);
outb(0x00, port + E21_MEM_ENABLE);
}
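/*
 * Usage sketch (illustrative, not part of the original driver): since a
 * hard reset will not disable the window, every access to the shared
 * packet buffer must be bracketed by these helpers, as the block I/O
 * routines below do:
 *
 *	mem_on(ioaddr, ei_status.mem, start_page);
 *	memcpy_fromio(dst, ei_status.mem + offset, count);
 *	mem_off(ioaddr);
 */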
/* In other drivers I put the TX pages first, but the E2100 window circuitry
is designed to have a 4K Tx region last. The windowing circuitry wraps the
window at 0x2fff->0x0000 so that the packets at e.g. 0x2f00 in the RX ring
appear contiguously in the window. */
#define E21_RX_START_PG 0x00 /* First page of RX buffer */
#define E21_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
#define E21_BIG_RX_STOP_PG 0xF0 /* Last page +1 of RX ring */
#define E21_TX_START_PG E21_RX_STOP_PG /* First page of TX buffer */
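/*
 * Worked example (illustrative): with E21_RX_STOP_PG = 0x30 the RX ring
 * spans 0x0000-0x2fff, so a 0x200-byte packet starting at 0x2f00 wraps
 * and ends at 0x00ff, yet appears contiguous through the window --
 * e21_block_input() below therefore needs no split-copy case.
 */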
static int e21_probe1(struct net_device *dev, int ioaddr);
static int e21_open(struct net_device *dev);
static void e21_reset_8390(struct net_device *dev);
static void e21_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset);
static void e21_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
int ring_page);
static int e21_open(struct net_device *dev);
static int e21_close(struct net_device *dev);
/* Probe for the E2100 series ethercards. These cards have an 8390 at the
base address and the station address at both offset 0x10 and 0x18. I read
the station address from offset 0x18 to avoid the dataport of NE2000
ethercards, and look for Ctron's unique ID (first three octets of the
station address).
*/
static int __init do_e2100_probe(struct net_device *dev)
{
int *port;
int base_addr = dev->base_addr;
int irq = dev->irq;
if (base_addr > 0x1ff) /* Check a single specified location. */
return e21_probe1(dev, base_addr);
else if (base_addr != 0) /* Don't probe at all. */
return -ENXIO;
for (port = e21_probe_list; *port; port++) {
dev->irq = irq;
if (e21_probe1(dev, *port) == 0)
return 0;
}
return -ENODEV;
}
#ifndef MODULE
struct net_device * __init e2100_probe(int unit)
{
struct net_device *dev = alloc_ei_netdev();
int err;
if (!dev)
return ERR_PTR(-ENOMEM);
sprintf(dev->name, "eth%d", unit);
netdev_boot_setup_check(dev);
err = do_e2100_probe(dev);
if (err)
goto out;
return dev;
out:
free_netdev(dev);
return ERR_PTR(err);
}
#endif
static const struct net_device_ops e21_netdev_ops = {
.ndo_open = e21_open,
.ndo_stop = e21_close,
.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,
#endif
};
static int __init e21_probe1(struct net_device *dev, int ioaddr)
{
int i, status, retval;
unsigned char *station_addr = dev->dev_addr;
static unsigned version_printed;
if (!request_region(ioaddr, E21_IO_EXTENT, DRV_NAME))
return -EBUSY;
/* First check the station address for the Ctron prefix. */
if (inb(ioaddr + E21_SAPROM + 0) != 0x00 ||
inb(ioaddr + E21_SAPROM + 1) != 0x00 ||
inb(ioaddr + E21_SAPROM + 2) != 0x1d) {
retval = -ENODEV;
goto out;
}
/* Verify by making certain that there is an 8390 there. */
outb(E8390_NODMA + E8390_STOP, ioaddr);
udelay(1); /* we want to delay one I/O cycle - which is 2MHz */
status = inb(ioaddr);
if (status != 0x21 && status != 0x23) {
retval = -ENODEV;
goto out;
}
/* Read the station address PROM. */
for (i = 0; i < 6; i++)
station_addr[i] = inb(ioaddr + E21_SAPROM + i);
inb(ioaddr + E21_MEDIA); /* Point to media selection. */
outb(0, ioaddr + E21_ASIC); /* and disable the secondary interface. */
if (ei_debug && version_printed++ == 0)
printk(version);
printk("%s: E21** at %#3x,", dev->name, ioaddr);
for (i = 0; i < 6; i++)
printk(" %02X", station_addr[i]);
if (dev->irq < 2) {
static const int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4};
for (i = 0; i < ARRAY_SIZE(irqlist); i++)
if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) {
dev->irq = irqlist[i];
break;
}
if (i >= ARRAY_SIZE(irqlist)) {
printk(" unable to get IRQ %d.\n", dev->irq);
retval = -EAGAIN;
goto out;
}
} else if (dev->irq == 2) /* Fixup luser bogosity: IRQ2 is really IRQ9 */
dev->irq = 9;
/* The 8390 is at the base address. */
dev->base_addr = ioaddr;
ei_status.name = "E2100";
ei_status.word16 = 1;
ei_status.tx_start_page = E21_TX_START_PG;
ei_status.rx_start_page = E21_RX_START_PG;
ei_status.stop_page = E21_RX_STOP_PG;
ei_status.saved_irq = dev->irq;
/* Check the media port used. The port can be passed in on the
low mem_end bits. */
if (dev->mem_end & 15)
dev->if_port = dev->mem_end & 7;
else {
dev->if_port = 0;
inb(ioaddr + E21_MEDIA); /* Turn automatic media detection on. */
for(i = 0; i < 6; i++)
if (station_addr[i] != inb(ioaddr + E21_SAPROM + 8 + i)) {
dev->if_port = 1;
break;
}
}
/* Never map in the E21 shared memory unless you are actively using it.
Also, the shared memory effectively has only one setting -- spread all
over the 128K region! */
if (dev->mem_start == 0)
dev->mem_start = 0xd0000;
ei_status.mem = ioremap(dev->mem_start, 2*1024);
if (!ei_status.mem) {
printk("unable to remap memory\n");
retval = -EAGAIN;
goto out;
}
#ifdef notdef
/* These values are unused. The E2100 has a 2K window into the packet
buffer. The window can be set to start on any page boundary. */
ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
dev->mem_end = ei_status.rmem_end = dev->mem_start + 2*1024;
#endif
printk(", IRQ %d, %s media, memory @ %#lx.\n", dev->irq,
dev->if_port ? "secondary" : "primary", dev->mem_start);
ei_status.reset_8390 = &e21_reset_8390;
ei_status.block_input = &e21_block_input;
ei_status.block_output = &e21_block_output;
ei_status.get_8390_hdr = &e21_get_8390_hdr;
dev->netdev_ops = &e21_netdev_ops;
NS8390_init(dev, 0);
retval = register_netdev(dev);
if (retval)
goto out;
return 0;
out:
release_region(ioaddr, E21_IO_EXTENT);
return retval;
}
static int
e21_open(struct net_device *dev)
{
short ioaddr = dev->base_addr;
int retval;
if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
return retval;
/* Set the interrupt line and memory base on the hardware. */
inb(ioaddr + E21_IRQ_LOW);
outb(0, ioaddr + E21_ASIC + (dev->irq & 7));
inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
outb(0, ioaddr + E21_ASIC + (dev->irq > 7 ? 1:0)
+ (dev->if_port ? E21_ALT_IFPORT : 0));
inb(ioaddr + E21_MEM_BASE);
outb(0, ioaddr + E21_ASIC + ((dev->mem_start >> 17) & 7));
ei_open(dev);
return 0;
}
static void
e21_reset_8390(struct net_device *dev)
{
short ioaddr = dev->base_addr;
outb(0x01, ioaddr);
if (ei_debug > 1) printk("resetting the E2180x3 t=%ld...", jiffies);
ei_status.txing = 0;
/* Set up the ASIC registers, just in case something changed them. */
if (ei_debug > 1) printk("reset done\n");
}
/* Grab the 8390 specific header. We put the 2k window so the header page
appears at the start of the shared memory. */
static void
e21_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
{
short ioaddr = dev->base_addr;
char __iomem *shared_mem = ei_status.mem;
mem_on(ioaddr, shared_mem, ring_page);
#ifdef notdef
/* Officially this is what we are doing, but the readl() is faster */
memcpy_fromio(hdr, shared_mem, sizeof(struct e8390_pkt_hdr));
#else
((unsigned int*)hdr)[0] = readl(shared_mem);
#endif
/* Turn off memory access: we would need to reprogram the window anyway. */
mem_off(ioaddr);
}
/* Block input and output are easy on shared memory ethercards.
The E21xx makes block_input() especially easy by wrapping the top
ring buffer to the bottom automatically. */
static void
e21_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
{
short ioaddr = dev->base_addr;
char __iomem *shared_mem = ei_status.mem;
mem_on(ioaddr, shared_mem, (ring_offset>>8));
memcpy_fromio(skb->data, ei_status.mem + (ring_offset & 0xff), count);
mem_off(ioaddr);
}
static void
e21_block_output(struct net_device *dev, int count, const unsigned char *buf,
int start_page)
{
short ioaddr = dev->base_addr;
volatile char __iomem *shared_mem = ei_status.mem;
/* Set the shared memory window start by doing a read, with the low address
bits specifying the starting page. */
readb(shared_mem + start_page);
mem_on(ioaddr, shared_mem, start_page);
memcpy_toio(shared_mem, buf, count);
mem_off(ioaddr);
}
static int
e21_close(struct net_device *dev)
{
short ioaddr = dev->base_addr;
if (ei_debug > 1)
printk("%s: Shutting down ethercard.\n", dev->name);
free_irq(dev->irq, dev);
dev->irq = ei_status.saved_irq;
/* Shut off the interrupt line and secondary interface. */
inb(ioaddr + E21_IRQ_LOW);
outb(0, ioaddr + E21_ASIC);
inb(ioaddr + E21_IRQ_HIGH); /* High IRQ bit, and if_port. */
outb(0, ioaddr + E21_ASIC);
ei_close(dev);
/* Double-check that the memory has been turned off, because really
really bad things happen if it isn't. */
mem_off(ioaddr);
return 0;
}
#ifdef MODULE
#define MAX_E21_CARDS 4 /* Max number of E21 cards per module */
static struct net_device *dev_e21[MAX_E21_CARDS];
static int io[MAX_E21_CARDS];
static int irq[MAX_E21_CARDS];
static int mem[MAX_E21_CARDS];
static int xcvr[MAX_E21_CARDS]; /* choose int. or ext. xcvr */
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param_array(mem, int, NULL, 0);
module_param_array(xcvr, int, NULL, 0);
MODULE_PARM_DESC(io, "I/O base address(es)");
MODULE_PARM_DESC(irq, "IRQ number(s)");
MODULE_PARM_DESC(mem, "memory base address(es)");
MODULE_PARM_DESC(xcvr, "transceiver(s) (0=internal, 1=external)");
MODULE_DESCRIPTION("Cabletron E2100 ISA ethernet driver");
MODULE_LICENSE("GPL");
/* This is set up so that only a single autoprobe takes place per call.
ISA device autoprobes on a running machine are not recommended. */
int __init init_module(void)
{
struct net_device *dev;
int this_dev, found = 0;
for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
if (io[this_dev] == 0) {
if (this_dev != 0) break; /* only autoprobe 1st one */
printk(KERN_NOTICE "e2100.c: Presently autoprobing (not recommended) for a single card.\n");
}
dev = alloc_ei_netdev();
if (!dev)
break;
dev->irq = irq[this_dev];
dev->base_addr = io[this_dev];
dev->mem_start = mem[this_dev];
dev->mem_end = xcvr[this_dev]; /* low 4bits = xcvr sel. */
if (do_e2100_probe(dev) == 0) {
dev_e21[found++] = dev;
continue;
}
free_netdev(dev);
printk(KERN_WARNING "e2100.c: No E2100 card found (i/o = 0x%x).\n", io[this_dev]);
break;
}
if (found)
return 0;
return -ENXIO;
}
static void cleanup_card(struct net_device *dev)
{
/* NB: e21_close() handles free_irq */
iounmap(ei_status.mem);
release_region(dev->base_addr, E21_IO_EXTENT);
}
void __exit
cleanup_module(void)
{
int this_dev;
for (this_dev = 0; this_dev < MAX_E21_CARDS; this_dev++) {
struct net_device *dev = dev_e21[this_dev];
if (dev) {
unregister_netdev(dev);
cleanup_card(dev);
free_netdev(dev);
}
}
}
#endif /* MODULE */
| gpl-2.0 |
aka-mccloud/ployer-momo7-kernel | sound/core/memalloc.c | 2896 | 13760 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Takashi Iwai <tiwai@suse.de>
*
* Generic memory allocators
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/memalloc.h>
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");
/*
*/
static DEFINE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);
/* buffer preservation list */
struct snd_mem_list {
struct snd_dma_buffer buffer;
unsigned int id;
struct list_head list;
};
/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1
/*
*
* Generic memory allocators
*
*/
static long snd_allocated_pages; /* holding the number of allocated pages */
static inline void inc_snd_pages(int order)
{
snd_allocated_pages += 1 << order;
}
static inline void dec_snd_pages(int order)
{
snd_allocated_pages -= 1 << order;
}
/**
* snd_malloc_pages - allocate pages with the given size
* @size: the size to allocate in bytes
* @gfp_flags: the allocation conditions, GFP_XXX
*
* Allocates the physically contiguous pages with the given size.
*
* Returns the pointer to the buffer, or NULL if there is not enough memory.
*/
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
int pg;
void *res;
if (WARN_ON(!size))
return NULL;
if (WARN_ON(!gfp_flags))
return NULL;
gfp_flags |= __GFP_COMP; /* compound page lets parts be mapped */
pg = get_order(size);
if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
inc_snd_pages(pg);
return res;
}
/**
* snd_free_pages - release the pages
* @ptr: the buffer pointer to release
* @size: the allocated buffer size
*
* Releases the buffer allocated via snd_malloc_pages().
*/
void snd_free_pages(void *ptr, size_t size)
{
int pg;
if (ptr == NULL)
return;
pg = get_order(size);
dec_snd_pages(pg);
free_pages((unsigned long) ptr, pg);
}
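/*
 * Example usage (illustrative sketch, not part of the original file):
 *
 *	void *buf = snd_malloc_pages(8192, GFP_KERNEL);
 *	if (buf) {
 *		memset(buf, 0, 8192);
 *		snd_free_pages(buf, 8192);
 *	}
 */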
/*
*
* Bus-specific memory allocators
*
*/
#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
int pg;
void *res;
gfp_t gfp_flags;
if (WARN_ON(!dma))
return NULL;
pg = get_order(size);
gfp_flags = GFP_KERNEL
| __GFP_COMP /* compound page lets parts be mapped */
| __GFP_NORETRY /* don't trigger OOM-killer */
| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
if (res != NULL)
inc_snd_pages(pg);
return res;
}
/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
dma_addr_t dma)
{
int pg;
if (ptr == NULL)
return;
pg = get_order(size);
dec_snd_pages(pg);
dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}
#endif /* CONFIG_HAS_DMA */
/*
*
* ALSA generic memory management
*
*/
/**
* snd_dma_alloc_pages - allocate the buffer area according to the given type
* @type: the DMA buffer type
* @device: the device pointer
* @size: the buffer size to allocate
* @dmab: buffer allocation record to store the allocated data
*
* Calls the memory-allocator function for the corresponding
* buffer type.
*
* Returns zero if the buffer with the given size is allocated successfully,
* or a negative value on error.
*/
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
if (WARN_ON(!size))
return -ENXIO;
if (WARN_ON(!dmab))
return -ENXIO;
dmab->dev.type = type;
dmab->dev.dev = device;
dmab->bytes = 0;
switch (type) {
case SNDRV_DMA_TYPE_CONTINUOUS:
dmab->area = snd_malloc_pages(size,
(__force gfp_t)(unsigned long)device);
dmab->addr = 0;
break;
#ifdef CONFIG_HAS_DMA
case SNDRV_DMA_TYPE_DEV:
dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
snd_malloc_sgbuf_pages(device, size, dmab, NULL);
break;
#endif
default:
printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
dmab->area = NULL;
dmab->addr = 0;
return -ENXIO;
}
if (! dmab->area)
return -ENOMEM;
dmab->bytes = size;
return 0;
}
/**
* snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
* @type: the DMA buffer type
* @device: the device pointer
* @size: the buffer size to allocate
* @dmab: buffer allocation record to store the allocated data
*
* Calls the memory-allocator function for the corresponding
* buffer type. When no space is left, this function reduces the size and
* tries to allocate again. The size actually allocated is stored in
* dmab->bytes.
*
* Returns zero if the buffer with the given size is allocated successfully,
* or a negative value on error.
*/
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
int err;
while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
size_t aligned_size;
if (err != -ENOMEM)
return err;
if (size <= PAGE_SIZE)
return -ENOMEM;
aligned_size = PAGE_SIZE << get_order(size);
if (size != aligned_size)
size = aligned_size;
else
size >>= 1;
}
if (! dmab->area)
return -ENOMEM;
return 0;
}
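/*
 * A minimal illustrative sketch of preallocating a device DMA buffer with
 * the fallback helper, assuming CONFIG_HAS_DMA and a valid struct device
 * (e.g. obtained via snd_dma_pci_data()); the function name is hypothetical
 * and the block is disabled.
 */
#if 0
static int example_prealloc(struct device *dev)
{
	struct snd_dma_buffer dmab;
	int err;

	/* ask for 128 kB; on -ENOMEM the helper retries with smaller sizes */
	err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
					   128 * 1024, &dmab);
	if (err < 0)
		return err;
	/* dmab.bytes holds the size actually allocated, dmab.addr the DMA address */
	snd_dma_free_pages(&dmab);
	return 0;
}
#endif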
/**
* snd_dma_free_pages - release the allocated buffer
* @dmab: the buffer allocation record to release
*
* Releases the allocated buffer via snd_dma_alloc_pages().
*/
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
switch (dmab->dev.type) {
case SNDRV_DMA_TYPE_CONTINUOUS:
snd_free_pages(dmab->area, dmab->bytes);
break;
#ifdef CONFIG_HAS_DMA
case SNDRV_DMA_TYPE_DEV:
snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
snd_free_sgbuf_pages(dmab);
break;
#endif
default:
printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
}
}
/**
* snd_dma_get_reserved - get the reserved buffer for the given device
* @dmab: the buffer allocation record to store
* @id: the buffer id
*
* Searches the reserved-buffer list and re-uses the buffer if one with a
* matching id (and device) is found. The matched buffer is removed from the list.
*
* Returns the size of buffer if the buffer is found, or zero if not found.
*/
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
struct snd_mem_list *mem;
if (WARN_ON(!dmab))
return 0;
mutex_lock(&list_mutex);
list_for_each_entry(mem, &mem_list_head, list) {
if (mem->id == id &&
(mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
struct device *dev = dmab->dev.dev;
list_del(&mem->list);
*dmab = mem->buffer;
if (dmab->dev.dev == NULL)
dmab->dev.dev = dev;
kfree(mem);
mutex_unlock(&list_mutex);
return dmab->bytes;
}
}
mutex_unlock(&list_mutex);
return 0;
}
/**
* snd_dma_reserve_buf - reserve the buffer
* @dmab: the buffer to reserve
* @id: the buffer id
*
* Reserves the given buffer as a reserved buffer.
*
* Returns zero if successful, or a negative code at error.
*/
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
struct snd_mem_list *mem;
if (WARN_ON(!dmab))
return -EINVAL;
mem = kmalloc(sizeof(*mem), GFP_KERNEL);
if (! mem)
return -ENOMEM;
mutex_lock(&list_mutex);
mem->buffer = *dmab;
mem->id = id;
list_add_tail(&mem->list, &mem_list_head);
mutex_unlock(&list_mutex);
return 0;
}
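/*
 * A minimal illustrative sketch of the reserve/get pair above, which lets a
 * driver keep a DMA buffer across driver reload instead of freeing and
 * re-allocating it. The id is an arbitrary driver-chosen key;
 * snd_dma_pci_buf_id() (used by the proc handler below) is one such key.
 * The function name is hypothetical and the block is disabled.
 */
#if 0
static void example_reserve_cycle(struct snd_dma_buffer *dmab, unsigned int id)
{
	/* hand the buffer over to the preservation list (do not free it) */
	snd_dma_reserve_buf(dmab, id);

	/* later, e.g. on re-probe: reclaim it; returns 0 if nothing matched */
	if (snd_dma_get_reserved_buf(dmab, id))
		printk(KERN_INFO "reclaimed %d bytes\n", (int)dmab->bytes);
}
#endif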
/*
* purge all reserved buffers
*/
static void free_all_reserved_pages(void)
{
struct list_head *p;
struct snd_mem_list *mem;
mutex_lock(&list_mutex);
while (! list_empty(&mem_list_head)) {
p = mem_list_head.next;
mem = list_entry(p, struct snd_mem_list, list);
list_del(p);
snd_dma_free_pages(&mem->buffer);
kfree(mem);
}
mutex_unlock(&list_mutex);
}
#ifdef CONFIG_PROC_FS
/*
* proc file interface
*/
#define SND_MEM_PROC_FILE "driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;
static int snd_mem_proc_read(struct seq_file *seq, void *offset)
{
long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
struct snd_mem_list *mem;
int devno;
static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG" };
mutex_lock(&list_mutex);
seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
devno = 0;
list_for_each_entry(mem, &mem_list_head, list) {
devno++;
seq_printf(seq, "buffer %d : ID %08x : type %s\n",
devno, mem->id, types[mem->buffer.dev.type]);
seq_printf(seq, " addr = 0x%lx, size = %d bytes\n",
(unsigned long)mem->buffer.addr,
(int)mem->buffer.bytes);
}
mutex_unlock(&list_mutex);
return 0;
}
static int snd_mem_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, snd_mem_proc_read, NULL);
}
/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")
static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
size_t count, loff_t * ppos)
{
char buf[128];
char *token, *p;
if (count > sizeof(buf) - 1)
return -EINVAL;
if (copy_from_user(buf, buffer, count))
return -EFAULT;
buf[count] = '\0';
p = buf;
token = gettoken(&p);
if (! token || *token == '#')
return count;
if (strcmp(token, "add") == 0) {
char *endp;
int vendor, device, size, buffers;
long mask;
int i, alloced;
struct pci_dev *pci;
if ((token = gettoken(&p)) == NULL ||
(vendor = simple_strtol(token, NULL, 0)) <= 0 ||
(token = gettoken(&p)) == NULL ||
(device = simple_strtol(token, NULL, 0)) <= 0 ||
(token = gettoken(&p)) == NULL ||
(mask = simple_strtol(token, NULL, 0)) < 0 ||
(token = gettoken(&p)) == NULL ||
(size = memparse(token, &endp)) < 64*1024 ||
size > 16*1024*1024 /* too big */ ||
(token = gettoken(&p)) == NULL ||
(buffers = simple_strtol(token, NULL, 0)) <= 0 ||
buffers > 4) {
printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
return count;
}
vendor &= 0xffff;
device &= 0xffff;
alloced = 0;
pci = NULL;
while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
if (mask > 0 && mask < 0xffffffff) {
if (pci_set_dma_mask(pci, mask) < 0 ||
pci_set_consistent_dma_mask(pci, mask) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
pci_dev_put(pci);
return count;
}
}
for (i = 0; i < buffers; i++) {
struct snd_dma_buffer dmab;
memset(&dmab, 0, sizeof(dmab));
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
size, &dmab) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
pci_dev_put(pci);
return count;
}
snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
}
alloced++;
}
if (! alloced) {
for (i = 0; i < buffers; i++) {
struct snd_dma_buffer dmab;
memset(&dmab, 0, sizeof(dmab));
/* FIXME: We can allocate only in ZONE_DMA
* without a device pointer!
*/
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
size, &dmab) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
break;
}
snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
}
}
} else if (strcmp(token, "erase") == 0)
/* FIXME: need for releasing each buffer chunk? */
free_all_reserved_pages();
else
printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
return count;
}
#endif /* CONFIG_PCI */
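/*
 * Illustrative usage note for the write handler above: it accepts lines of
 * the form
 *
 *	add <vendor> <device> <dma_mask> <size> <buffers>
 *
 * where <size> goes through memparse() (so "512k" or "1m" work) and must
 * lie in [64k, 16m], and <buffers> must be 1..4. For example, from a shell
 * (the PCI vendor/device IDs here are placeholders):
 *
 *	echo "add 0x8086 0x2668 0 512k 2" > /proc/driver/snd-page-alloc
 *	echo "erase" > /proc/driver/snd-page-alloc
 *
 * A mask of 0 leaves the device's DMA mask untouched.
 */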
static const struct file_operations snd_mem_proc_fops = {
.owner = THIS_MODULE,
.open = snd_mem_proc_open,
.read = seq_read,
#ifdef CONFIG_PCI
.write = snd_mem_proc_write,
#endif
.llseek = seq_lseek,
.release = single_release,
};
#endif /* CONFIG_PROC_FS */
/*
* module entry
*/
static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
&snd_mem_proc_fops);
#endif
return 0;
}
static void __exit snd_mem_exit(void)
{
remove_proc_entry(SND_MEM_PROC_FILE, NULL);
free_all_reserved_pages();
if (snd_allocated_pages > 0)
printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
}
module_init(snd_mem_init)
module_exit(snd_mem_exit)
/*
* exports
*/
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);
EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);
EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);
| gpl-2.0 |
neohackt/android_kernel_motorola_otus | arch/powerpc/kernel/ptrace.c | 3664 | 45635 | /*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/m68k/kernel/ptrace.c"
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* Modified by Cort Dougan (cort@hq.fsmlabs.com)
* and Paul Mackerras (paulus@samba.org).
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file README.legal in the main directory of
* this archive for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <trace/syscall.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
* The parameter save area on the stack is used to store arguments being passed
* to callee function and is located at fixed offset from stack pointer.
*/
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
#endif
struct pt_regs_offset {
const char *name;
int offset;
};
#define STR(s) #s /* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define GPR_OFFSET_NAME(num) \
{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
GPR_OFFSET_NAME(0),
GPR_OFFSET_NAME(1),
GPR_OFFSET_NAME(2),
GPR_OFFSET_NAME(3),
GPR_OFFSET_NAME(4),
GPR_OFFSET_NAME(5),
GPR_OFFSET_NAME(6),
GPR_OFFSET_NAME(7),
GPR_OFFSET_NAME(8),
GPR_OFFSET_NAME(9),
GPR_OFFSET_NAME(10),
GPR_OFFSET_NAME(11),
GPR_OFFSET_NAME(12),
GPR_OFFSET_NAME(13),
GPR_OFFSET_NAME(14),
GPR_OFFSET_NAME(15),
GPR_OFFSET_NAME(16),
GPR_OFFSET_NAME(17),
GPR_OFFSET_NAME(18),
GPR_OFFSET_NAME(19),
GPR_OFFSET_NAME(20),
GPR_OFFSET_NAME(21),
GPR_OFFSET_NAME(22),
GPR_OFFSET_NAME(23),
GPR_OFFSET_NAME(24),
GPR_OFFSET_NAME(25),
GPR_OFFSET_NAME(26),
GPR_OFFSET_NAME(27),
GPR_OFFSET_NAME(28),
GPR_OFFSET_NAME(29),
GPR_OFFSET_NAME(30),
GPR_OFFSET_NAME(31),
REG_OFFSET_NAME(nip),
REG_OFFSET_NAME(msr),
REG_OFFSET_NAME(ctr),
REG_OFFSET_NAME(link),
REG_OFFSET_NAME(xer),
REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
REG_OFFSET_NAME(softe),
#else
REG_OFFSET_NAME(mq),
#endif
REG_OFFSET_NAME(trap),
REG_OFFSET_NAME(dar),
REG_OFFSET_NAME(dsisr),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL;
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
* offset in struct pt_regs. If the @offset is invalid, this returns NULL;
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
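/*
 * A minimal illustrative sketch: the two lookups above are inverses of each
 * other, e.g. for translating between register names and pt_regs offsets.
 * The function name is hypothetical and the block is disabled.
 */
#if 0
static void example_regoffset_roundtrip(void)
{
	int off = regs_query_register_offset("gpr3");
	/* name is "gpr3" again; off is offsetof(struct pt_regs, gpr[3]) */
	const char *name = regs_query_register_name(off);

	printk(KERN_DEBUG "%s at offset %d\n", name, off);
}
#endif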
/*
* Does not yet catch signals sent when the child dies
* (in exit.c or in signal.c).
*/
/*
* Set of msr bits that gdb can change on behalf of a process.
*/
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE 0
#else
#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
#endif
/*
* Max register writeable via put_reg
*/
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG PT_MQ
#else
#define PT_MAX_PUT_REG PT_CCR
#endif
static unsigned long get_user_msr(struct task_struct *task)
{
return task->thread.regs->msr | task->thread.fpexc_mode;
}
static int set_user_msr(struct task_struct *task, unsigned long msr)
{
task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
return 0;
}
/*
* We prevent mucking around with the reserved area of trap
* which is used internally by the kernel.
*/
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
task->thread.regs->trap = trap & 0xfff0;
return 0;
}
/*
* Get contents of register REGNO in task TASK.
*/
unsigned long ptrace_get_reg(struct task_struct *task, int regno)
{
if (task->thread.regs == NULL)
return -EIO;
if (regno == PT_MSR)
return get_user_msr(task);
if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
return ((unsigned long *)task->thread.regs)[regno];
return -EIO;
}
/*
* Write contents of register REGNO in task TASK.
*/
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
if (task->thread.regs == NULL)
return -EIO;
if (regno == PT_MSR)
return set_user_msr(task, data);
if (regno == PT_TRAP)
return set_user_trap(task, data);
if (regno <= PT_MAX_PUT_REG) {
((unsigned long *)task->thread.regs)[regno] = data;
return 0;
}
return -EIO;
}
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int i, ret;
if (target->thread.regs == NULL)
return -EIO;
if (!FULL_REGS(target->thread.regs)) {
/* We have a partial register set. Fill 14-31 with bogus values */
for (i = 14; i < 32; i++)
target->thread.regs->gpr[i] = NV_REG_POISON;
}
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
target->thread.regs,
0, offsetof(struct pt_regs, msr));
if (!ret) {
unsigned long msr = get_user_msr(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
offsetof(struct pt_regs, msr),
offsetof(struct pt_regs, msr) +
sizeof(msr));
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.regs->orig_gpr3,
offsetof(struct pt_regs, orig_gpr3),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long reg;
int ret;
if (target->thread.regs == NULL)
return -EIO;
CHECK_FULL_REGS(target->thread.regs);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->thread.regs,
0, PT_MSR * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
PT_MSR * sizeof(reg),
(PT_MSR + 1) * sizeof(reg));
if (!ret)
ret = set_user_msr(target, reg);
}
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct pt_regs, msr) + sizeof(long));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.regs->orig_gpr3,
PT_ORIG_R3 * sizeof(reg),
(PT_MAX_PUT_REG + 1) * sizeof(reg));
if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
ret = user_regset_copyin_ignore(
&pos, &count, &kbuf, &ubuf,
(PT_MAX_PUT_REG + 1) * sizeof(reg),
PT_TRAP * sizeof(reg));
if (!ret && count > 0) {
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
PT_TRAP * sizeof(reg),
(PT_TRAP + 1) * sizeof(reg));
if (!ret)
ret = set_user_trap(target, reg);
}
if (!ret)
ret = user_regset_copyin_ignore(
&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
return ret;
}
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
double buf[33];
int i;
#endif
flush_fp_to_thread(target);
#ifdef CONFIG_VSX
/* copy to local buffer then write that out */
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.TS_FPR(i);
memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#else
BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
offsetof(struct thread_struct, TS_FPR(32)));
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.fpr, 0, -1);
#endif
}
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
double buf[33];
int i;
#endif
flush_fp_to_thread(target);
#ifdef CONFIG_VSX
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
return i;
for (i = 0; i < 32 ; i++)
target->thread.TS_FPR(i) = buf[i];
memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
return 0;
#else
BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
offsetof(struct thread_struct, TS_FPR(32)));
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fpr, 0, -1);
#endif
}
#ifdef CONFIG_ALTIVEC
/*
* Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
* The transfer totals 34 quadwords. Quadwords 0-31 contain the
* corresponding vector registers. Quadword 32 contains the vscr as the
* last word (offset 12) within that quadword. Quadword 33 contains the
* vrsave as the first word (offset 0) within the quadword.
*
* This definition of the VMX state is compatible with the current PPC32
* ptrace interface. This allows signal handling and ptrace to use the
* same structures. This also simplifies the implementation of a bi-arch
* (combined 32- and 64-bit) gdb.
*/
static int vr_active(struct task_struct *target,
const struct user_regset *regset)
{
flush_altivec_to_thread(target);
return target->thread.used_vr ? regset->n : 0;
}
static int vr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
flush_altivec_to_thread(target);
BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
offsetof(struct thread_struct, vr[32]));
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.vr, 0,
33 * sizeof(vector128));
if (!ret) {
/*
* Copy out only the low-order word of vrsave.
*/
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.vrsave;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
}
return ret;
}
static int vr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
flush_altivec_to_thread(target);
BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
offsetof(struct thread_struct, vr[32]));
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.vr, 0, 33 * sizeof(vector128));
if (!ret && count > 0) {
/*
* We use only the first word of vrsave.
*/
union {
elf_vrreg_t reg;
u32 word;
} vrsave;
memset(&vrsave, 0, sizeof(vrsave));
vrsave.word = target->thread.vrsave;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
33 * sizeof(vector128), -1);
if (!ret)
target->thread.vrsave = vrsave.word;
}
return ret;
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
/*
* Currently, to set and get all the VSX state, you need to make
* the FP and VMX calls as well. This only gets/sets the lower 32
* 128-bit VSX registers.
*/
static int vsr_active(struct task_struct *target,
const struct user_regset *regset)
{
flush_vsx_to_thread(target);
return target->thread.used_vsr ? regset->n : 0;
}
static int vsr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
double buf[32];
int ret, i;
flush_vsx_to_thread(target);
for (i = 0; i < 32 ; i++)
buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
return ret;
}
static int vsr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
double buf[32];
int ret,i;
flush_vsx_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
for (i = 0; i < 32 ; i++)
target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
return ret;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/*
* For get_evrregs/set_evrregs functions 'data' has the following layout:
*
* struct {
* u32 evr[32];
* u64 acc;
* u32 spefscr;
* }
*/
static int evr_active(struct task_struct *target,
const struct user_regset *regset)
{
flush_spe_to_thread(target);
return target->thread.used_spe ? regset->n : 0;
}
static int evr_get(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
flush_spe_to_thread(target);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.evr,
0, sizeof(target->thread.evr));
BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
offsetof(struct thread_struct, spefscr));
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.acc,
sizeof(target->thread.evr), -1);
return ret;
}
static int evr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
flush_spe_to_thread(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.evr,
0, sizeof(target->thread.evr));
BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
offsetof(struct thread_struct, spefscr));
if (!ret)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.acc,
sizeof(target->thread.evr), -1);
return ret;
}
#endif /* CONFIG_SPE */
/*
* These are our native regset flavors.
*/
enum powerpc_regset {
REGSET_GPR,
REGSET_FPR,
#ifdef CONFIG_ALTIVEC
REGSET_VMX,
#endif
#ifdef CONFIG_VSX
REGSET_VSX,
#endif
#ifdef CONFIG_SPE
REGSET_SPE,
#endif
};
static const struct user_regset native_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
.size = sizeof(long), .align = sizeof(long),
.get = gpr_get, .set = gpr_set
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.get = fpr_get, .set = fpr_set
},
#ifdef CONFIG_ALTIVEC
[REGSET_VMX] = {
.core_note_type = NT_PPC_VMX, .n = 34,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = vr_active, .get = vr_get, .set = vr_set
},
#endif
#ifdef CONFIG_VSX
[REGSET_VSX] = {
.core_note_type = NT_PPC_VSX, .n = 32,
.size = sizeof(double), .align = sizeof(double),
.active = vsr_active, .get = vsr_get, .set = vsr_set
},
#endif
#ifdef CONFIG_SPE
[REGSET_SPE] = {
.n = 35,
.size = sizeof(u32), .align = sizeof(u32),
.active = evr_active, .get = evr_get, .set = evr_set
},
#endif
};
static const struct user_regset_view user_ppc_native_view = {
.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
#ifdef CONFIG_PPC64
#include <linux/compat.h>
static int gpr32_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const unsigned long *regs = &target->thread.regs->gpr[0];
compat_ulong_t *k = kbuf;
compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
int i;
if (target->thread.regs == NULL)
return -EIO;
if (!FULL_REGS(target->thread.regs)) {
/* We have a partial register set. Fill 14-31 with bogus values */
for (i = 14; i < 32; i++)
target->thread.regs->gpr[i] = NV_REG_POISON;
}
pos /= sizeof(reg);
count /= sizeof(reg);
if (kbuf)
for (; count > 0 && pos < PT_MSR; --count)
*k++ = regs[pos++];
else
for (; count > 0 && pos < PT_MSR; --count)
if (__put_user((compat_ulong_t) regs[pos++], u++))
return -EFAULT;
if (count > 0 && pos == PT_MSR) {
reg = get_user_msr(target);
if (kbuf)
*k++ = reg;
else if (__put_user(reg, u++))
return -EFAULT;
++pos;
--count;
}
if (kbuf)
for (; count > 0 && pos < PT_REGS_COUNT; --count)
*k++ = regs[pos++];
else
for (; count > 0 && pos < PT_REGS_COUNT; --count)
if (__put_user((compat_ulong_t) regs[pos++], u++))
return -EFAULT;
kbuf = k;
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
PT_REGS_COUNT * sizeof(reg), -1);
}
static int gpr32_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
unsigned long *regs = &target->thread.regs->gpr[0];
const compat_ulong_t *k = kbuf;
const compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
if (target->thread.regs == NULL)
return -EIO;
CHECK_FULL_REGS(target->thread.regs);
pos /= sizeof(reg);
count /= sizeof(reg);
if (kbuf)
for (; count > 0 && pos < PT_MSR; --count)
regs[pos++] = *k++;
else
for (; count > 0 && pos < PT_MSR; --count) {
if (__get_user(reg, u++))
return -EFAULT;
regs[pos++] = reg;
}
if (count > 0 && pos == PT_MSR) {
if (kbuf)
reg = *k++;
else if (__get_user(reg, u++))
return -EFAULT;
set_user_msr(target, reg);
++pos;
--count;
}
if (kbuf) {
for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
regs[pos++] = *k++;
for (; count > 0 && pos < PT_TRAP; --count, ++pos)
++k;
} else {
for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
if (__get_user(reg, u++))
return -EFAULT;
regs[pos++] = reg;
}
for (; count > 0 && pos < PT_TRAP; --count, ++pos)
if (__get_user(reg, u++))
return -EFAULT;
}
if (count > 0 && pos == PT_TRAP) {
if (kbuf)
reg = *k++;
else if (__get_user(reg, u++))
return -EFAULT;
set_user_trap(target, reg);
++pos;
--count;
}
kbuf = k;
ubuf = u;
pos *= sizeof(reg);
count *= sizeof(reg);
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
}
/*
* These are the regset flavors matching the CONFIG_PPC32 native set.
*/
static const struct user_regset compat_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
.get = gpr32_get, .set = gpr32_set
},
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.get = fpr_get, .set = fpr_set
},
#ifdef CONFIG_ALTIVEC
[REGSET_VMX] = {
.core_note_type = NT_PPC_VMX, .n = 34,
.size = sizeof(vector128), .align = sizeof(vector128),
.active = vr_active, .get = vr_get, .set = vr_set
},
#endif
#ifdef CONFIG_SPE
[REGSET_SPE] = {
.core_note_type = NT_PPC_SPE, .n = 35,
.size = sizeof(u32), .align = sizeof(u32),
.active = evr_active, .get = evr_get, .set = evr_set
},
#endif
};
static const struct user_regset_view user_ppc_compat_view = {
.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif /* CONFIG_PPC64 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_PPC64
if (test_tsk_thread_flag(task, TIF_32BIT))
return &user_ppc_compat_view;
#endif
return &user_ppc_native_view;
}
void user_enable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
task->thread.dbcr0 &= ~DBCR0_BT;
task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
regs->msr |= MSR_DE;
#else
regs->msr &= ~MSR_BE;
regs->msr |= MSR_SE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void user_enable_block_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
task->thread.dbcr0 &= ~DBCR0_IC;
task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
regs->msr |= MSR_DE;
#else
regs->msr &= ~MSR_SE;
regs->msr |= MSR_BE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* The logic to disable single stepping should be as
* simple as turning off the Instruction Complete flag.
* And, after doing so, if all debug flags are off, turn
* off DBCR0(IDM) and MSR(DE) .... Torez
*/
task->thread.dbcr0 &= ~DBCR0_IC;
/*
* Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
*/
if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
task->thread.dbcr1)) {
/*
* All debug events were off.....
*/
task->thread.dbcr0 &= ~DBCR0_IDM;
regs->msr &= ~MSR_DE;
}
#else
regs->msr &= ~(MSR_SE | MSR_BE);
#endif
}
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT
void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
/*
* Disable the breakpoint request here since ptrace has defined a
* one-shot behaviour for breakpoint exceptions in PPC64.
* The SIGTRAP signal is generated automatically for us in do_dabr().
* We don't have to do anything about that here
*/
attr = bp->attr;
attr.disabled = true;
modify_user_hw_breakpoint(bp, &attr);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret;
struct thread_struct *thread = &(task->thread);
struct perf_event *bp;
struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
/* For ppc64 we support one DABR and no IABRs at the moment.
* For embedded processors we support one DAC and no IACs at the
* moment.
*/
if (addr > 0)
return -EINVAL;
/* The bottom 3 bits in dabr are flags */
if ((data & ~0x7UL) >= TASK_SIZE)
return -EIO;
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
* It was assumed, on previous implementations, that 3 bits were
* passed together with the data address, fitting the design of the
* DABR register, as follows:
*
* bit 0: Read flag
* bit 1: Write flag
* bit 2: Breakpoint translation
*
* Thus, we use them here accordingly.
*/
/* Ensure breakpoint translation bit is set */
if (data && !(data & DABR_TRANSLATION))
return -EIO;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
if (ptrace_get_breakpoints(task) < 0)
return -ESRCH;
bp = thread->ptrace_bps[0];
if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) {
if (bp) {
unregister_hw_breakpoint(bp);
thread->ptrace_bps[0] = NULL;
}
ptrace_put_breakpoints(task);
return 0;
}
if (bp) {
attr = bp->attr;
attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
arch_bp_generic_fields(data &
(DABR_DATA_WRITE | DABR_DATA_READ),
&attr.bp_type);
ret = modify_user_hw_breakpoint(bp, &attr);
if (ret) {
ptrace_put_breakpoints(task);
return ret;
}
thread->ptrace_bps[0] = bp;
ptrace_put_breakpoints(task);
thread->dabr = data;
return 0;
}
/* Create a new breakpoint request if one doesn't exist already */
hw_breakpoint_init(&attr);
attr.bp_addr = data & ~HW_BREAKPOINT_ALIGN;
arch_bp_generic_fields(data & (DABR_DATA_WRITE | DABR_DATA_READ),
&attr.bp_type);
thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
ptrace_triggered, NULL, task);
if (IS_ERR(bp)) {
thread->ptrace_bps[0] = NULL;
ptrace_put_breakpoints(task);
return PTR_ERR(bp);
}
ptrace_put_breakpoints(task);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
/* Move contents to the DABR register */
task->thread.dabr = data;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
/* As described above, it was assumed 3 bits were passed with the data
* address, but we will assume only the mode bits will be passed
* so as not to cause alignment restrictions for DAC-based processors.
*/
/* DAC's hold the whole address without any mode flags */
task->thread.dac1 = data & ~0x3UL;
if (task->thread.dac1 == 0) {
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
if (!DBCR_ACTIVE_EVENTS(task->thread.dbcr0,
task->thread.dbcr1)) {
task->thread.regs->msr &= ~MSR_DE;
task->thread.dbcr0 &= ~DBCR0_IDM;
}
return 0;
}
/* Read or Write bits must be set */
if (!(data & 0x3UL))
return -EINVAL;
/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
register */
task->thread.dbcr0 |= DBCR0_IDM;
/* Check for write and read flags and set DBCR0
accordingly */
dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
if (data & 0x1UL)
dbcr_dac(task) |= DBCR_DAC1R;
if (data & 0x2UL)
dbcr_dac(task) |= DBCR_DAC1W;
task->thread.regs->msr |= MSR_DE;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
return 0;
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
/* make sure the single step bit is not set. */
user_disable_single_step(child);
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static long set_instruction_bp(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
int slot;
int slot1_in_use = ((child->thread.dbcr0 & DBCR0_IAC1) != 0);
int slot2_in_use = ((child->thread.dbcr0 & DBCR0_IAC2) != 0);
int slot3_in_use = ((child->thread.dbcr0 & DBCR0_IAC3) != 0);
int slot4_in_use = ((child->thread.dbcr0 & DBCR0_IAC4) != 0);
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
slot2_in_use = 1;
if (dbcr_iac_range(child) & DBCR_IAC34MODE)
slot4_in_use = 1;
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
/* Make sure range is valid. */
if (bp_info->addr2 >= TASK_SIZE)
return -EIO;
/* We need a pair of IAC registers */
if ((!slot1_in_use) && (!slot2_in_use)) {
slot = 1;
child->thread.iac1 = bp_info->addr;
child->thread.iac2 = bp_info->addr2;
child->thread.dbcr0 |= DBCR0_IAC1;
if (bp_info->addr_mode ==
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
dbcr_iac_range(child) |= DBCR_IAC12X;
else
dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
} else if ((!slot3_in_use) && (!slot4_in_use)) {
slot = 3;
child->thread.iac3 = bp_info->addr;
child->thread.iac4 = bp_info->addr2;
child->thread.dbcr0 |= DBCR0_IAC3;
if (bp_info->addr_mode ==
PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
dbcr_iac_range(child) |= DBCR_IAC34X;
else
dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
} else
return -ENOSPC;
} else {
/* We only need one. If possible leave a pair free in
* case a range is needed later
*/
if (!slot1_in_use) {
/*
* Don't use iac1 if iac1-iac2 are free and either
* iac3 or iac4 (but not both) are free
*/
if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
slot = 1;
child->thread.iac1 = bp_info->addr;
child->thread.dbcr0 |= DBCR0_IAC1;
goto out;
}
}
if (!slot2_in_use) {
slot = 2;
child->thread.iac2 = bp_info->addr;
child->thread.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
} else if (!slot3_in_use) {
slot = 3;
child->thread.iac3 = bp_info->addr;
child->thread.dbcr0 |= DBCR0_IAC3;
} else if (!slot4_in_use) {
slot = 4;
child->thread.iac4 = bp_info->addr;
child->thread.dbcr0 |= DBCR0_IAC4;
#endif
} else
return -ENOSPC;
}
out:
child->thread.dbcr0 |= DBCR0_IDM;
child->thread.regs->msr |= MSR_DE;
return slot;
}
static int del_instruction_bp(struct task_struct *child, int slot)
{
switch (slot) {
case 1:
if ((child->thread.dbcr0 & DBCR0_IAC1) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
/* address range - clear slots 1 & 2 */
child->thread.iac2 = 0;
dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
}
child->thread.iac1 = 0;
child->thread.dbcr0 &= ~DBCR0_IAC1;
break;
case 2:
if ((child->thread.dbcr0 & DBCR0_IAC2) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC12MODE)
/* used in a range */
return -EINVAL;
child->thread.iac2 = 0;
child->thread.dbcr0 &= ~DBCR0_IAC2;
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case 3:
if ((child->thread.dbcr0 & DBCR0_IAC3) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
/* address range - clear slots 3 & 4 */
child->thread.iac4 = 0;
dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
}
child->thread.iac3 = 0;
child->thread.dbcr0 &= ~DBCR0_IAC3;
break;
case 4:
if ((child->thread.dbcr0 & DBCR0_IAC4) == 0)
return -ENOENT;
if (dbcr_iac_range(child) & DBCR_IAC34MODE)
/* Used in a range */
return -EINVAL;
child->thread.iac4 = 0;
child->thread.dbcr0 &= ~DBCR0_IAC4;
break;
#endif
default:
return -EINVAL;
}
return 0;
}
static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
int byte_enable =
(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
& 0xf;
int condition_mode =
bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
int slot;
if (byte_enable && (condition_mode == 0))
return -EINVAL;
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
slot = 1;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
dbcr_dac(child) |= DBCR_DAC1R;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dbcr_dac(child) |= DBCR_DAC1W;
child->thread.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
if (byte_enable) {
child->thread.dvc1 =
(unsigned long)bp_info->condition_value;
child->thread.dbcr2 |=
((byte_enable << DBCR2_DVC1BE_SHIFT) |
(condition_mode << DBCR2_DVC1M_SHIFT));
}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
} else if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
/* Both dac1 and dac2 are part of a range */
return -ENOSPC;
#endif
} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
slot = 2;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
dbcr_dac(child) |= DBCR_DAC2R;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dbcr_dac(child) |= DBCR_DAC2W;
child->thread.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
if (byte_enable) {
child->thread.dvc2 =
(unsigned long)bp_info->condition_value;
child->thread.dbcr2 |=
((byte_enable << DBCR2_DVC2BE_SHIFT) |
(condition_mode << DBCR2_DVC2M_SHIFT));
}
#endif
} else
return -ENOSPC;
child->thread.dbcr0 |= DBCR0_IDM;
child->thread.regs->msr |= MSR_DE;
return slot + 4;
}
static int del_dac(struct task_struct *child, int slot)
{
if (slot == 1) {
if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
return -ENOENT;
child->thread.dac1 = 0;
dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
if (child->thread.dbcr2 & DBCR2_DAC12MODE) {
child->thread.dac2 = 0;
child->thread.dbcr2 &= ~DBCR2_DAC12MODE;
}
child->thread.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
child->thread.dvc1 = 0;
#endif
} else if (slot == 2) {
if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
return -ENOENT;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
if (child->thread.dbcr2 & DBCR2_DAC12MODE)
/* Part of a range */
return -EINVAL;
child->thread.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
child->thread.dvc2 = 0;
#endif
child->thread.dac2 = 0;
dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
} else
return -EINVAL;
return 0;
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
static int set_dac_range(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
/* We don't allow range watchpoints to be used with DVC */
if (bp_info->condition_mode)
return -EINVAL;
/*
* Best effort to verify the address range. The user/supervisor bits
* prevent trapping in kernel space, but let's fail on an obvious bad
* range. The simple test on the mask is not fool-proof, and any
* exclusive range will spill over into kernel space.
*/
if (bp_info->addr >= TASK_SIZE)
return -EIO;
if (mode == PPC_BREAKPOINT_MODE_MASK) {
/*
* dac2 is a bitmask. Don't allow a mask that makes a
* kernel space address from a valid dac1 value
*/
if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
return -EIO;
} else {
/*
* For range breakpoints, addr2 must also be a valid address
*/
if (bp_info->addr2 >= TASK_SIZE)
return -EIO;
}
if (child->thread.dbcr0 &
(DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
return -ENOSPC;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
child->thread.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
child->thread.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
child->thread.dac1 = bp_info->addr;
child->thread.dac2 = bp_info->addr2;
if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
child->thread.dbcr2 |= DBCR2_DAC12M;
else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
child->thread.dbcr2 |= DBCR2_DAC12MX;
else /* PPC_BREAKPOINT_MODE_MASK */
child->thread.dbcr2 |= DBCR2_DAC12MM;
child->thread.regs->msr |= MSR_DE;
return 5;
}
#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
static long ppc_set_hwdebug(struct task_struct *child,
struct ppc_hw_breakpoint *bp_info)
{
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
unsigned long dabr;
#endif
if (bp_info->version != 1)
return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* Check for invalid flags and combinations
*/
if ((bp_info->trigger_type == 0) ||
(bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
PPC_BREAKPOINT_TRIGGER_RW)) ||
(bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
(bp_info->condition_mode &
~(PPC_BREAKPOINT_CONDITION_MODE |
PPC_BREAKPOINT_CONDITION_BE_ALL)))
return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
return -EINVAL;
#endif
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
(bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
return -EINVAL;
return set_instruction_bp(child, bp_info);
}
if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
return set_dac(child, bp_info);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
return set_dac_range(child, bp_info);
#else
return -EINVAL;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
/*
* We only support one data breakpoint
*/
if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
(bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT ||
bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
return -EINVAL;
if (child->thread.dabr)
return -ENOSPC;
if ((unsigned long)bp_info->addr >= TASK_SIZE)
return -EIO;
dabr = (unsigned long)bp_info->addr & ~7UL;
dabr |= DABR_TRANSLATION;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
dabr |= DABR_DATA_READ;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
dabr |= DABR_DATA_WRITE;
child->thread.dabr = dabr;
return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_REGS */
}
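/*
 * A minimal illustrative *userspace* sketch of arming a write watchpoint
 * through PPC_PTRACE_SETHWDEBUG, mirroring the field checks in
 * ppc_set_hwdebug() above. Constants come from the uapi <asm/ptrace.h>;
 * error handling is elided, the function name is hypothetical, and the
 * block is disabled since this is not kernel code.
 */
#if 0
static long example_set_write_watchpoint(pid_t child, unsigned long addr)
{
	struct ppc_hw_breakpoint bp = {
		.version	= 1,
		.trigger_type	= PPC_BREAKPOINT_TRIGGER_WRITE,
		.addr_mode	= PPC_BREAKPOINT_MODE_EXACT,
		.condition_mode	= PPC_BREAKPOINT_CONDITION_NONE,
		.addr		= addr,
	};

	/* returns a positive slot number on success, which is later passed
	 * to PPC_PTRACE_DELHWDEBUG to remove the watchpoint */
	return ptrace(PPC_PTRACE_SETHWDEBUG, child, 0, &bp);
}
#endif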
static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
int rc;
if (data <= 4)
rc = del_instruction_bp(child, (int)data);
else
rc = del_dac(child, (int)data - 4);
if (!rc) {
if (!DBCR_ACTIVE_EVENTS(child->thread.dbcr0,
child->thread.dbcr1)) {
child->thread.dbcr0 &= ~DBCR0_IDM;
child->thread.regs->msr &= ~MSR_DE;
}
}
return rc;
#else
if (data != 1)
return -EINVAL;
if (child->thread.dabr == 0)
return -ENOENT;
child->thread.dabr = 0;
return 0;
#endif
}
/*
* Here are the old "legacy" powerpc-specific getregs/setregs ptrace calls;
* we mark them as obsolete now and they will be removed in a future version
*/
static long arch_ptrace_old(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
void __user *datavp = (void __user *) data;
switch (request) {
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
datavp);
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR, 0, 32 * sizeof(long),
datavp);
case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
datavp);
case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR, 0, 32 * sizeof(double),
datavp);
}
return -EPERM;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret = -EPERM;
void __user *datavp = (void __user *) data;
unsigned long __user *datalp = datavp;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long index, tmp;
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
tmp = ptrace_get_reg(child, (int) index);
} else {
unsigned int fpidx = index - PT_FPR0;
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
tmp = ((unsigned long *)child->thread.fpr)
[fpidx * TS_FPRWIDTH];
else
tmp = child->thread.fpscr.val;
}
ret = put_user(tmp, datalp);
break;
}
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR: {
unsigned long index;
ret = -EIO;
/* convert to index and check */
#ifdef CONFIG_PPC32
index = addr >> 2;
if ((addr & 3) || (index > PT_FPSCR)
|| (child->thread.regs == NULL))
#else
index = addr >> 3;
if ((addr & 7) || (index > PT_FPSCR))
#endif
break;
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
ret = ptrace_put_reg(child, index, data);
} else {
unsigned int fpidx = index - PT_FPR0;
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
((unsigned long *)child->thread.fpr)
[fpidx * TS_FPRWIDTH] = data;
else
child->thread.fpscr.val = data;
ret = 0;
}
break;
}
case PPC_PTRACE_GETHWDBGINFO: {
struct ppc_debug_info dbginfo;
dbginfo.version = 1;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
dbginfo.data_bp_alignment = 4;
dbginfo.sizeof_condition = 4;
dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
PPC_DEBUG_FEATURE_INSN_BP_MASK;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
dbginfo.features |=
PPC_DEBUG_FEATURE_DATA_BP_RANGE |
PPC_DEBUG_FEATURE_DATA_BP_MASK;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
dbginfo.num_instruction_bps = 0;
dbginfo.num_data_bps = 1;
dbginfo.num_condition_regs = 0;
#ifdef CONFIG_PPC64
dbginfo.data_bp_alignment = 8;
#else
dbginfo.data_bp_alignment = 4;
#endif
dbginfo.sizeof_condition = 0;
dbginfo.features = 0;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
if (!access_ok(VERIFY_WRITE, datavp,
sizeof(struct ppc_debug_info)))
return -EFAULT;
ret = __copy_to_user(datavp, &dbginfo,
sizeof(struct ppc_debug_info)) ?
-EFAULT : 0;
break;
}
case PPC_PTRACE_SETHWDEBUG: {
struct ppc_hw_breakpoint bp_info;
if (!access_ok(VERIFY_READ, datavp,
sizeof(struct ppc_hw_breakpoint)))
return -EFAULT;
ret = __copy_from_user(&bp_info, datavp,
sizeof(struct ppc_hw_breakpoint)) ?
-EFAULT : 0;
if (!ret)
ret = ppc_set_hwdebug(child, &bp_info);
break;
}
case PPC_PTRACE_DELHWDEBUG: {
ret = ppc_del_hwdebug(child, addr, data);
break;
}
case PTRACE_GET_DEBUGREG: {
ret = -EINVAL;
/* We only support one DABR and no IABRs at the moment */
if (addr > 0)
break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
ret = put_user(child->thread.dac1, datalp);
#else
ret = put_user(child->thread.dabr, datalp);
#endif
break;
}
case PTRACE_SET_DEBUGREG:
ret = ptrace_set_debugreg(child, addr, data);
break;
#ifdef CONFIG_PPC64
case PTRACE_GETREGS64:
#endif
case PTRACE_GETREGS: /* Get all pt_regs from the child. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
datavp);
#ifdef CONFIG_PPC64
case PTRACE_SETREGS64:
#endif
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR,
0, sizeof(struct pt_regs),
datavp);
case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
datavp);
case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_FPR,
0, sizeof(elf_fpregset_t),
datavp);
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
datavp);
case PTRACE_SETVRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VMX,
0, (33 * sizeof(vector128) +
sizeof(u32)),
datavp);
#endif
#ifdef CONFIG_VSX
case PTRACE_GETVSRREGS:
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
datavp);
case PTRACE_SETVSRREGS:
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_VSX,
0, 32 * sizeof(double),
datavp);
#endif
#ifdef CONFIG_SPE
case PTRACE_GETEVRREGS:
/* Get the child spe register state. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
datavp);
case PTRACE_SETEVRREGS:
/* Set the child spe register state. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_SPE, 0, 35 * sizeof(u32),
datavp);
#endif
/* Old reverse-args ptrace calls */
case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
ret = arch_ptrace_old(child, request, addr, data);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
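/*
 * A minimal illustrative *userspace* sketch of reading GPR3 of a stopped
 * tracee via PTRACE_PEEKUSR, matching the index/alignment checks above
 * (USER-area offsets are word-sized: 4 bytes on ppc32, 8 on ppc64).
 * PT_R3 comes from the uapi <asm/ptrace.h>; the function name is
 * hypothetical and the block is disabled since this is not kernel code.
 */
#if 0
static long example_peek_gpr3(pid_t child)
{
	/* on error ptrace() returns -1 with errno set */
	return ptrace(PTRACE_PEEKUSR, child,
		      (void *)(PT_R3 * sizeof(long)), NULL);
}
#endif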
/*
* We must return the syscall number to actually look up in the table.
* This can be -1L to skip running any syscall at all.
*/
long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
secure_computing(regs->gpr[0]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in regs->gpr[0].
*/
ret = -1L;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->gpr[0]);
#ifdef CONFIG_PPC64
if (!is_32bit_task())
audit_syscall_entry(AUDIT_ARCH_PPC64,
regs->gpr[0],
regs->gpr[3], regs->gpr[4],
regs->gpr[5], regs->gpr[6]);
else
#endif
audit_syscall_entry(AUDIT_ARCH_PPC,
regs->gpr[0],
regs->gpr[3] & 0xffffffff,
regs->gpr[4] & 0xffffffff,
regs->gpr[5] & 0xffffffff,
regs->gpr[6] & 0xffffffff);
return ret ?: regs->gpr[0];
}
void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->result);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
| gpl-2.0 |
chasmodo/android_kernel_sony_apq8064 | sound/soc/codecs/wm8974.c | 4944 | 19153 | /*
* wm8974.c -- WM8974 ALSA Soc Audio driver
*
* Copyright 2006-2009 Wolfson Microelectronics PLC.
*
* Author: Liam Girdwood <Liam.Girdwood@wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "wm8974.h"
static const u16 wm8974_reg[WM8974_CACHEREGNUM] = {
0x0000, 0x0000, 0x0000, 0x0000,
0x0050, 0x0000, 0x0140, 0x0000,
0x0000, 0x0000, 0x0000, 0x00ff,
0x0000, 0x0000, 0x0100, 0x00ff,
0x0000, 0x0000, 0x012c, 0x002c,
0x002c, 0x002c, 0x002c, 0x0000,
0x0032, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000,
0x0038, 0x000b, 0x0032, 0x0000,
0x0008, 0x000c, 0x0093, 0x00e9,
0x0000, 0x0000, 0x0000, 0x0000,
0x0003, 0x0010, 0x0000, 0x0000,
0x0000, 0x0002, 0x0000, 0x0000,
0x0000, 0x0000, 0x0039, 0x0000,
0x0000,
};
#define WM8974_POWER1_BIASEN 0x08
#define WM8974_POWER1_BUFIOEN 0x04
#define wm8974_reset(c) snd_soc_write(c, WM8974_RESET, 0)
static const char *wm8974_companding[] = {"Off", "NC", "u-law", "A-law" };
static const char *wm8974_deemp[] = {"None", "32kHz", "44.1kHz", "48kHz" };
static const char *wm8974_eqmode[] = {"Capture", "Playback" };
static const char *wm8974_bw[] = {"Narrow", "Wide" };
static const char *wm8974_eq1[] = {"80Hz", "105Hz", "135Hz", "175Hz" };
static const char *wm8974_eq2[] = {"230Hz", "300Hz", "385Hz", "500Hz" };
static const char *wm8974_eq3[] = {"650Hz", "850Hz", "1.1kHz", "1.4kHz" };
static const char *wm8974_eq4[] = {"1.8kHz", "2.4kHz", "3.2kHz", "4.1kHz" };
static const char *wm8974_eq5[] = {"5.3kHz", "6.9kHz", "9kHz", "11.7kHz" };
static const char *wm8974_alc[] = {"ALC", "Limiter" };
static const struct soc_enum wm8974_enum[] = {
SOC_ENUM_SINGLE(WM8974_COMP, 1, 4, wm8974_companding), /* adc */
SOC_ENUM_SINGLE(WM8974_COMP, 3, 4, wm8974_companding), /* dac */
SOC_ENUM_SINGLE(WM8974_DAC, 4, 4, wm8974_deemp),
SOC_ENUM_SINGLE(WM8974_EQ1, 8, 2, wm8974_eqmode),
SOC_ENUM_SINGLE(WM8974_EQ1, 5, 4, wm8974_eq1),
SOC_ENUM_SINGLE(WM8974_EQ2, 8, 2, wm8974_bw),
SOC_ENUM_SINGLE(WM8974_EQ2, 5, 4, wm8974_eq2),
SOC_ENUM_SINGLE(WM8974_EQ3, 8, 2, wm8974_bw),
SOC_ENUM_SINGLE(WM8974_EQ3, 5, 4, wm8974_eq3),
SOC_ENUM_SINGLE(WM8974_EQ4, 8, 2, wm8974_bw),
SOC_ENUM_SINGLE(WM8974_EQ4, 5, 4, wm8974_eq4),
SOC_ENUM_SINGLE(WM8974_EQ5, 8, 2, wm8974_bw),
SOC_ENUM_SINGLE(WM8974_EQ5, 5, 4, wm8974_eq5),
SOC_ENUM_SINGLE(WM8974_ALC3, 8, 2, wm8974_alc),
};
static const char *wm8974_auxmode_text[] = { "Buffer", "Mixer" };
static const struct soc_enum wm8974_auxmode =
SOC_ENUM_SINGLE(WM8974_INPUT, 3, 2, wm8974_auxmode_text);
static const DECLARE_TLV_DB_SCALE(digital_tlv, -12750, 50, 1);
static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1200, 75, 0);
static const DECLARE_TLV_DB_SCALE(spk_tlv, -5700, 100, 0);
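/*
 * Illustrative note on the TLV declarations above: DECLARE_TLV_DB_SCALE
 * takes (name, min, step, mute) in 0.01 dB units, so digital_tlv maps raw
 * 0..255 to -127.50 dB .. 0 dB in 0.5 dB steps with raw 0 muted, and
 * spk_tlv maps raw 0..63 to -57 dB .. +6 dB in 1 dB steps.
 */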
static const struct snd_kcontrol_new wm8974_snd_controls[] = {
SOC_SINGLE("Digital Loopback Switch", WM8974_COMP, 0, 1, 0),
SOC_ENUM("DAC Companding", wm8974_enum[1]),
SOC_ENUM("ADC Companding", wm8974_enum[0]),
SOC_ENUM("Playback De-emphasis", wm8974_enum[2]),
SOC_SINGLE("DAC Inversion Switch", WM8974_DAC, 0, 1, 0),
SOC_SINGLE_TLV("PCM Volume", WM8974_DACVOL, 0, 255, 0, digital_tlv),
SOC_SINGLE("High Pass Filter Switch", WM8974_ADC, 8, 1, 0),
SOC_SINGLE("High Pass Cut Off", WM8974_ADC, 4, 7, 0),
SOC_SINGLE("ADC Inversion Switch", WM8974_ADC, 0, 1, 0),
SOC_SINGLE_TLV("Capture Volume", WM8974_ADCVOL, 0, 255, 0, digital_tlv),
SOC_ENUM("Equaliser Function", wm8974_enum[3]),
SOC_ENUM("EQ1 Cut Off", wm8974_enum[4]),
SOC_SINGLE_TLV("EQ1 Volume", WM8974_EQ1, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ2 Bandwith", wm8974_enum[5]),
SOC_ENUM("EQ2 Cut Off", wm8974_enum[6]),
SOC_SINGLE_TLV("EQ2 Volume", WM8974_EQ2, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ3 Bandwith", wm8974_enum[7]),
SOC_ENUM("EQ3 Cut Off", wm8974_enum[8]),
SOC_SINGLE_TLV("EQ3 Volume", WM8974_EQ3, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ4 Bandwith", wm8974_enum[9]),
SOC_ENUM("EQ4 Cut Off", wm8974_enum[10]),
SOC_SINGLE_TLV("EQ4 Volume", WM8974_EQ4, 0, 24, 1, eq_tlv),
SOC_ENUM("Equaliser EQ5 Bandwith", wm8974_enum[11]),
SOC_ENUM("EQ5 Cut Off", wm8974_enum[12]),
SOC_SINGLE_TLV("EQ5 Volume", WM8974_EQ5, 0, 24, 1, eq_tlv),
SOC_SINGLE("DAC Playback Limiter Switch", WM8974_DACLIM1, 8, 1, 0),
SOC_SINGLE("DAC Playback Limiter Decay", WM8974_DACLIM1, 4, 15, 0),
SOC_SINGLE("DAC Playback Limiter Attack", WM8974_DACLIM1, 0, 15, 0),
SOC_SINGLE("DAC Playback Limiter Threshold", WM8974_DACLIM2, 4, 7, 0),
SOC_SINGLE("DAC Playback Limiter Boost", WM8974_DACLIM2, 0, 15, 0),
SOC_SINGLE("ALC Enable Switch", WM8974_ALC1, 8, 1, 0),
SOC_SINGLE("ALC Capture Max Gain", WM8974_ALC1, 3, 7, 0),
SOC_SINGLE("ALC Capture Min Gain", WM8974_ALC1, 0, 7, 0),
SOC_SINGLE("ALC Capture ZC Switch", WM8974_ALC2, 8, 1, 0),
SOC_SINGLE("ALC Capture Hold", WM8974_ALC2, 4, 7, 0),
SOC_SINGLE("ALC Capture Target", WM8974_ALC2, 0, 15, 0),
SOC_ENUM("ALC Capture Mode", wm8974_enum[13]),
SOC_SINGLE("ALC Capture Decay", WM8974_ALC3, 4, 15, 0),
SOC_SINGLE("ALC Capture Attack", WM8974_ALC3, 0, 15, 0),
SOC_SINGLE("ALC Capture Noise Gate Switch", WM8974_NGATE, 3, 1, 0),
SOC_SINGLE("ALC Capture Noise Gate Threshold", WM8974_NGATE, 0, 7, 0),
SOC_SINGLE("Capture PGA ZC Switch", WM8974_INPPGA, 7, 1, 0),
SOC_SINGLE_TLV("Capture PGA Volume", WM8974_INPPGA, 0, 63, 0, inpga_tlv),
SOC_SINGLE("Speaker Playback ZC Switch", WM8974_SPKVOL, 7, 1, 0),
SOC_SINGLE("Speaker Playback Switch", WM8974_SPKVOL, 6, 1, 1),
SOC_SINGLE_TLV("Speaker Playback Volume", WM8974_SPKVOL, 0, 63, 0, spk_tlv),
SOC_ENUM("Aux Mode", wm8974_auxmode),
SOC_SINGLE("Capture Boost(+20dB)", WM8974_ADCBOOST, 8, 1, 0),
SOC_SINGLE("Mono Playback Switch", WM8974_MONOMIX, 6, 1, 1),
/* DAC / ADC oversampling */
SOC_SINGLE("DAC 128x Oversampling Switch", WM8974_DAC, 8, 1, 0),
SOC_SINGLE("ADC 128x Oversampling Switch", WM8974_ADC, 8, 1, 0),
};
/* Speaker Output Mixer */
static const struct snd_kcontrol_new wm8974_speaker_mixer_controls[] = {
SOC_DAPM_SINGLE("Line Bypass Switch", WM8974_SPKMIX, 1, 1, 0),
SOC_DAPM_SINGLE("Aux Playback Switch", WM8974_SPKMIX, 5, 1, 0),
SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_SPKMIX, 0, 1, 0),
};
/* Mono Output Mixer */
static const struct snd_kcontrol_new wm8974_mono_mixer_controls[] = {
SOC_DAPM_SINGLE("Line Bypass Switch", WM8974_MONOMIX, 1, 1, 0),
SOC_DAPM_SINGLE("Aux Playback Switch", WM8974_MONOMIX, 2, 1, 0),
SOC_DAPM_SINGLE("PCM Playback Switch", WM8974_MONOMIX, 0, 1, 0),
};
/* Boost mixer */
static const struct snd_kcontrol_new wm8974_boost_mixer[] = {
SOC_DAPM_SINGLE("Aux Switch", WM8974_INPPGA, 6, 1, 0),
};
/* Input PGA */
static const struct snd_kcontrol_new wm8974_inpga[] = {
SOC_DAPM_SINGLE("Aux Switch", WM8974_INPUT, 2, 1, 0),
SOC_DAPM_SINGLE("MicN Switch", WM8974_INPUT, 1, 1, 0),
SOC_DAPM_SINGLE("MicP Switch", WM8974_INPUT, 0, 1, 0),
};
/* AUX Input boost vol */
static const struct snd_kcontrol_new wm8974_aux_boost_controls =
SOC_DAPM_SINGLE("Aux Volume", WM8974_ADCBOOST, 0, 7, 0);
/* Mic Input boost vol */
static const struct snd_kcontrol_new wm8974_mic_boost_controls =
SOC_DAPM_SINGLE("Mic Volume", WM8974_ADCBOOST, 4, 7, 0);
static const struct snd_soc_dapm_widget wm8974_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("Speaker Mixer", WM8974_POWER3, 2, 0,
&wm8974_speaker_mixer_controls[0],
ARRAY_SIZE(wm8974_speaker_mixer_controls)),
SND_SOC_DAPM_MIXER("Mono Mixer", WM8974_POWER3, 3, 0,
&wm8974_mono_mixer_controls[0],
ARRAY_SIZE(wm8974_mono_mixer_controls)),
SND_SOC_DAPM_DAC("DAC", "HiFi Playback", WM8974_POWER3, 0, 0),
SND_SOC_DAPM_ADC("ADC", "HiFi Capture", WM8974_POWER2, 0, 0),
SND_SOC_DAPM_PGA("Aux Input", WM8974_POWER1, 6, 0, NULL, 0),
SND_SOC_DAPM_PGA("SpkN Out", WM8974_POWER3, 5, 0, NULL, 0),
SND_SOC_DAPM_PGA("SpkP Out", WM8974_POWER3, 6, 0, NULL, 0),
SND_SOC_DAPM_PGA("Mono Out", WM8974_POWER3, 7, 0, NULL, 0),
SND_SOC_DAPM_MIXER("Input PGA", WM8974_POWER2, 2, 0, wm8974_inpga,
ARRAY_SIZE(wm8974_inpga)),
SND_SOC_DAPM_MIXER("Boost Mixer", WM8974_POWER2, 4, 0,
wm8974_boost_mixer, ARRAY_SIZE(wm8974_boost_mixer)),
SND_SOC_DAPM_SUPPLY("Mic Bias", WM8974_POWER1, 4, 0, NULL, 0),
SND_SOC_DAPM_INPUT("MICN"),
SND_SOC_DAPM_INPUT("MICP"),
SND_SOC_DAPM_INPUT("AUX"),
SND_SOC_DAPM_OUTPUT("MONOOUT"),
SND_SOC_DAPM_OUTPUT("SPKOUTP"),
SND_SOC_DAPM_OUTPUT("SPKOUTN"),
};
static const struct snd_soc_dapm_route wm8974_dapm_routes[] = {
/* Mono output mixer */
{"Mono Mixer", "PCM Playback Switch", "DAC"},
{"Mono Mixer", "Aux Playback Switch", "Aux Input"},
{"Mono Mixer", "Line Bypass Switch", "Boost Mixer"},
/* Speaker output mixer */
{"Speaker Mixer", "PCM Playback Switch", "DAC"},
{"Speaker Mixer", "Aux Playback Switch", "Aux Input"},
{"Speaker Mixer", "Line Bypass Switch", "Boost Mixer"},
/* Outputs */
{"Mono Out", NULL, "Mono Mixer"},
{"MONOOUT", NULL, "Mono Out"},
{"SpkN Out", NULL, "Speaker Mixer"},
{"SpkP Out", NULL, "Speaker Mixer"},
{"SPKOUTN", NULL, "SpkN Out"},
{"SPKOUTP", NULL, "SpkP Out"},
/* Boost Mixer */
{"ADC", NULL, "Boost Mixer"},
{"Boost Mixer", "Aux Switch", "Aux Input"},
{"Boost Mixer", NULL, "Input PGA"},
{"Boost Mixer", NULL, "MICP"},
/* Input PGA */
{"Input PGA", "Aux Switch", "Aux Input"},
{"Input PGA", "MicN Switch", "MICN"},
{"Input PGA", "MicP Switch", "MICP"},
/* Inputs */
{"Aux Input", NULL, "AUX"},
};
struct pll_ {
unsigned int pre_div:1;
unsigned int n:4;
unsigned int k;
};
/* The size in bits of the PLL fractional divider, multiplied by 10
 * to allow rounding later */
#define FIXED_PLL_SIZE ((1 << 24) * 10)
static void pll_factors(struct pll_ *pll_div,
unsigned int target, unsigned int source)
{
unsigned long long Kpart;
unsigned int K, Ndiv, Nmod;
/* There is a fixed divide by 4 in the output path */
target *= 4;
Ndiv = target / source;
if (Ndiv < 6) {
source /= 2;
pll_div->pre_div = 1;
Ndiv = target / source;
} else
pll_div->pre_div = 0;
if ((Ndiv < 6) || (Ndiv > 12))
printk(KERN_WARNING
"WM8974 N value %u outwith recommended range!\n",
Ndiv);
pll_div->n = Ndiv;
Nmod = target % source;
Kpart = FIXED_PLL_SIZE * (long long)Nmod;
do_div(Kpart, source);
K = Kpart & 0xFFFFFFFF;
/* Check if we need to round */
if ((K % 10) >= 5)
K += 5;
/* Move down to proper range now rounding is done */
K /= 10;
pll_div->k = K;
}
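/*
 * Worked example (hypothetical clock rates, derived from the math above):
 * pll_factors(&div, 12288000, 12000000) scales the target by the fixed /4
 * to 49.152MHz. 49152000 / 12000000 = 4 is below 6, so the source is
 * pre-divided to 6MHz (pre_div = 1) and N becomes 8. The 1.152MHz
 * remainder then gives K = (1152000 * 2^24) / 6000000, which rounds to
 * 3221225 (0x3126E9).
 */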
static int wm8974_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
int source, unsigned int freq_in, unsigned int freq_out)
{
struct snd_soc_codec *codec = codec_dai->codec;
struct pll_ pll_div;
u16 reg;
if (freq_in == 0 || freq_out == 0) {
/* Clock CODEC directly from MCLK */
reg = snd_soc_read(codec, WM8974_CLOCK);
snd_soc_write(codec, WM8974_CLOCK, reg & 0x0ff);
/* Turn off PLL */
reg = snd_soc_read(codec, WM8974_POWER1);
snd_soc_write(codec, WM8974_POWER1, reg & 0x1df);
return 0;
}
pll_factors(&pll_div, freq_out, freq_in);
snd_soc_write(codec, WM8974_PLLN, (pll_div.pre_div << 4) | pll_div.n);
snd_soc_write(codec, WM8974_PLLK1, pll_div.k >> 18);
snd_soc_write(codec, WM8974_PLLK2, (pll_div.k >> 9) & 0x1ff);
snd_soc_write(codec, WM8974_PLLK3, pll_div.k & 0x1ff);
reg = snd_soc_read(codec, WM8974_POWER1);
snd_soc_write(codec, WM8974_POWER1, reg | 0x020);
/* Run CODEC from PLL instead of MCLK */
reg = snd_soc_read(codec, WM8974_CLOCK);
snd_soc_write(codec, WM8974_CLOCK, reg | 0x100);
return 0;
}
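/*
 * A machine driver would normally reach this through the generic DAI
 * helper; the rates here are illustrative only:
 *
 *	snd_soc_dai_set_pll(codec_dai, 0, 0, 12000000, 12288000);
 *
 * Passing 0 for freq_in or freq_out later reverts the CODEC to raw MCLK
 * and powers the PLL back down, as handled above.
 */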
/*
* Configure WM8974 clock dividers.
*/
static int wm8974_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
int div_id, int div)
{
struct snd_soc_codec *codec = codec_dai->codec;
u16 reg;
switch (div_id) {
case WM8974_OPCLKDIV:
reg = snd_soc_read(codec, WM8974_GPIO) & 0x1cf;
snd_soc_write(codec, WM8974_GPIO, reg | div);
break;
case WM8974_MCLKDIV:
reg = snd_soc_read(codec, WM8974_CLOCK) & 0x11f;
snd_soc_write(codec, WM8974_CLOCK, reg | div);
break;
case WM8974_BCLKDIV:
reg = snd_soc_read(codec, WM8974_CLOCK) & 0x1e3;
snd_soc_write(codec, WM8974_CLOCK, reg | div);
break;
default:
return -EINVAL;
}
return 0;
}
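/*
 * Illustrative machine-driver call; the divider constant is a hypothetical
 * register-field value from the matching header:
 *
 *	snd_soc_dai_set_clkdiv(codec_dai, WM8974_BCLKDIV, WM8974_BCLKDIV_8);
 */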
static int wm8974_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
u16 iface = 0;
u16 clk = snd_soc_read(codec, WM8974_CLOCK) & 0x1fe;
/* set master/slave audio interface */
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
clk |= 0x0001;
break;
case SND_SOC_DAIFMT_CBS_CFS:
break;
default:
return -EINVAL;
}
/* interface format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
iface |= 0x0010;
break;
case SND_SOC_DAIFMT_RIGHT_J:
break;
case SND_SOC_DAIFMT_LEFT_J:
iface |= 0x0008;
break;
case SND_SOC_DAIFMT_DSP_A:
		iface |= 0x0018;
break;
default:
return -EINVAL;
}
/* clock inversion */
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_IB_IF:
iface |= 0x0180;
break;
case SND_SOC_DAIFMT_IB_NF:
iface |= 0x0100;
break;
case SND_SOC_DAIFMT_NB_IF:
iface |= 0x0080;
break;
default:
return -EINVAL;
}
snd_soc_write(codec, WM8974_IFACE, iface);
snd_soc_write(codec, WM8974_CLOCK, clk);
return 0;
}
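/*
 * Example format request from a machine driver, using the standard ASoC
 * flags (I2S data, normal clocks, codec as clock slave):
 *
 *	snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
 *			    SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
 */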
static int wm8974_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_codec *codec = dai->codec;
u16 iface = snd_soc_read(codec, WM8974_IFACE) & 0x19f;
u16 adn = snd_soc_read(codec, WM8974_ADD) & 0x1f1;
/* bit size */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
break;
case SNDRV_PCM_FORMAT_S20_3LE:
iface |= 0x0020;
break;
case SNDRV_PCM_FORMAT_S24_LE:
iface |= 0x0040;
break;
case SNDRV_PCM_FORMAT_S32_LE:
iface |= 0x0060;
break;
}
/* filter coefficient */
switch (params_rate(params)) {
case 8000:
adn |= 0x5 << 1;
break;
case 11025:
adn |= 0x4 << 1;
break;
case 16000:
adn |= 0x3 << 1;
break;
case 22050:
adn |= 0x2 << 1;
break;
case 32000:
adn |= 0x1 << 1;
break;
case 44100:
case 48000:
break;
}
snd_soc_write(codec, WM8974_IFACE, iface);
snd_soc_write(codec, WM8974_ADD, adn);
return 0;
}
static int wm8974_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_codec *codec = dai->codec;
u16 mute_reg = snd_soc_read(codec, WM8974_DAC) & 0xffbf;
if (mute)
snd_soc_write(codec, WM8974_DAC, mute_reg | 0x40);
else
snd_soc_write(codec, WM8974_DAC, mute_reg);
return 0;
}
/* TODO(liam): make this lower power with DAPM */
static int wm8974_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
u16 power1 = snd_soc_read(codec, WM8974_POWER1) & ~0x3;
switch (level) {
case SND_SOC_BIAS_ON:
case SND_SOC_BIAS_PREPARE:
power1 |= 0x1; /* VMID 50k */
snd_soc_write(codec, WM8974_POWER1, power1);
break;
case SND_SOC_BIAS_STANDBY:
power1 |= WM8974_POWER1_BIASEN | WM8974_POWER1_BUFIOEN;
if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
snd_soc_cache_sync(codec);
/* Initial cap charge at VMID 5k */
snd_soc_write(codec, WM8974_POWER1, power1 | 0x3);
mdelay(100);
}
power1 |= 0x2; /* VMID 500k */
snd_soc_write(codec, WM8974_POWER1, power1);
break;
case SND_SOC_BIAS_OFF:
snd_soc_write(codec, WM8974_POWER1, 0);
snd_soc_write(codec, WM8974_POWER2, 0);
snd_soc_write(codec, WM8974_POWER3, 0);
break;
}
codec->dapm.bias_level = level;
return 0;
}
#define WM8974_RATES (SNDRV_PCM_RATE_8000_48000)
#define WM8974_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE)
static const struct snd_soc_dai_ops wm8974_ops = {
.hw_params = wm8974_pcm_hw_params,
.digital_mute = wm8974_mute,
.set_fmt = wm8974_set_dai_fmt,
.set_clkdiv = wm8974_set_dai_clkdiv,
.set_pll = wm8974_set_dai_pll,
};
static struct snd_soc_dai_driver wm8974_dai = {
.name = "wm8974-hifi",
.playback = {
.stream_name = "Playback",
.channels_min = 1,
		.channels_max = 2, /* mono device; stereo streams accepted */
.rates = WM8974_RATES,
.formats = WM8974_FORMATS,},
.capture = {
.stream_name = "Capture",
.channels_min = 1,
		.channels_max = 2, /* mono device; stereo streams accepted */
.rates = WM8974_RATES,
.formats = WM8974_FORMATS,},
.ops = &wm8974_ops,
.symmetric_rates = 1,
};
static int wm8974_suspend(struct snd_soc_codec *codec)
{
wm8974_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
static int wm8974_resume(struct snd_soc_codec *codec)
{
wm8974_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
}
static int wm8974_probe(struct snd_soc_codec *codec)
{
int ret = 0;
ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_I2C);
if (ret < 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
return ret;
}
ret = wm8974_reset(codec);
if (ret < 0) {
dev_err(codec->dev, "Failed to issue reset\n");
return ret;
}
wm8974_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return ret;
}
/* power down chip */
static int wm8974_remove(struct snd_soc_codec *codec)
{
wm8974_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm8974 = {
.probe = wm8974_probe,
.remove = wm8974_remove,
.suspend = wm8974_suspend,
.resume = wm8974_resume,
.set_bias_level = wm8974_set_bias_level,
.reg_cache_size = ARRAY_SIZE(wm8974_reg),
.reg_word_size = sizeof(u16),
.reg_cache_default = wm8974_reg,
.controls = wm8974_snd_controls,
.num_controls = ARRAY_SIZE(wm8974_snd_controls),
.dapm_widgets = wm8974_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8974_dapm_widgets),
.dapm_routes = wm8974_dapm_routes,
.num_dapm_routes = ARRAY_SIZE(wm8974_dapm_routes),
};
static __devinit int wm8974_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
int ret;
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8974, &wm8974_dai, 1);
return ret;
}
static __devexit int wm8974_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
return 0;
}
static const struct i2c_device_id wm8974_i2c_id[] = {
{ "wm8974", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8974_i2c_id);
static struct i2c_driver wm8974_i2c_driver = {
.driver = {
.name = "wm8974",
.owner = THIS_MODULE,
},
.probe = wm8974_i2c_probe,
.remove = __devexit_p(wm8974_i2c_remove),
.id_table = wm8974_i2c_id,
};
static int __init wm8974_modinit(void)
{
int ret = 0;
ret = i2c_add_driver(&wm8974_i2c_driver);
if (ret != 0) {
printk(KERN_ERR "Failed to register wm8974 I2C driver: %d\n",
ret);
}
return ret;
}
module_init(wm8974_modinit);
static void __exit wm8974_exit(void)
{
i2c_del_driver(&wm8974_i2c_driver);
}
module_exit(wm8974_exit);
MODULE_DESCRIPTION("ASoC WM8974 driver");
MODULE_AUTHOR("Liam Girdwood");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jab2/android_kernel_lge_l45c | arch/um/drivers/xterm_kern.c | 4944 | 1580 | /*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/irqreturn.h>
#include <asm/irq.h>
#include "irq_kern.h"
#include "os.h"
struct xterm_wait {
struct completion ready;
int fd;
int pid;
int new_fd;
};
static irqreturn_t xterm_interrupt(int irq, void *data)
{
struct xterm_wait *xterm = data;
int fd;
fd = os_rcv_fd(xterm->fd, &xterm->pid);
if (fd == -EAGAIN)
return IRQ_NONE;
xterm->new_fd = fd;
complete(&xterm->ready);
return IRQ_HANDLED;
}
int xterm_fd(int socket, int *pid_out)
{
struct xterm_wait *data;
int err, ret;
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (data == NULL) {
printk(KERN_ERR "xterm_fd : failed to allocate xterm_wait\n");
return -ENOMEM;
}
	/* The completion starts out not-completed, so xterm_fd() blocks below... */
*data = ((struct xterm_wait) { .fd = socket,
.pid = -1,
.new_fd = -1 });
init_completion(&data->ready);
err = um_request_irq(XTERM_IRQ, socket, IRQ_READ, xterm_interrupt,
IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
"xterm", data);
if (err) {
printk(KERN_ERR "xterm_fd : failed to get IRQ for xterm, "
"err = %d\n", err);
ret = err;
goto out;
}
/* ... so here we wait for an xterm interrupt.
*
* XXX Note, if the xterm doesn't work for some reason (eg. DISPLAY
* isn't set) this will hang... */
wait_for_completion(&data->ready);
free_irq(XTERM_IRQ, data);
ret = data->new_fd;
*pid_out = data->pid;
out:
kfree(data);
return ret;
}
| gpl-2.0 |
showliu/android_kernel_xiaomi_aries-1 | arch/m68k/mac/psc.c | 6992 | 4259 | /*
* Apple Peripheral System Controller (PSC)
*
* The PSC is used on the AV Macs to control IO functions not handled
* by the VIAs (Ethernet, DSP, SCC).
*
* TO DO:
*
 * Try to figure out what's going on in pIFR5 and pIFR6. There seem to be
 * persistent interrupt conditions in those registers and I have no idea what
 * they are. Granted, it doesn't affect anything since we're not enabling any
 * interrupts on those levels at the moment, but it would be nice to know. I
 * have a feeling they aren't actually interrupt lines but data lines (to the
 * DSP?)
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <asm/traps.h>
#include <asm/bootinfo.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#define DEBUG_PSC
int psc_present;
volatile __u8 *psc;
/*
* Debugging dump, used in various places to see what's going on.
*/
static void psc_debug_dump(void)
{
int i;
	if (!psc_present)
		return;
for (i = 0x30 ; i < 0x70 ; i += 0x10) {
printk("PSC #%d: IFR = 0x%02X IER = 0x%02X\n",
i >> 4,
(int) psc_read_byte(pIFRbase + i),
(int) psc_read_byte(pIERbase + i));
}
}
/*
 * Try to kill all DMA channels on the PSC. Not sure how this is
 * supposed to work; this is code lifted from macmace.c and then
 * expanded to cover what I think are the remaining channels.
*/
static void psc_dma_die_die_die(void)
{
int i;
printk("Killing all PSC DMA channels...");
for (i = 0 ; i < 9 ; i++) {
psc_write_word(PSC_CTL_BASE + (i << 4), 0x8800);
psc_write_word(PSC_CTL_BASE + (i << 4), 0x1000);
psc_write_word(PSC_CMD_BASE + (i << 5), 0x1100);
psc_write_word(PSC_CMD_BASE + (i << 5) + 0x10, 0x1100);
}
printk("done!\n");
}
/*
* Initialize the PSC. For now this just involves shutting down all
* interrupt sources using the IERs.
*/
void __init psc_init(void)
{
int i;
if (macintosh_config->ident != MAC_MODEL_C660
&& macintosh_config->ident != MAC_MODEL_Q840)
{
psc = NULL;
psc_present = 0;
return;
}
/*
* The PSC is always at the same spot, but using psc
* keeps things consistent with the psc_xxxx functions.
*/
psc = (void *) PSC_BASE;
psc_present = 1;
printk("PSC detected at %p\n", psc);
psc_dma_die_die_die();
#ifdef DEBUG_PSC
psc_debug_dump();
#endif
/*
* Mask and clear all possible interrupts
*/
for (i = 0x30 ; i < 0x70 ; i += 0x10) {
psc_write_byte(pIERbase + i, 0x0F);
psc_write_byte(pIFRbase + i, 0x0F);
}
}
/*
* PSC interrupt handler. It's a lot like the VIA interrupt handler.
*/
static void psc_irq(unsigned int irq, struct irq_desc *desc)
{
unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc);
int pIFR = pIFRbase + offset;
int pIER = pIERbase + offset;
int irq_num;
unsigned char irq_bit, events;
#ifdef DEBUG_IRQS
printk("psc_irq: irq %u pIFR = 0x%02X pIER = 0x%02X\n",
irq, (int) psc_read_byte(pIFR), (int) psc_read_byte(pIER));
#endif
events = psc_read_byte(pIFR) & psc_read_byte(pIER) & 0xF;
if (!events)
return;
irq_num = irq << 3;
irq_bit = 1;
do {
if (events & irq_bit) {
psc_write_byte(pIFR, irq_bit);
generic_handle_irq(irq_num);
}
irq_num++;
irq_bit <<= 1;
} while (events >= irq_bit);
}
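/*
 * Example of the dispatch loop above (hypothetical values): if irq is 3
 * and events reads back as 0x5, the loop acks bit 0 and bit 2 in pIFR and
 * hands irq numbers 24 (3 << 3) and 26 to generic_handle_irq(), skipping
 * the unset bit in between.
 */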
/*
* Register the PSC interrupt dispatchers for autovector interrupts 3-6.
*/
void __init psc_register_interrupts(void)
{
irq_set_chained_handler(IRQ_AUTO_3, psc_irq);
irq_set_handler_data(IRQ_AUTO_3, (void *)0x30);
irq_set_chained_handler(IRQ_AUTO_4, psc_irq);
irq_set_handler_data(IRQ_AUTO_4, (void *)0x40);
irq_set_chained_handler(IRQ_AUTO_5, psc_irq);
irq_set_handler_data(IRQ_AUTO_5, (void *)0x50);
irq_set_chained_handler(IRQ_AUTO_6, psc_irq);
irq_set_handler_data(IRQ_AUTO_6, (void *)0x60);
}
void psc_irq_enable(int irq) {
int irq_src = IRQ_SRC(irq);
int irq_idx = IRQ_IDX(irq);
int pIER = pIERbase + (irq_src << 4);
#ifdef DEBUG_IRQUSE
printk("psc_irq_enable(%d)\n", irq);
#endif
psc_write_byte(pIER, (1 << irq_idx) | 0x80);
}
void psc_irq_disable(int irq) {
int irq_src = IRQ_SRC(irq);
int irq_idx = IRQ_IDX(irq);
int pIER = pIERbase + (irq_src << 4);
#ifdef DEBUG_IRQUSE
printk("psc_irq_disable(%d)\n", irq);
#endif
psc_write_byte(pIER, 1 << irq_idx);
}
| gpl-2.0 |
timduru/kernel-asus-tf101 | arch/sh/boards/mach-sh03/setup.c | 7504 | 2444 | /*
* linux/arch/sh/boards/sh03/setup.c
*
* Copyright (C) 2004 Interface Co.,Ltd. Saito.K
*
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <asm/io.h>
#include <asm/rtc.h>
#include <mach-sh03/mach/io.h>
#include <mach-sh03/mach/sh03.h>
#include <asm/addrspace.h>
static void __init init_sh03_IRQ(void)
{
plat_irq_setup_pins(IRQ_MODE_IRQ);
}
/* arch/sh/boards/sh03/rtc.c */
void sh03_time_init(void);
static void __init sh03_setup(char **cmdline_p)
{
board_time_init = sh03_time_init;
}
static struct resource cf_ide_resources[] = {
[0] = {
.start = 0x1f0,
.end = 0x1f0 + 8,
.flags = IORESOURCE_IO,
},
[1] = {
.start = 0x1f0 + 0x206,
		.end = 0x1f0 + 8 + 0x206 + 8,
.flags = IORESOURCE_IO,
},
[2] = {
.start = IRL2_IRQ,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device cf_ide_device = {
.name = "pata_platform",
.id = -1,
.num_resources = ARRAY_SIZE(cf_ide_resources),
.resource = cf_ide_resources,
};
static struct resource heartbeat_resources[] = {
[0] = {
.start = 0xa0800000,
.end = 0xa0800000,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device heartbeat_device = {
.name = "heartbeat",
.id = -1,
.num_resources = ARRAY_SIZE(heartbeat_resources),
.resource = heartbeat_resources,
};
static struct platform_device *sh03_devices[] __initdata = {
&heartbeat_device,
&cf_ide_device,
};
static int __init sh03_devices_setup(void)
{
pgprot_t prot;
unsigned long paddrbase;
void *cf_ide_base;
/* open I/O area window */
paddrbase = virt_to_phys((void *)PA_AREA5_IO);
prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot));
if (!cf_ide_base) {
printk("allocate_cf_area : can't open CF I/O window!\n");
return -ENOMEM;
}
/* IDE cmd address : 0x1f0-0x1f7 and 0x3f6 */
cf_ide_resources[0].start += (unsigned long)cf_ide_base;
cf_ide_resources[0].end += (unsigned long)cf_ide_base;
cf_ide_resources[1].start += (unsigned long)cf_ide_base;
cf_ide_resources[1].end += (unsigned long)cf_ide_base;
return platform_add_devices(sh03_devices, ARRAY_SIZE(sh03_devices));
}
device_initcall(sh03_devices_setup);
static struct sh_machine_vector mv_sh03 __initmv = {
.mv_name = "Interface (CTP/PCI-SH03)",
.mv_setup = sh03_setup,
.mv_nr_irqs = 48,
.mv_init_irq = init_sh03_IRQ,
};
| gpl-2.0 |
nychitman1/android_kernel_asus_flo | lib/pci_iomap.c | 8016 | 1383 | /*
* Implement the default iomap interfaces
*
* (C) Copyright 2004 Linus Torvalds
*/
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/export.h>
#ifdef CONFIG_PCI
/**
* pci_iomap - create a virtual mapping cookie for a PCI BAR
* @dev: PCI device that owns the BAR
* @bar: BAR number
* @maxlen: length of the memory to map
*
* Using this function you will get a __iomem address to your device BAR.
 * You can access it using ioread*() and iowrite*(). These functions hide
 * the details of whether this is an MMIO or PIO address space and will just
 * do what you expect from them in the correct way.
*
* @maxlen specifies the maximum length to map. If you want to get access to
* the complete BAR without checking for its length first, pass %0 here.
 */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
resource_size_t start = pci_resource_start(dev, bar);
resource_size_t len = pci_resource_len(dev, bar);
unsigned long flags = pci_resource_flags(dev, bar);
if (!len || !start)
return NULL;
if (maxlen && len > maxlen)
len = maxlen;
if (flags & IORESOURCE_IO)
return __pci_ioport_map(dev, start, len);
if (flags & IORESOURCE_MEM) {
if (flags & IORESOURCE_CACHEABLE)
return ioremap(start, len);
return ioremap_nocache(start, len);
}
/* What? */
return NULL;
}
EXPORT_SYMBOL(pci_iomap);
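/*
 * Typical use from a driver probe path (sketch only; SOME_REG is a
 * hypothetical register offset and error handling is abbreviated):
 *
 *	void __iomem *regs = pci_iomap(pdev, 0, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	iowrite32(value, regs + SOME_REG);
 */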
#endif /* CONFIG_PCI */
| gpl-2.0 |
Krabappel2548/kernel_msm8x60 | drivers/mtd/maps/cdb89712.c | 8016 | 5933 | /*
* Flash on Cirrus CDB89712
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <asm/io.h>
#include <mach/hardware.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
/* dynamic ioremap() areas */
#define FLASH_START 0x00000000
#define FLASH_SIZE 0x800000
#define FLASH_WIDTH 4
#define SRAM_START 0x60000000
#define SRAM_SIZE 0xc000
#define SRAM_WIDTH 4
#define BOOTROM_START 0x70000000
#define BOOTROM_SIZE 0x80
#define BOOTROM_WIDTH 4
static struct mtd_info *flash_mtd;
struct map_info cdb89712_flash_map = {
.name = "flash",
.size = FLASH_SIZE,
.bankwidth = FLASH_WIDTH,
.phys = FLASH_START,
};
struct resource cdb89712_flash_resource = {
.name = "Flash",
.start = FLASH_START,
.end = FLASH_START + FLASH_SIZE - 1,
.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
static int __init init_cdb89712_flash (void)
{
int err;
if (request_resource (&ioport_resource, &cdb89712_flash_resource)) {
printk(KERN_NOTICE "Failed to reserve Cdb89712 FLASH space\n");
err = -EBUSY;
goto out;
}
cdb89712_flash_map.virt = ioremap(FLASH_START, FLASH_SIZE);
if (!cdb89712_flash_map.virt) {
printk(KERN_NOTICE "Failed to ioremap Cdb89712 FLASH space\n");
err = -EIO;
goto out_resource;
}
simple_map_init(&cdb89712_flash_map);
flash_mtd = do_map_probe("cfi_probe", &cdb89712_flash_map);
if (!flash_mtd) {
flash_mtd = do_map_probe("map_rom", &cdb89712_flash_map);
if (flash_mtd)
flash_mtd->erasesize = 0x10000;
}
if (!flash_mtd) {
printk("FLASH probe failed\n");
err = -ENXIO;
goto out_ioremap;
}
flash_mtd->owner = THIS_MODULE;
if (mtd_device_register(flash_mtd, NULL, 0)) {
printk("FLASH device addition failed\n");
err = -ENOMEM;
goto out_probe;
}
return 0;
out_probe:
map_destroy(flash_mtd);
	flash_mtd = NULL;
out_ioremap:
iounmap((void *)cdb89712_flash_map.virt);
out_resource:
release_resource (&cdb89712_flash_resource);
out:
return err;
}
static struct mtd_info *sram_mtd;
struct map_info cdb89712_sram_map = {
.name = "SRAM",
.size = SRAM_SIZE,
.bankwidth = SRAM_WIDTH,
.phys = SRAM_START,
};
struct resource cdb89712_sram_resource = {
.name = "SRAM",
.start = SRAM_START,
.end = SRAM_START + SRAM_SIZE - 1,
.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
static int __init init_cdb89712_sram (void)
{
int err;
if (request_resource (&ioport_resource, &cdb89712_sram_resource)) {
printk(KERN_NOTICE "Failed to reserve Cdb89712 SRAM space\n");
err = -EBUSY;
goto out;
}
cdb89712_sram_map.virt = ioremap(SRAM_START, SRAM_SIZE);
if (!cdb89712_sram_map.virt) {
printk(KERN_NOTICE "Failed to ioremap Cdb89712 SRAM space\n");
err = -EIO;
goto out_resource;
}
simple_map_init(&cdb89712_sram_map);
sram_mtd = do_map_probe("map_ram", &cdb89712_sram_map);
if (!sram_mtd) {
printk("SRAM probe failed\n");
err = -ENXIO;
goto out_ioremap;
}
sram_mtd->owner = THIS_MODULE;
sram_mtd->erasesize = 16;
if (mtd_device_register(sram_mtd, NULL, 0)) {
printk("SRAM device addition failed\n");
err = -ENOMEM;
goto out_probe;
}
return 0;
out_probe:
map_destroy(sram_mtd);
	sram_mtd = NULL;
out_ioremap:
iounmap((void *)cdb89712_sram_map.virt);
out_resource:
release_resource (&cdb89712_sram_resource);
out:
return err;
}
static struct mtd_info *bootrom_mtd;
struct map_info cdb89712_bootrom_map = {
.name = "BootROM",
.size = BOOTROM_SIZE,
.bankwidth = BOOTROM_WIDTH,
.phys = BOOTROM_START,
};
struct resource cdb89712_bootrom_resource = {
.name = "BootROM",
.start = BOOTROM_START,
.end = BOOTROM_START + BOOTROM_SIZE - 1,
.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
static int __init init_cdb89712_bootrom (void)
{
int err;
if (request_resource (&ioport_resource, &cdb89712_bootrom_resource)) {
printk(KERN_NOTICE "Failed to reserve Cdb89712 BOOTROM space\n");
err = -EBUSY;
goto out;
}
cdb89712_bootrom_map.virt = ioremap(BOOTROM_START, BOOTROM_SIZE);
if (!cdb89712_bootrom_map.virt) {
printk(KERN_NOTICE "Failed to ioremap Cdb89712 BootROM space\n");
err = -EIO;
goto out_resource;
}
simple_map_init(&cdb89712_bootrom_map);
bootrom_mtd = do_map_probe("map_rom", &cdb89712_bootrom_map);
if (!bootrom_mtd) {
printk("BootROM probe failed\n");
err = -ENXIO;
goto out_ioremap;
}
bootrom_mtd->owner = THIS_MODULE;
bootrom_mtd->erasesize = 0x10000;
if (mtd_device_register(bootrom_mtd, NULL, 0)) {
printk("BootROM device addition failed\n");
err = -ENOMEM;
goto out_probe;
}
return 0;
out_probe:
map_destroy(bootrom_mtd);
	bootrom_mtd = NULL;
out_ioremap:
iounmap((void *)cdb89712_bootrom_map.virt);
out_resource:
release_resource (&cdb89712_bootrom_resource);
out:
return err;
}
static int __init init_cdb89712_maps(void)
{
printk(KERN_INFO "Cirrus CDB89712 MTD mappings:\n Flash 0x%x at 0x%x\n SRAM 0x%x at 0x%x\n BootROM 0x%x at 0x%x\n",
FLASH_SIZE, FLASH_START, SRAM_SIZE, SRAM_START, BOOTROM_SIZE, BOOTROM_START);
init_cdb89712_flash();
init_cdb89712_sram();
init_cdb89712_bootrom();
return 0;
}
static void __exit cleanup_cdb89712_maps(void)
{
if (sram_mtd) {
mtd_device_unregister(sram_mtd);
map_destroy(sram_mtd);
iounmap((void *)cdb89712_sram_map.virt);
release_resource (&cdb89712_sram_resource);
}
if (flash_mtd) {
mtd_device_unregister(flash_mtd);
map_destroy(flash_mtd);
iounmap((void *)cdb89712_flash_map.virt);
release_resource (&cdb89712_flash_resource);
}
if (bootrom_mtd) {
mtd_device_unregister(bootrom_mtd);
map_destroy(bootrom_mtd);
iounmap((void *)cdb89712_bootrom_map.virt);
release_resource (&cdb89712_bootrom_resource);
}
}
module_init(init_cdb89712_maps);
module_exit(cleanup_cdb89712_maps);
MODULE_AUTHOR("Ray L");
MODULE_DESCRIPTION("ARM CDB89712 map driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
josalaito/android_kernel_motorola_msm8226 | drivers/hid/hid-ortek.c | 8272 | 1927 | /*
* HID driver for various devices which are apparently based on the same chipset
* from certain vendor which produces chips that contain wrong LogicalMaximum
* value in their HID report descriptor. Currently supported devices are:
*
* Ortek PKB-1700
* Ortek WKB-2000
* Skycable wireless presenter
*
* Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com>
* Copyright (c) 2011 Jiri Kosina
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
#include "hid-ids.h"
static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
if (*rsize >= 56 && rdesc[54] == 0x25 && rdesc[55] == 0x01) {
hid_info(hdev, "Fixing up logical minimum in report descriptor (Ortek)\n");
rdesc[55] = 0x92;
} else if (*rsize >= 54 && rdesc[52] == 0x25 && rdesc[53] == 0x01) {
hid_info(hdev, "Fixing up logical minimum in report descriptor (Skycable)\n");
rdesc[53] = 0x65;
}
return rdesc;
}
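/*
 * For reference: 0x25 0x01 is the HID short item "Logical Maximum (1)",
 * so the fixups above rewrite only the one-byte payload (to 0x92 or 0x65
 * respectively) and leave the item tag itself untouched.
 */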
static const struct hid_device_id ortek_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
{ }
};
MODULE_DEVICE_TABLE(hid, ortek_devices);
static struct hid_driver ortek_driver = {
.name = "ortek",
.id_table = ortek_devices,
.report_fixup = ortek_report_fixup
};
static int __init ortek_init(void)
{
return hid_register_driver(&ortek_driver);
}
static void __exit ortek_exit(void)
{
hid_unregister_driver(&ortek_driver);
}
module_init(ortek_init);
module_exit(ortek_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
MoKee/android_kernel_samsung_espresso10 | drivers/staging/wlags49_h2/dhf.c | 9040 | 14962 |
/* vim:tw=110:ts=4: */
/**************************************************************************************************************
*
* FILE : DHF.C
*
* DATE : $Date: 2004/07/19 08:16:14 $ $Revision: 1.2 $
* Original : 2004/05/28 14:05:34 Revision: 1.36 Tag: hcf7_t20040602_01
* Original : 2004/05/11 06:22:57 Revision: 1.32 Tag: hcf7_t7_20040513_01
* Original : 2004/04/15 09:24:42 Revision: 1.28 Tag: hcf7_t7_20040415_01
* Original : 2004/04/08 15:18:16 Revision: 1.27 Tag: t7_20040413_01
* Original : 2004/04/01 15:32:55 Revision: 1.25 Tag: t7_20040401_01
* Original : 2004/03/10 15:39:28 Revision: 1.21 Tag: t20040310_01
* Original : 2004/03/04 11:03:37 Revision: 1.19 Tag: t20040304_01
* Original : 2004/03/02 09:27:11 Revision: 1.17 Tag: t20040302_03
* Original : 2004/02/24 13:00:28 Revision: 1.15 Tag: t20040224_01
* Original : 2004/02/19 10:57:28 Revision: 1.14 Tag: t20040219_01
* Original : 2003/11/27 09:00:09 Revision: 1.3 Tag: t20021216_01
*
* AUTHOR : John Meertens
* Nico Valster
*
* SPECIFICATION: ........
*
* DESC : generic functions to handle the download of NIC firmware
* Local Support Routines for above procedures
*
* Customizable via HCFCFG.H, which is included by HCF.H
*
*
* DHF is (intended to be) platform-independent.
* DHF is a module that provides a number of routines to download firmware
* images (the names primary, station, access point, secondary and tertiary
* are used or have been used) to volatile or nonvolatile memory
* in WaveLAN/IEEE NICs. To achieve this DHF makes use of the WaveLAN/IEEE
* WCI as implemented by the HCF-module.
*
* Download to non-volatile memory is used to update a WaveLAN/IEEE NIC to new
* firmware. Normally this will be an upgrade to newer firmware, although
* downgrading to older firmware is possible too.
*
* Note: relative to Asserts, the following can be observed:
* Since the IFB is not known inside the routine, the macro HCFASSERT is replaced with MMDASSERT.
* Also the line number reported in the assert is raised by FILE_NAME_OFFSET (10000) to discriminate the
* DHF Asserts from HCF and MMD asserts.
*
***************************************************************************************************************
*
*
* SOFTWARE LICENSE
*
* This software is provided subject to the following terms and conditions,
* which you should read carefully before using the software. Using this
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
* COPYRIGHT (C) 1999 - 2000 by Lucent Technologies. All Rights Reserved
* COPYRIGHT (C) 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
* modifications, are permitted provided that the following conditions are met:
*
* . Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following Disclaimer as comments in the code as
* well as in the documentation and/or other materials provided with the
* distribution.
*
* . Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following Disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* . Neither the name of Agere Systems Inc. nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Disclaimer
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
* RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*
**************************************************************************************************************/
#include "hcf.h"
#include "hcfdef.h"
#include "dhf.h"
#include "mmd.h"
/* to distinguish MMD from HCF asserts by means of line number */
#undef FILE_NAME_OFFSET
#define FILE_NAME_OFFSET MMD_FILE_NAME_OFFSET
/*-----------------------------------------------------------------------------
*
* Defines, data structures, and global variables
*
*---------------------------------------------------------------------------*/
/* 12345678901234 */
char signature[14] = "FUPU7D37dhfwci";
/*-----------------------------------------------------------------------------
*
* LTV-records retrieved from the NIC to:
* - determine compatibility between NIC and image
* - ((setup the buffer size dynamically for non-volatile download (see note below) ))
* - supply plugging information contained in the PDA (H-I only)
*
*---------------------------------------------------------------------------*/
/* For USB/H1 we needed a smaller value than the 8192 reported via CFG_DL_BUF_STRCT.
   For the time being it seems simpler to always use 2000, for USB/H1 as well as all other cases, rather than
   using the "fixed anyway" CFG_DL_BUF_STRCT. */
#define DL_SIZE 2000
/* CFG_IDENTITY_STRCT pri_identity = { LOF(CFG_IDENTITY_STRCT), CFG_PRI_IDENTITY }; */
CFG_SUP_RANGE_STRCT mfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_MFI_SUP_RANGE };
CFG_SUP_RANGE_STRCT cfi_sup = { LOF(CFG_SUP_RANGE_STRCT), CFG_NIC_CFI_SUP_RANGE };
/* Note: could be used rather than the above explained and defined DL_SIZE if need arises
* CFG_DL_BUF_STRCT dl_buf = { LOF(CFG_DL_BUF_STRCT), CFG_DL_BUF };
*/
/*-----------------------------------------------------------------------------
* Array ltv_info stores NIC information (in the form of LTV-records)
* needed for download. A NULL record indicates the end of the array.
*---------------------------------------------------------------------------*/
/* The LTV_INFO_STRUCT is needed to save the sizes of the structs, because after a GET_INFO()
* the len field is changed to the real len of the RID by the called routine.
* This is only relevant if the DHF used without reloading the driver/utility.
*/
LTV_INFO_STRUCT ltv_info[] = {
{ (LTVP)&mfi_sup, LOF(CFG_SUP_RANGE_STRCT) } ,
{ (LTVP)&cfi_sup, LOF(CFG_SUP_RANGE_STRCT) } ,
{ (LTVP) NULL, 0 }
};
/***********************************************************************************************************/
/*************************************** PROTOTYPES ******************************************************/
/***********************************************************************************************************/
static int check_comp_fw(memimage *fw);
/************************************************************************************************************
*.SUBMODULE int check_comp_fw( memimage *fw )
*.PURPOSE Checks compatibility of CFI and MFI, NIC as supplier, station/AP firmware image as supplier.
*
*.ARGUMENTS
* fw F/W image to be downloaded
*
*.RETURNS
* HFC_SUCCESS - firmware OK
* DHF_ERR_INCOMP_FW
*
*.DESCRIPTION
* This function uses compatibility and identity information that has been
* retrieved from the card which is currently inserted to check whether the
* station firmware image to be downloaded is compatible.
*.ENDDOC END DOCUMENTATION
*************************************************************************************************************/
int
check_comp_fw(memimage *fw)
{
CFG_RANGE20_STRCT *p;
int rc = HCF_SUCCESS;
CFG_RANGE_SPEC_STRCT *i;
switch (fw->identity->typ) {
case CFG_FW_IDENTITY: /* Station F/W */
case COMP_ID_FW_AP_FAKE: /* ;?is this useful (used to be: CFG_AP_IDENTITY) */
break;
default:
MMDASSERT(DO_ASSERT, fw->identity->typ) /* unknown/unsupported firmware_type: */
rc = DHF_ERR_INCOMP_FW;
		return rc;	/* ;? until the usefulness of this check is sorted out,
				 * we might as well violate the single-exit principle
				 * and bail out here */
}
p = fw->compat;
i = NULL;
while (p->len && i == NULL) { /* check the MFI ranges */
if (p->typ == CFG_MFI_ACT_RANGES_STA) {
i = mmd_check_comp((void *)p, &mfi_sup);
}
p++;
}
MMDASSERT(i, 0) /* MFI: NIC Supplier not compatible with F/W image Actor */
if (i) {
p = fw->compat;
i = NULL;
while (p->len && i == NULL) { /* check the CFI ranges */
if (p->typ == CFG_CFI_ACT_RANGES_STA) {
i = mmd_check_comp((void *)p, &cfi_sup);
}
p++;
}
MMDASSERT(i, 0) /* CFI: NIC Supplier not compatible with F/W image Actor */
}
if (i == NULL) {
rc = DHF_ERR_INCOMP_FW;
}
return rc;
} /* check_comp_fw */
/*-----------------------------------------------------------------------------
*
* Exported functions
*
*---------------------------------------------------------------------------*/
/*************************************************************************************************************
*
*.MODULE int dhf_download_binary( void *ifbp, memimage *fw )
*.PURPOSE Downloads a complete (primary, station, or access point) firmware image to the NIC.
*
*.ARGUMENTS
* ifbp address of the Interface Block
* fw F/W image to be downloaded
*
*.RETURNS
* HCF_SUCCESS - download completed successfully.
* DHF_ERR_INCOMP_FW - firmware not compatible
*
*.DESCRIPTION
* Initialize global variables
* Connect to the DHF
* Check the compatibility of the image (For primary firmware images it is checked first
* whether download is necessary).
* If everything's download the firmware.
* Disconnect from the DHF.
*
*
*.DIAGRAM
*
*.NOTICE:
MMDASSERT is unacceptable because some drivers call dhf_download_binary before hcf_connect
* The old comment was:
*.ENDDOC END DOCUMENTATION
*************************************************************************************************************/
int
dhf_download_binary(memimage *fw)
{
int rc = HCF_SUCCESS;
CFG_PROG_STRCT *p;
int i;
/* validate the image */
for (i = 0; i < sizeof(signature) && fw->signature[i] == signature[i]; i++)
; /* NOP */
if (i != sizeof(signature) ||
fw->signature[i] != 0x01 ||
/* test for Little/Big Endian Binary flag */
fw->signature[i+1] != (/* HCF_BIG_ENDIAN ? 'B' : */ 'L'))
rc = DHF_ERR_INCOMP_FW;
else { /* Little Endian Binary format */
fw->codep = (CFG_PROG_STRCT FAR*)((char *)fw->codep + (hcf_32)fw);
fw->identity = (CFG_IDENTITY_STRCT FAR*)((char *)fw->identity + (hcf_32)fw);
fw->compat = (CFG_RANGE20_STRCT FAR*)((char *)fw->compat + (hcf_32)fw);
for (i = 0; fw->p[i]; i++)
fw->p[i] = ((char *)fw->p[i] + (hcf_32)fw);
p = fw->codep;
while (p->len) {
p->host_addr = (char *)p->host_addr + (hcf_32)fw;
p++;
}
}
return rc;
} /* dhf_download_binary */
/*************************************************************************************************************
*
*.MODULE int dhf_download_fw( void *ifbp, memimage *fw )
*.PURPOSE Downloads a complete (primary or tertiary) firmware image to the NIC.
*
*.ARGUMENTS
* ifbp address of the Interface Block
* fw F/W image to be downloaded
*
*.RETURNS
* HCF_SUCCESS - download completed successfully.
* HCF_ERR_NO_NIC - no NIC present
* DHF_ERR_INCOMP_FW - firmware not compatible
*
*.DESCRIPTION
* - check the signature of the image
* - get the compatibility information from the components on the NIC
* - Primary Firmware Identity
* - Modem - Firmware I/F
* - Controller - Firmware I/F
*!! - if necessary ( i.e. H-I) get the PDA contents from the NIC
* - check the compatibility of the MFI and CFI of the NIC with the F/W image
* Note: the Primary F/W compatibility is only relevant for the "running" HCF and is already verified in
* hcf_connect
*!! - if necessary ( i.e. H-I)
*!! - verify the sumcheck of the PDA
*!! - plug the image (based on the PDA and the default plug records)
* - loop over all the download LTVs in the image which consists of a sequence of
* - CFG_PROG_VOLATILE/CFG_PROG_NON_VOLATILE
* - 1 or more sequences of CFG_PROG_ADDR, CFG_PROG_DATA,....,CFG_PROG_DATA
* - CFG_PROG_STOP
*
*.DIAGRAM
*
*.NOTICE
* The old comment was:
* // Download primary firmware if necessary and allowed. This is done silently (without telling
* // the user) and only if the firmware in the download image is newer than the firmware in the
* // card. In Major version 4 of the primary firmware functions of Hermes and Shark were
* // combined. Prior to that two separate versions existed. We only have to download primary
* // firmware if major version of primary firmware in the NIC < 4.
* // download = pri_identity.version_major < 4;
* // if ( download ) {
* // rc = check_comp_primary( fw );
* // }
* It is my understanding that Pri Variant 1 must be updated by Pri Variant 2. The test on
* major version < 4 should amount to the same result but be "principally" less correct
* In deliberation with the Architecture team, it was decided that this upgrade for old H-I
* NICs, is an aspect which belongs on the WSU level not on the DHF level
*
*.ENDDOC END DOCUMENTATION
*************************************************************************************************************/
int
dhf_download_fw(void *ifbp, memimage *fw)
{
int rc = HCF_SUCCESS;
LTV_INFO_STRUCT_PTR pp = ltv_info;
CFG_PROG_STRCT *p = fw->codep;
LTVP ltvp;
int i;
MMDASSERT(fw != NULL, 0)
/* validate the image */
for (i = 0; i < sizeof(signature) && fw->signature[i] == signature[i]; i++)
; /* NOP */
if (i != sizeof(signature) ||
fw->signature[i] != 0x01 ||
/* check for binary image */
(fw->signature[i+1] != 'C' && fw->signature[i+1] != (/*HCF_BIG_ENDIAN ? 'B' : */ 'L')))
rc = DHF_ERR_INCOMP_FW;
/* Retrieve all information needed for download from the NIC */
while ((rc == HCF_SUCCESS) && ((ltvp = pp->ltvp) != NULL)) {
ltvp->len = pp++->len; /* Set len to original len. This len is changed to real len by GET_INFO() */
rc = GET_INFO(ltvp);
MMDASSERT(rc == HCF_SUCCESS, rc)
MMDASSERT(rc == HCF_SUCCESS, ltvp->typ)
MMDASSERT(rc == HCF_SUCCESS, ltvp->len)
}
if (rc == HCF_SUCCESS)
rc = check_comp_fw(fw);
if (rc == HCF_SUCCESS) {
while (rc == HCF_SUCCESS && p->len) {
rc = PUT_INFO(p);
p++;
}
}
MMDASSERT(rc == HCF_SUCCESS, rc)
return rc;
} /* dhf_download_fw */
| gpl-2.0 |
jwhitham/ppc_linux | drivers/net/ethernet/xilinx/xilinx_emaclite.c | 81 | 38173 | /*
* Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
*
* This is a new flat driver which is based on the original emac_lite
* driver from John Williams <john.williams@xilinx.com>.
*
* 2007 - 2013 (c) Xilinx, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/interrupt.h>
#define DRIVER_NAME "xilinx_emaclite"
/* Register offsets for the EmacLite Core */
#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */
#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */
#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */
#define XEL_MDIOCTRL_OFFSET 0x07F0 /* MDIO Control Register */
#define XEL_GIER_OFFSET 0x07F8 /* GIE Register */
#define XEL_TSR_OFFSET 0x07FC /* Tx status */
#define XEL_TPLR_OFFSET 0x07F4 /* Tx packet length */
#define XEL_RXBUFF_OFFSET 0x1000 /* Receive Buffer */
#define XEL_RPLR_OFFSET 0x100C /* Rx packet length */
#define XEL_RSR_OFFSET 0x17FC /* Rx status */
#define XEL_BUFFER_OFFSET 0x0800 /* Next Tx/Rx buffer's offset */
/* MDIO Address Register Bit Masks */
#define XEL_MDIOADDR_REGADR_MASK 0x0000001F /* Register Address */
#define XEL_MDIOADDR_PHYADR_MASK 0x000003E0 /* PHY Address */
#define XEL_MDIOADDR_PHYADR_SHIFT 5
#define XEL_MDIOADDR_OP_MASK 0x00000400 /* RD/WR Operation */
/* MDIO Write Data Register Bit Masks */
#define XEL_MDIOWR_WRDATA_MASK 0x0000FFFF /* Data to be Written */
/* MDIO Read Data Register Bit Masks */
#define XEL_MDIORD_RDDATA_MASK 0x0000FFFF /* Data to be Read */
/* MDIO Control Register Bit Masks */
#define XEL_MDIOCTRL_MDIOSTS_MASK 0x00000001 /* MDIO Status Mask */
#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */
/* Global Interrupt Enable Register (GIER) Bit Masks */
#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
/* Transmit Status Register (TSR) Bit Masks */
#define XEL_TSR_XMIT_BUSY_MASK 0x00000001 /* Tx complete */
#define XEL_TSR_PROGRAM_MASK 0x00000002 /* Program the MAC address */
#define XEL_TSR_XMIT_IE_MASK 0x00000008 /* Tx interrupt enable bit */
#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000 /* Buffer is active, SW bit
* only. This is not documented
* in the HW spec */
/* Define for programming the MAC address into the EmacLite */
#define XEL_TSR_PROG_MAC_ADDR (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
/* Receive Status Register (RSR) */
#define XEL_RSR_RECV_DONE_MASK 0x00000001 /* Rx complete */
#define XEL_RSR_RECV_IE_MASK 0x00000008 /* Rx interrupt enable bit */
/* Transmit Packet Length Register (TPLR) */
#define XEL_TPLR_LENGTH_MASK 0x0000FFFF /* Tx packet length */
/* Receive Packet Length Register (RPLR) */
#define XEL_RPLR_LENGTH_MASK 0x0000FFFF /* Rx packet length */
#define XEL_HEADER_OFFSET 12 /* Offset to length field */
#define XEL_HEADER_SHIFT 16 /* Shift value for length */
/* General Ethernet Definitions */
#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
#define TX_TIMEOUT (60*HZ) /* Tx timeout is 60 seconds. */
#define ALIGNMENT 4
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
/**
* struct net_local - Our private per device data
* @ndev: instance of the network device
* @tx_ping_pong: indicates whether Tx Pong buffer is configured in HW
* @rx_ping_pong: indicates whether Rx Pong buffer is configured in HW
* @next_tx_buf_to_use: next Tx buffer to write to
* @next_rx_buf_to_use: next Rx buffer to read from
* @base_addr: base address of the Emaclite device
* @reset_lock: lock used for synchronization
* @deferred_skb: holds an skb (for transmission at a later time) when the
* Tx buffer is not free
* @phy_dev: pointer to the PHY device
* @phy_node: pointer to the PHY device node
* @mii_bus: pointer to the MII bus
* @mdio_irqs: IRQs table for MDIO bus
* @last_link: last link status
* @has_mdio: indicates whether MDIO is included in the HW
*/
struct net_local {
struct net_device *ndev;
bool tx_ping_pong;
bool rx_ping_pong;
u32 next_tx_buf_to_use;
u32 next_rx_buf_to_use;
void __iomem *base_addr;
spinlock_t reset_lock;
struct sk_buff *deferred_skb;
struct phy_device *phy_dev;
struct device_node *phy_node;
struct mii_bus *mii_bus;
int mdio_irqs[PHY_MAX_ADDR];
int last_link;
bool has_mdio;
};
/*************************/
/* EmacLite driver calls */
/*************************/
/**
* xemaclite_enable_interrupts - Enable the interrupts for the EmacLite device
* @drvdata: Pointer to the Emaclite device private data
*
* This function enables the Tx and Rx interrupts for the Emaclite device along
* with the Global Interrupt Enable.
*/
static void xemaclite_enable_interrupts(struct net_local *drvdata)
{
u32 reg_data;
/* Enable the Tx interrupts for the first Buffer */
reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
__raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
drvdata->base_addr + XEL_TSR_OFFSET);
/* Enable the Tx interrupts for the second Buffer if
* configured in HW */
if (drvdata->tx_ping_pong != 0) {
reg_data = __raw_readl(drvdata->base_addr +
XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
__raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
drvdata->base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
}
/* Enable the Rx interrupts for the first buffer */
__raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
/* Enable the Rx interrupts for the second Buffer if
* configured in HW */
if (drvdata->rx_ping_pong != 0) {
__raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr +
XEL_BUFFER_OFFSET + XEL_RSR_OFFSET);
}
/* Enable the Global Interrupt Enable */
__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
}
/**
* xemaclite_disable_interrupts - Disable the interrupts for the EmacLite device
* @drvdata: Pointer to the Emaclite device private data
*
* This function disables the Tx and Rx interrupts for the Emaclite device,
* along with the Global Interrupt Enable.
*/
static void xemaclite_disable_interrupts(struct net_local *drvdata)
{
u32 reg_data;
/* Disable the Global Interrupt Enable */
__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
/* Disable the Tx interrupts for the first buffer */
reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
__raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
drvdata->base_addr + XEL_TSR_OFFSET);
/* Disable the Tx interrupts for the second Buffer
* if configured in HW */
if (drvdata->tx_ping_pong != 0) {
reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
__raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
drvdata->base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
}
/* Disable the Rx interrupts for the first buffer */
reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
__raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
drvdata->base_addr + XEL_RSR_OFFSET);
/* Disable the Rx interrupts for the second buffer
* if configured in HW */
if (drvdata->rx_ping_pong != 0) {
reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
XEL_RSR_OFFSET);
__raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
drvdata->base_addr + XEL_BUFFER_OFFSET +
XEL_RSR_OFFSET);
}
}
/**
* xemaclite_aligned_write - Write from 16-bit aligned to 32-bit aligned address
* @src_ptr: Void pointer to the 16-bit aligned source address
* @dest_ptr: Pointer to the 32-bit aligned destination address
* @length: Number bytes to write from source to destination
*
* This function writes data from a 16-bit aligned buffer to a 32-bit aligned
* address in the EmacLite device.
*/
static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
unsigned length)
{
u32 align_buffer;
u32 *to_u32_ptr;
u16 *from_u16_ptr, *to_u16_ptr;
to_u32_ptr = dest_ptr;
from_u16_ptr = src_ptr;
align_buffer = 0;
for (; length > 3; length -= 4) {
to_u16_ptr = (u16 *)&align_buffer;
*to_u16_ptr++ = *from_u16_ptr++;
*to_u16_ptr++ = *from_u16_ptr++;
/* Output a word */
*to_u32_ptr++ = align_buffer;
}
if (length) {
u8 *from_u8_ptr, *to_u8_ptr;
/* Set up to output the remaining data */
align_buffer = 0;
to_u8_ptr = (u8 *) &align_buffer;
from_u8_ptr = (u8 *) from_u16_ptr;
/* Output the remaining data */
for (; length > 0; length--)
*to_u8_ptr++ = *from_u8_ptr++;
*to_u32_ptr = align_buffer;
}
}
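/*
 * Example (hypothetical input): writing a 6-byte MAC address emits one
 * full 32-bit word via the two u16 copies, then the tail code packs the
 * remaining 2 bytes into align_buffer before the final 32-bit store.
 */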
/**
* xemaclite_aligned_read - Read from 32-bit aligned to 16-bit aligned buffer
* @src_ptr: Pointer to the 32-bit aligned source address
* @dest_ptr: Pointer to the 16-bit aligned destination address
* @length: Number bytes to read from source to destination
*
* This function reads data from a 32-bit aligned address in the EmacLite device
* to a 16-bit aligned buffer.
*/
static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
unsigned length)
{
u16 *to_u16_ptr, *from_u16_ptr;
u32 *from_u32_ptr;
u32 align_buffer;
from_u32_ptr = src_ptr;
to_u16_ptr = (u16 *) dest_ptr;
for (; length > 3; length -= 4) {
/* Copy each word into the temporary buffer */
align_buffer = *from_u32_ptr++;
from_u16_ptr = (u16 *)&align_buffer;
/* Read data from source */
*to_u16_ptr++ = *from_u16_ptr++;
*to_u16_ptr++ = *from_u16_ptr++;
}
if (length) {
u8 *to_u8_ptr, *from_u8_ptr;
/* Set up to read the remaining data */
to_u8_ptr = (u8 *) to_u16_ptr;
align_buffer = *from_u32_ptr++;
from_u8_ptr = (u8 *) &align_buffer;
		/* Read the remaining data, advancing both pointers so each
		 * byte ends up in its own slot */
		for (; length > 0; length--)
			*to_u8_ptr++ = *from_u8_ptr++;
}
}
/**
* xemaclite_send_data - Send an Ethernet frame
* @drvdata: Pointer to the Emaclite device private data
* @data: Pointer to the data to be sent
* @byte_count: Total frame size, including header
*
* This function checks if the Tx buffer of the Emaclite device is free to send
* data. If so, it fills the Tx buffer with data for transmission. Otherwise, it
* returns an error.
*
* Return: 0 upon success or -1 if the buffer(s) are full.
*
 * Note: The maximum Tx packet size cannot be more than Ethernet header
 * (14 bytes) + maximum MTU (1500 bytes). This excludes the FCS.
*/
static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
unsigned int byte_count)
{
u32 reg_data;
void __iomem *addr;
/* Determine the expected Tx buffer address */
addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;
/* If the length is too large, truncate it */
if (byte_count > ETH_FRAME_LEN)
byte_count = ETH_FRAME_LEN;
/* Check if the expected buffer is available */
reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
/* Switch to next buffer if configured */
if (drvdata->tx_ping_pong != 0)
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
} else if (drvdata->tx_ping_pong != 0) {
/* If the expected buffer is full, try the other buffer,
* if it is configured in HW */
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
return -1; /* Buffers were full, return failure */
} else
return -1; /* Buffer was full, return failure */
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
__raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
addr + XEL_TPLR_OFFSET);
/* Update the Tx Status Register to indicate that there is a
* frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
* is used by the interrupt handler to check whether a frame
* has been transmitted */
reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
__raw_writel(reg_data, addr + XEL_TSR_OFFSET);
return 0;
}
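/*
 * Ping-pong selection sketch (illustrative; the toggle is the point,
 * not the literal values): with tx_ping_pong enabled, successive sends
 * alternate between the two Tx buffers so one can be filled while the
 * other drains:
 *
 *	send #1: next_tx_buf_to_use == 0 -> buffer 0,
 *	         then next_tx_buf_to_use ^= XEL_BUFFER_OFFSET
 *	send #2: next_tx_buf_to_use == XEL_BUFFER_OFFSET -> buffer 1,
 *	         then it toggles back to 0
 */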
/**
* xemaclite_recv_data - Receive a frame
* @drvdata: Pointer to the Emaclite device private data
* @data: Address where the data is to be received
*
* This function is intended to be called from the interrupt context or
* with a wrapper which waits for the receive frame to be available.
*
* Return: Total number of bytes received
*/
static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
{
void __iomem *addr;
u16 length, proto_type;
u32 reg_data;
/* Determine the expected buffer address */
addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
/* Verify which buffer has valid data */
reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
if (drvdata->rx_ping_pong != 0)
drvdata->next_rx_buf_to_use ^= XEL_BUFFER_OFFSET;
} else {
/* The instance is out of sync: try the other buffer if one is
* configured, otherwise return 0. Do not update
* 'next_rx_buf_to_use' here; it will correct itself on
* subsequent calls */
if (drvdata->rx_ping_pong != 0)
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
else
return 0; /* No data was available */
/* Verify that buffer has valid data */
reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
XEL_RSR_RECV_DONE_MASK)
return 0; /* No data was available */
}
/* Get the protocol type of the ethernet frame that arrived */
proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
/* Check whether the received ethernet frame is a raw ethernet frame,
* an IP packet or an ARP packet */
if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
if (proto_type == ETH_P_IP) {
length = ((ntohl(__raw_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
XEL_RXBUFF_OFFSET)) >>
XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
length += ETH_HLEN + ETH_FCS_LEN;
} else if (proto_type == ETH_P_ARP)
length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
else
/* Field contains type other than IP or ARP, use max
* frame size and let user parse it */
length = ETH_FRAME_LEN + ETH_FCS_LEN;
} else
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
data, length);
/* Acknowledge the frame */
reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
reg_data &= ~XEL_RSR_RECV_DONE_MASK;
__raw_writel(reg_data, addr + XEL_RSR_OFFSET);
return length;
}
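/*
 * Worked example for the length logic above: an ARP frame reports
 * proto_type == ETH_P_ARP (0x0806), which exceeds ETH_FRAME_LEN +
 * ETH_FCS_LEN (1518), so the fixed ARP size is used. An 802.3 frame
 * with a 46-byte payload reports 46 instead, giving
 *
 *	length = 46 + ETH_HLEN + ETH_FCS_LEN = 46 + 14 + 4 = 64
 *
 * bytes read out of the Rx buffer.
 */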
/**
* xemaclite_update_address - Update the MAC address in the device
* @drvdata: Pointer to the Emaclite device private data
* @address_ptr: Pointer to the MAC address (MAC address is a 48-bit value)
*
* Tx must be idle and Rx should be idle for deterministic results.
* It is recommended that this function be called after initialization
* and before any packets are transmitted from the device.
* The MAC address can be programmed using any of the two transmit
* buffers (if configured).
*/
static void xemaclite_update_address(struct net_local *drvdata,
u8 *address_ptr)
{
void __iomem *addr;
u32 reg_data;
/* Determine the expected Tx buffer address */
addr = drvdata->base_addr + drvdata->next_tx_buf_to_use;
xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
__raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
/* Update the MAC address in the EmacLite */
reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
__raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
/* Wait for EmacLite to finish with the MAC address update */
while ((__raw_readl(addr + XEL_TSR_OFFSET) &
XEL_TSR_PROG_MAC_ADDR) != 0)
;
}
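/*
 * Usage sketch (hypothetical; the mac value is invented for
 * illustration): programming a station address goes through the same
 * Tx buffer path, with XEL_TSR_PROG_MAC_ADDR marking it as an address
 * update rather than a transmit:
 *
 *	u8 mac[ETH_ALEN] = { 0x00, 0x0a, 0x35, 0x00, 0x00, 0x01 };
 *
 *	xemaclite_update_address(lp, mac);	// busy-waits until latched
 */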
/**
* xemaclite_set_mac_address - Set the MAC address for this device
* @dev: Pointer to the network device instance
* @address: Void pointer to the sockaddr structure
*
* This function copies the HW address from the sockaddr structure to the
* net_device structure and updates the address in HW.
*
* Return: Error if the net device is busy or 0 if the addr is set
* successfully
*/
static int xemaclite_set_mac_address(struct net_device *dev, void *address)
{
struct net_local *lp = netdev_priv(dev);
struct sockaddr *addr = address;
if (netif_running(dev))
return -EBUSY;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
xemaclite_update_address(lp, dev->dev_addr);
return 0;
}
/**
* xemaclite_tx_timeout - Callback for Tx Timeout
* @dev: Pointer to the network device
*
* This function is called when a Tx timeout occurs for the Emaclite device.
*/
static void xemaclite_tx_timeout(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
unsigned long flags;
dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
TX_TIMEOUT * 1000UL / HZ);
dev->stats.tx_errors++;
/* Reset the device */
spin_lock_irqsave(&lp->reset_lock, flags);
/* Shouldn't really be necessary, but shouldn't hurt */
netif_stop_queue(dev);
xemaclite_disable_interrupts(lp);
xemaclite_enable_interrupts(lp);
if (lp->deferred_skb) {
dev_kfree_skb(lp->deferred_skb);
lp->deferred_skb = NULL;
dev->stats.tx_errors++;
}
dev->trans_start = jiffies; /* prevent tx timeout */
/* We're all ready to go. Start the queue */
netif_wake_queue(dev);
spin_unlock_irqrestore(&lp->reset_lock, flags);
}
/**********************/
/* Interrupt Handlers */
/**********************/
/**
* xemaclite_tx_handler - Interrupt handler for frames sent
* @dev: Pointer to the network device
*
* This function updates the number of packets transmitted and handles the
* deferred skb, if there is one.
*/
static void xemaclite_tx_handler(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
dev->stats.tx_packets++;
if (lp->deferred_skb) {
if (xemaclite_send_data(lp,
(u8 *) lp->deferred_skb->data,
lp->deferred_skb->len) != 0)
return;
else {
dev->stats.tx_bytes += lp->deferred_skb->len;
dev_kfree_skb_irq(lp->deferred_skb);
lp->deferred_skb = NULL;
dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
}
}
}
/**
* xemaclite_rx_handler- Interrupt handler for frames received
* @dev: Pointer to the network device
*
* This function allocates memory for a socket buffer, fills it with data
* received and hands it over to the TCP/IP stack.
*/
static void xemaclite_rx_handler(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
struct sk_buff *skb;
unsigned int align;
u32 len;
len = ETH_FRAME_LEN + ETH_FCS_LEN;
skb = netdev_alloc_skb(dev, len + ALIGNMENT);
if (!skb) {
/* Couldn't get memory. */
dev->stats.rx_dropped++;
dev_err(&lp->ndev->dev, "Could not allocate receive buffer\n");
return;
}
/*
* A new skb should have the data halfword aligned, but this code is
* here just in case that isn't true. Calculate how many bytes we
* should reserve to get the data to start on a word boundary.
*/
align = BUFFER_ALIGN(skb->data);
if (align)
skb_reserve(skb, align);
skb_reserve(skb, 2);
len = xemaclite_recv_data(lp, (u8 *) skb->data);
if (!len) {
dev->stats.rx_errors++;
dev_kfree_skb_irq(skb);
return;
}
skb_put(skb, len); /* Tell the skb how much data we got */
skb->protocol = eth_type_trans(skb, dev);
skb_checksum_none_assert(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
if (!skb_defer_rx_timestamp(skb))
netif_rx(skb); /* Send the packet upstream */
}
/**
* xemaclite_interrupt - Interrupt handler for this driver
* @irq: Irq of the Emaclite device
* @dev_id: Void pointer to the network device instance used as callback
* reference
*
* This function handles the Tx and Rx interrupts of the EmacLite device.
*/
static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
{
bool tx_complete = false;
struct net_device *dev = dev_id;
struct net_local *lp = netdev_priv(dev);
void __iomem *base_addr = lp->base_addr;
u32 tx_status;
/* Check if there is Rx Data available */
if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
XEL_RSR_RECV_DONE_MASK) ||
(__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
& XEL_RSR_RECV_DONE_MASK))
xemaclite_rx_handler(dev);
/* Check if the Transmission for the first buffer is completed */
tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
__raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
tx_complete = true;
}
/* Check if the Transmission for the second buffer is completed */
tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
__raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
tx_complete = true;
}
/* If there was a Tx interrupt, call the Tx Handler */
if (tx_complete != 0)
xemaclite_tx_handler(dev);
return IRQ_HANDLED;
}
/**********************/
/* MDIO Bus functions */
/**********************/
/**
* xemaclite_mdio_wait - Wait for the MDIO to be ready to use
* @lp: Pointer to the Emaclite device private data
*
* This function waits until the device is ready to accept a new MDIO
* request.
*
* Return: 0 for success or ETIMEDOUT for a timeout
*/
static int xemaclite_mdio_wait(struct net_local *lp)
{
unsigned long end = jiffies + 2;
/* Wait for the MDIO interface to become non-busy, or time out
* after some time.
*/
while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
if (time_after_eq(jiffies, end)) {
WARN_ON(1);
return -ETIMEDOUT;
}
msleep(1);
}
return 0;
}
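/*
 * The loop above is the common "poll, sleep, bound by jiffies" pattern.
 * A minimal generic sketch of the same idea (BUSY_MASK and reg are
 * placeholders, not symbols from this driver):
 *
 *	unsigned long timeout = jiffies + msecs_to_jiffies(2);
 *
 *	while (readl(reg) & BUSY_MASK) {
 *		if (time_after(jiffies, timeout))
 *			return -ETIMEDOUT;
 *		msleep(1);
 *	}
 */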
/**
* xemaclite_mdio_read - Read from a given MII management register
* @bus: the mii_bus struct
* @phy_id: the phy address
* @reg: register number to read from
*
* This function waits until the device is ready to accept a new MDIO
* request, then writes the phy address to the MDIO Address register
* and reads data from the MDIO Read Data register when it is available.
*
* Return: Value read from the MII management register
*/
static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
struct net_local *lp = bus->priv;
u32 ctrl_reg;
u32 rc;
if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT;
/* Write the PHY address, register number and set the OP bit in the
* MDIO Address register. Set the Status bit in the MDIO Control
* register to start a MDIO read transaction.
*/
ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
__raw_writel(XEL_MDIOADDR_OP_MASK |
((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
lp->base_addr + XEL_MDIOADDR_OFFSET);
__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT;
rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev,
"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
phy_id, reg, rc);
return rc;
}
/**
* xemaclite_mdio_write - Write to a given MII management register
* @bus: the mii_bus struct
* @phy_id: the phy address
* @reg: register number to write to
* @val: value to write to the register number specified by reg
*
* This function waits until the device is ready to accept a new MDIO
* request and then writes val to the MDIO Write Data register.
*
* Return: 0 upon success or -ETIMEDOUT upon a timeout
*/
static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
u16 val)
{
struct net_local *lp = bus->priv;
u32 ctrl_reg;
dev_dbg(&lp->ndev->dev,
"xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
phy_id, reg, val);
if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT;
/* Write the PHY address, register number and clear the OP bit in the
* MDIO Address register and then write the value into the MDIO Write
* Data register. Finally, set the Status bit in the MDIO Control
* register to start a MDIO write transaction.
*/
ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
__raw_writel(~XEL_MDIOADDR_OP_MASK &
((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
lp->base_addr + XEL_MDIOADDR_OFFSET);
__raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
return 0;
}
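/*
 * Transaction summary (informal sketch using the offsets above, not a
 * literal register trace): a read sets the OP bit, a write clears it
 * and supplies the data before pulsing the status bit:
 *
 *	read:  MDIOADDR = OP | (phy_id << shift) | reg;
 *	       MDIOCTRL |= STS; wait; val = MDIORD;
 *	write: MDIOADDR = ~OP & ((phy_id << shift) | reg);
 *	       MDIOWR = val; MDIOCTRL |= STS;
 */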
/**
* xemaclite_mdio_reset - Reset the mdio bus.
* @bus: Pointer to the MII bus
*
* This function is required(?) as per Documentation/networking/phy.txt.
* There is no reset in this device; this function always returns 0.
*/
static int xemaclite_mdio_reset(struct mii_bus *bus)
{
return 0;
}
/**
* xemaclite_mdio_setup - Register mii_bus for the Emaclite device
* @lp: Pointer to the Emaclite device private data
* @ofdev: Pointer to OF device structure
*
* This function enables MDIO bus in the Emaclite device and registers a
* mii_bus.
*
* Return: 0 upon success or a negative error upon failure
*/
static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
{
struct mii_bus *bus;
int rc;
struct resource res;
struct device_node *np = of_get_parent(lp->phy_node);
struct device_node *npp;
/* Don't register the MDIO bus if the phy_node or its parent node
* can't be found.
*/
if (!np) {
dev_err(dev, "Failed to register mdio bus.\n");
return -ENODEV;
}
npp = of_get_parent(np);
of_address_to_resource(npp, 0, &res);
if (lp->ndev->mem_start != res.start) {
struct phy_device *phydev;
phydev = of_phy_find_device(lp->phy_node);
if (!phydev)
dev_info(dev,
"MDIO of the phy is not registered yet\n");
return 0;
}
/* Enable the MDIO bus by asserting the enable bit in MDIO Control
* register.
*/
__raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
bus = mdiobus_alloc();
if (!bus) {
dev_err(dev, "Failed to allocate mdiobus\n");
return -ENOMEM;
}
snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
(unsigned long long)res.start);
bus->priv = lp;
bus->name = "Xilinx Emaclite MDIO";
bus->read = xemaclite_mdio_read;
bus->write = xemaclite_mdio_write;
bus->reset = xemaclite_mdio_reset;
bus->parent = dev;
bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
lp->mii_bus = bus;
rc = of_mdiobus_register(bus, np);
if (rc) {
dev_err(dev, "Failed to register mdio bus.\n");
goto err_register;
}
return 0;
err_register:
mdiobus_free(bus);
return rc;
}
/**
* xemaclite_adjust_link - Link state callback for the Emaclite device
* @ndev: pointer to net_device struct
*
* There's nothing in the Emaclite device to be configured when the link
* state changes. We just print the status.
*/
static void xemaclite_adjust_link(struct net_device *ndev)
{
struct net_local *lp = netdev_priv(ndev);
struct phy_device *phy = lp->phy_dev;
int link_state;
/* hash together the state values to decide if something has changed */
link_state = phy->speed | (phy->duplex << 1) | phy->link;
if (lp->last_link != link_state) {
lp->last_link = link_state;
phy_print_status(phy);
}
}
/**
* xemaclite_open - Open the network device
* @dev: Pointer to the network device
*
* This function sets the MAC address, requests an IRQ and enables interrupts
* for the Emaclite device and starts the Tx queue.
* It also connects to the phy device, if MDIO is included in the Emaclite device.
*/
static int xemaclite_open(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
int retval;
/* Just to be safe, stop the device first */
xemaclite_disable_interrupts(lp);
if (lp->phy_node) {
u32 bmcr;
lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
xemaclite_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
if (!lp->phy_dev) {
dev_err(&lp->ndev->dev, "of_phy_connect() failed\n");
return -ENODEV;
}
/* EmacLite doesn't support giga-bit speeds */
lp->phy_dev->supported &= (PHY_BASIC_FEATURES);
lp->phy_dev->advertising = lp->phy_dev->supported;
/* Don't advertise 1000BASE-T Full/Half duplex speeds */
phy_write(lp->phy_dev, MII_CTRL1000, 0);
/* Advertise only 10 and 100mbps full/half duplex speeds */
phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL |
ADVERTISE_CSMA);
/* Restart auto negotiation */
bmcr = phy_read(lp->phy_dev, MII_BMCR);
bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
phy_write(lp->phy_dev, MII_BMCR, bmcr);
phy_start(lp->phy_dev);
}
/* Set the MAC address each time opened */
xemaclite_update_address(lp, dev->dev_addr);
/* Grab the IRQ */
retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev);
if (retval) {
dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n",
dev->irq);
if (lp->phy_dev)
phy_disconnect(lp->phy_dev);
lp->phy_dev = NULL;
return retval;
}
/* Enable Interrupts */
xemaclite_enable_interrupts(lp);
/* We're ready to go */
netif_start_queue(dev);
return 0;
}
/**
* xemaclite_close - Close the network device
* @dev: Pointer to the network device
*
* This function stops the Tx queue, disables interrupts and frees the IRQ for
* the Emaclite device.
* It also disconnects the phy device associated with the Emaclite device.
*/
static int xemaclite_close(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
netif_stop_queue(dev);
xemaclite_disable_interrupts(lp);
free_irq(dev->irq, dev);
if (lp->phy_dev)
phy_disconnect(lp->phy_dev);
lp->phy_dev = NULL;
return 0;
}
/**
* xemaclite_send - Transmit a frame
* @orig_skb: Pointer to the socket buffer to be transmitted
* @dev: Pointer to the network device
*
* This function checks if the Tx buffer of the Emaclite device is free to send
* data. If so, it fills the Tx buffer with data from socket buffer data,
* updates the stats and frees the socket buffer. The Tx completion is signaled
* by an interrupt. If the Tx buffer isn't free, then the socket buffer is
* deferred and the Tx queue is stopped so that the deferred socket buffer can
* be transmitted when the Emaclite device is free to transmit data.
*
* Return: 0, always.
*/
static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
struct sk_buff *new_skb;
unsigned int len;
unsigned long flags;
len = orig_skb->len;
new_skb = orig_skb;
spin_lock_irqsave(&lp->reset_lock, flags);
if (xemaclite_send_data(lp, (u8 *) new_skb->data, len) != 0) {
/* If the Emaclite Tx buffer is busy, stop the Tx queue and
* defer the skb for transmission during the ISR, after the
* current transmission is complete */
netif_stop_queue(dev);
lp->deferred_skb = new_skb;
/* Take the time stamp now, since we can't do this in an ISR. */
skb_tx_timestamp(new_skb);
spin_unlock_irqrestore(&lp->reset_lock, flags);
return 0;
}
spin_unlock_irqrestore(&lp->reset_lock, flags);
skb_tx_timestamp(new_skb);
dev->stats.tx_bytes += len;
dev_kfree_skb(new_skb);
return 0;
}
/**
* xemaclite_remove_ndev - Free the network device
* @ndev: Pointer to the network device to be freed
* @pdev: Pointer to the platform device
*
* This function unmaps the IO region of the Emaclite device and frees the net
* device.
*/
static void xemaclite_remove_ndev(struct net_device *ndev,
struct platform_device *pdev)
{
if (ndev) {
struct net_local *lp = netdev_priv(ndev);
if (lp->base_addr)
devm_iounmap(&pdev->dev, lp->base_addr);
free_netdev(ndev);
}
}
/**
* get_bool - Get a parameter from the OF device
* @ofdev: Pointer to OF device structure
* @s: Property to be retrieved
*
* This function looks for a property in the device node and returns the value
* of the property if it is found, or 0 if the property is not found.
*
* Return: Value of the parameter if the parameter is found, or 0 otherwise
*/
static bool get_bool(struct platform_device *ofdev, const char *s)
{
u32 *p = (u32 *)of_get_property(ofdev->dev.of_node, s, NULL);
if (p) {
return (bool)*p;
} else {
dev_warn(&ofdev->dev, "Parameter %s not found,"
"defaulting to false\n", s);
return 0;
}
}
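/*
 * Device-tree sketch (hypothetical node; the unit address is invented)
 * showing the boolean-as-cell properties that get_bool() reads in the
 * probe routine below:
 *
 *	ethernet@81000000 {
 *		compatible = "xlnx,xps-ethernetlite-2.00.a";
 *		xlnx,tx-ping-pong = <0x1>;
 *		xlnx,rx-ping-pong = <0x1>;
 *	};
 */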
static struct net_device_ops xemaclite_netdev_ops;
/**
* xemaclite_of_probe - Probe method for the Emaclite device.
* @ofdev: Pointer to OF device structure
*
* This function probes for the Emaclite device in the device tree.
* It initializes the driver data structure and the hardware, sets the MAC
* address and registers the network device.
* It also registers a mii_bus for the Emaclite device, if MDIO is included
* in the device.
*
* Return: 0, if the driver is bound to the Emaclite device, or
* a negative error if there is failure.
*/
static int xemaclite_of_probe(struct platform_device *ofdev)
{
struct resource *res;
struct net_device *ndev = NULL;
struct net_local *lp = NULL;
struct device *dev = &ofdev->dev;
const void *mac_address;
int rc = 0;
dev_info(dev, "Device Tree Probing\n");
/* Create an ethernet device instance */
ndev = alloc_etherdev(sizeof(struct net_local));
if (!ndev)
return -ENOMEM;
dev_set_drvdata(dev, ndev);
SET_NETDEV_DEV(ndev, &ofdev->dev);
lp = netdev_priv(ndev);
lp->ndev = ndev;
/* Get IRQ for the device */
res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "no IRQ found\n");
rc = -ENXIO;
goto error;
}
ndev->irq = res->start;
res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
if (IS_ERR(lp->base_addr)) {
rc = PTR_ERR(lp->base_addr);
goto error;
}
ndev->mem_start = res->start;
ndev->mem_end = res->end;
spin_lock_init(&lp->reset_lock);
lp->next_tx_buf_to_use = 0x0;
lp->next_rx_buf_to_use = 0x0;
lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
mac_address = of_get_mac_address(ofdev->dev.of_node);
if (mac_address)
/* Set the MAC address. */
memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
else
dev_warn(dev, "No MAC address found\n");
/* Clear the Tx CSR's in case this is a restart */
__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
__raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
/* Set the MAC address in the EmacLite device */
xemaclite_update_address(lp, ndev->dev_addr);
lp->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
rc = xemaclite_mdio_setup(lp, &ofdev->dev);
if (rc)
dev_warn(&ofdev->dev, "error registering MDIO bus\n");
dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
ndev->netdev_ops = &xemaclite_netdev_ops;
ndev->flags &= ~IFF_MULTICAST;
ndev->watchdog_timeo = TX_TIMEOUT;
/* Finally, register the device */
rc = register_netdev(ndev);
if (rc) {
dev_err(dev,
"Cannot register network device, aborting\n");
goto error;
}
dev_info(dev,
"Xilinx EmacLite at 0x%08X mapped to 0x%08X, irq=%d\n",
(unsigned int __force)ndev->mem_start,
(unsigned int __force)lp->base_addr, ndev->irq);
return 0;
error:
xemaclite_remove_ndev(ndev, ofdev);
return rc;
}
/**
* xemaclite_of_remove - Unbind the driver from the Emaclite device.
* @of_dev: Pointer to OF device structure
*
* This function is called if a device is physically removed from the system or
* if the driver module is being unloaded. It frees any resources allocated to
* the device.
*
* Return: 0, always.
*/
static int xemaclite_of_remove(struct platform_device *of_dev)
{
struct net_device *ndev = platform_get_drvdata(of_dev);
struct net_local *lp = netdev_priv(ndev);
/* Un-register the mii_bus, if configured */
if (lp->has_mdio) {
mdiobus_unregister(lp->mii_bus);
kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
lp->mii_bus = NULL;
}
unregister_netdev(ndev);
if (lp->phy_node)
of_node_put(lp->phy_node);
lp->phy_node = NULL;
xemaclite_remove_ndev(ndev, of_dev);
return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
xemaclite_poll_controller(struct net_device *ndev)
{
disable_irq(ndev->irq);
xemaclite_interrupt(ndev->irq, ndev);
enable_irq(ndev->irq);
}
#endif
static struct net_device_ops xemaclite_netdev_ops = {
.ndo_open = xemaclite_open,
.ndo_stop = xemaclite_close,
.ndo_start_xmit = xemaclite_send,
.ndo_set_mac_address = xemaclite_set_mac_address,
.ndo_tx_timeout = xemaclite_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = xemaclite_poll_controller,
#endif
};
/* Match table for OF platform binding */
static struct of_device_id xemaclite_of_match[] = {
{ .compatible = "xlnx,opb-ethernetlite-1.01.a", },
{ .compatible = "xlnx,opb-ethernetlite-1.01.b", },
{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
{ .compatible = "xlnx,xps-ethernetlite-2.00.a", },
{ .compatible = "xlnx,xps-ethernetlite-2.01.a", },
{ .compatible = "xlnx,xps-ethernetlite-3.00.a", },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xemaclite_of_match);
static struct platform_driver xemaclite_of_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = xemaclite_of_match,
},
.probe = xemaclite_of_probe,
.remove = xemaclite_of_remove,
};
module_platform_driver(xemaclite_of_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
onyx-intl/ak98_kernel | drivers/video/via/tblDPASetting.c | 1873 | 5129 | /*
* Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation;
* either version 2, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE.See the GNU General Public License
* for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "global.h"
/* For VT3324: */
struct VT1636_DPA_SETTING VT1636_DPA_SETTING_TBL_VT3324[] = {
/* Panel ID, CLK_SEL_ST1[09], CLK_SEL_ST2[08] */
{LCD_PANEL_ID0_640X480, 0x00, 0x00}, /* For 640x480 */
{LCD_PANEL_ID1_800X600, 0x00, 0x00}, /* For 800x600 */
{LCD_PANEL_ID2_1024X768, 0x00, 0x00}, /* For 1024x768 */
{LCD_PANEL_ID3_1280X768, 0x00, 0x00}, /* For 1280x768 */
{LCD_PANEL_ID4_1280X1024, 0x00, 0x00}, /* For 1280x1024 */
{LCD_PANEL_ID5_1400X1050, 0x00, 0x00}, /* For 1400x1050 */
{LCD_PANEL_ID6_1600X1200, 0x0B, 0x03} /* For 1600x1200 */
};
struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3324[] = {
/* ClkRange, DVP0, DVP0DataDriving, DVP0ClockDriving, DVP1,
DVP1Driving, DFPHigh, DFPLow */
/* CR96, SR2A[5], SR1B[1], SR2A[4], SR1E[2], CR9B,
SR65, CR97, CR99 */
/* LCK/VCK < 30000000 will use this value */
{DPA_CLK_RANGE_30M, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00,
0x00},
/* 30000000 < LCK/VCK < 50000000 will use this value */
{DPA_CLK_RANGE_30_50M, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00,
0x00},
/* 50000000 < LCK/VCK < 70000000 will use this value */
{DPA_CLK_RANGE_50_70M, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x00},
/* 70000000 < LCK/VCK < 100000000 will use this value */
{DPA_CLK_RANGE_70_100M, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x00},
/* 100000000 < LCK/VCK < 150000000 will use this value */
{DPA_CLK_RANGE_100_150M, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x00},
/* 150000000 < LCK/VCK will use this value */
{DPA_CLK_RANGE_150M, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x0E, 0x00,
0x00},
};
/* For VT3327: */
struct VT1636_DPA_SETTING VT1636_DPA_SETTING_TBL_VT3327[] = {
/* Panel ID, CLK_SEL_ST1[09], CLK_SEL_ST2[08] */
{LCD_PANEL_ID0_640X480, 0x00, 0x00}, /* For 640x480 */
{LCD_PANEL_ID1_800X600, 0x00, 0x00}, /* For 800x600 */
{LCD_PANEL_ID2_1024X768, 0x00, 0x00}, /* For 1024x768 */
{LCD_PANEL_ID3_1280X768, 0x00, 0x00}, /* For 1280x768 */
{LCD_PANEL_ID4_1280X1024, 0x00, 0x00}, /* For 1280x1024 */
{LCD_PANEL_ID5_1400X1050, 0x00, 0x00}, /* For 1400x1050 */
{LCD_PANEL_ID6_1600X1200, 0x00, 0x00} /* For 1600x1200 */
};
struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3327[] = {
/* ClkRange, DVP0, DVP0DataDriving, DVP0ClockDriving, DVP1,
DVP1Driving, DFPHigh, DFPLow */
/* CR96, SR2A[5], SR1B[1], SR2A[4], SR1E[2], CR9B,
SR65, CR97, CR99 */
/* LCK/VCK < 30000000 will use this value */
{DPA_CLK_RANGE_30M, 0x07, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x08, 0x01},
/* 30000000 < LCK/VCK < 50000000 will use this value */
{DPA_CLK_RANGE_30_50M, 0x07, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x08, 0x01},
/* 50000000 < LCK/VCK < 70000000 will use this value */
{DPA_CLK_RANGE_50_70M, 0x06, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x08, 0x01},
/* 70000000 < LCK/VCK < 100000000 will use this value */
{DPA_CLK_RANGE_70_100M, 0x03, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x08, 0x03},
/* 100000000 < LCK/VCK < 150000000 will use this value */
{DPA_CLK_RANGE_100_150M, 0x03, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x01, 0x02},
/* 150000000 < LCK/VCK will use this value */
{DPA_CLK_RANGE_150M, 0x00, 0x20, 0x00, 0x10, 0x00, 0x03, 0x00, 0x0D, 0x03},
};
/* For VT3364: */
struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3364[] = {
/* ClkRange, DVP0, DVP0DataDriving, DVP0ClockDriving, DVP1,
DVP1Driving, DFPHigh, DFPLow */
/* CR96, SR2A[5], SR1B[1], SR2A[4], SR1E[2], CR9B,
SR65, CR97, CR99 */
/* LCK/VCK < 30000000 will use this value */
{DPA_CLK_RANGE_30M, 0x07, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x08},
/* 30000000 < LCK/VCK < 50000000 will use this value */
{DPA_CLK_RANGE_30_50M, 0x07, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x08},
/* 50000000 < LCK/VCK < 70000000 will use this value */
{DPA_CLK_RANGE_50_70M, 0x07, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x08},
/* 70000000 < LCK/VCK < 100000000 will use this value */
{DPA_CLK_RANGE_70_100M, 0x07, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x08},
/* 100000000 < LCK/VCK < 150000000 will use this value */
{DPA_CLK_RANGE_100_150M, 0x03, 0x00, 0x02, 0x00, 0x00, 0x03, 0x00, 0x00, 0x08},
/* 150000000 < LCK/VCK will use this value */
{DPA_CLK_RANGE_150M, 0x01, 0x00, 0x02, 0x10, 0x00, 0x03, 0x00, 0x00, 0x08},
};
| gpl-2.0 |
TeamBeast/BeastPro-Kernel-Samsung-StarPro | arch/arm/plat-omap/counter_32k.c | 2129 | 5396 | /*
* OMAP 32ksynctimer/counter_32k-related code
*
* Copyright (C) 2009 Texas Instruments
* Copyright (C) 2010 Nokia Corporation
* Tony Lindgren <tony@atomide.com>
* Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* NOTE: This timer is not the same timer as the old OMAP1 MPU timer.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <asm/sched_clock.h>
#include <plat/common.h>
#include <plat/board.h>
#include <plat/clock.h>
/*
* 32KHz clocksource ... always available on pretty much all chips except
* OMAP 730 and 1510. Other timers could be used as clocksources, with
* higher resolution in free-running counter modes (e.g. 12 MHz xtal),
* but systems won't necessarily want to spend resources that way.
*/
#define OMAP16XX_TIMER_32K_SYNCHRONIZED 0xfffbc410
#include <linux/clocksource.h>
/*
* offset_32k holds the init time counter value. It is then subtracted
* from every counter read to achieve a counter that counts time from the
* kernel boot (needed for sched_clock()).
*/
static u32 offset_32k __read_mostly;
#ifdef CONFIG_ARCH_OMAP16XX
static cycle_t notrace omap16xx_32k_read(struct clocksource *cs)
{
return omap_readl(OMAP16XX_TIMER_32K_SYNCHRONIZED) - offset_32k;
}
#else
#define omap16xx_32k_read NULL
#endif
#ifdef CONFIG_SOC_OMAP2420
static cycle_t notrace omap2420_32k_read(struct clocksource *cs)
{
return omap_readl(OMAP2420_32KSYNCT_BASE + 0x10) - offset_32k;
}
#else
#define omap2420_32k_read NULL
#endif
#ifdef CONFIG_SOC_OMAP2430
static cycle_t notrace omap2430_32k_read(struct clocksource *cs)
{
return omap_readl(OMAP2430_32KSYNCT_BASE + 0x10) - offset_32k;
}
#else
#define omap2430_32k_read NULL
#endif
#ifdef CONFIG_ARCH_OMAP3
static cycle_t notrace omap34xx_32k_read(struct clocksource *cs)
{
return omap_readl(OMAP3430_32KSYNCT_BASE + 0x10) - offset_32k;
}
#else
#define omap34xx_32k_read NULL
#endif
#ifdef CONFIG_ARCH_OMAP4
static cycle_t notrace omap44xx_32k_read(struct clocksource *cs)
{
return omap_readl(OMAP4430_32KSYNCT_BASE + 0x10) - offset_32k;
}
#else
#define omap44xx_32k_read NULL
#endif
/*
* Kernel assumes that sched_clock can be called early but may not have
* things ready yet.
*/
static cycle_t notrace omap_32k_read_dummy(struct clocksource *cs)
{
return 0;
}
static struct clocksource clocksource_32k = {
.name = "32k_counter",
.rating = 250,
.read = omap_32k_read_dummy,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
* Returns current time from boot in nsecs. It's OK for this to wrap
* around for now, as it's just a relative time stamp.
*/
static DEFINE_CLOCK_DATA(cd);
/*
* Constants generated by clocks_calc_mult_shift(m, s, 32768, NSEC_PER_SEC, 60).
* This gives a resolution of about 30us and a wrap period of about 36hrs.
*/
#define SC_MULT 4000000000u
#define SC_SHIFT 17
static inline unsigned long long notrace _omap_32k_sched_clock(void)
{
u32 cyc = clocksource_32k.read(&clocksource_32k);
return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
}
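/*
 * Worked example of the fixed mult/shift conversion used above: one
 * second of 32 kHz ticks is cyc = 32768, and
 *
 *	ns = (cyc * SC_MULT) >> SC_SHIFT
 *	   = (32768 * 4000000000) >> 17
 *	   = 131072000000000 / 131072
 *	   = 1000000000
 *
 * i.e. exactly NSEC_PER_SEC, which is what the constants were generated
 * to produce.
 */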
#ifndef CONFIG_OMAP_MPU_TIMER
unsigned long long notrace sched_clock(void)
{
return _omap_32k_sched_clock();
}
#else
unsigned long long notrace omap_32k_sched_clock(void)
{
return _omap_32k_sched_clock();
}
#endif
static void notrace omap_update_sched_clock(void)
{
u32 cyc = clocksource_32k.read(&clocksource_32k);
update_sched_clock(&cd, cyc, (u32)~0);
}
/**
* read_persistent_clock - Return time from a persistent clock.
*
* Reads the time from a source which isn't disabled during PM, the
* 32k sync timer. Convert the cycles elapsed since last read into
* nsecs and adds to a monotonically increasing timespec.
*/
static struct timespec persistent_ts;
static cycles_t cycles, last_cycles;
void read_persistent_clock(struct timespec *ts)
{
unsigned long long nsecs;
cycles_t delta;
struct timespec *tsp = &persistent_ts;
last_cycles = cycles;
cycles = clocksource_32k.read(&clocksource_32k);
delta = cycles - last_cycles;
nsecs = clocksource_cyc2ns(delta,
clocksource_32k.mult, clocksource_32k.shift);
timespec_add_ns(tsp, nsecs);
*ts = *tsp;
}
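/*
 * The cycle delta above relies on unsigned wrap-around arithmetic, so a
 * counter rollover between two reads is still counted correctly. With
 * 32-bit cycle values, for example:
 *
 *	(u32)(0x00000003 - 0xFFFFFFFE) == 0x00000005
 *
 * five ticks elapsed across the wrap, and that delta is what gets
 * converted to nanoseconds and accumulated into persistent_ts.
 */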
int __init omap_init_clocksource_32k(void)
{
static char err[] __initdata = KERN_ERR
"%s: can't register clocksource!\n";
if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
struct clk *sync_32k_ick;
if (cpu_is_omap16xx())
clocksource_32k.read = omap16xx_32k_read;
else if (cpu_is_omap2420())
clocksource_32k.read = omap2420_32k_read;
else if (cpu_is_omap2430())
clocksource_32k.read = omap2430_32k_read;
else if (cpu_is_omap34xx())
clocksource_32k.read = omap34xx_32k_read;
else if (cpu_is_omap44xx())
clocksource_32k.read = omap44xx_32k_read;
else
return -ENODEV;
sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
if (!IS_ERR(sync_32k_ick))
clk_enable(sync_32k_ick);
offset_32k = clocksource_32k.read(&clocksource_32k);
if (clocksource_register_hz(&clocksource_32k, 32768))
printk(err, clocksource_32k.name);
init_fixed_sched_clock(&cd, omap_update_sched_clock, 32,
32768, SC_MULT, SC_SHIFT);
}
return 0;
}
| gpl-2.0 |
AndroidDeveloperAlliance/kernel_samsung_d2 | drivers/staging/gma500/psb_intel_sdvo.c | 2385 | 36324 | /*
* Copyright (c) 2006-2007 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
#include <linux/i2c.h>
#include <linux/delay.h>
/* #include <drm/drm_crtc.h> */
#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_sdvo_regs.h"
struct psb_intel_sdvo_priv {
struct psb_intel_i2c_chan *i2c_bus;
int slaveaddr;
int output_device;
u16 active_outputs;
struct psb_intel_sdvo_caps caps;
int pixel_clock_min, pixel_clock_max;
int save_sdvo_mult;
u16 save_active_outputs;
struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
struct psb_intel_sdvo_dtd save_output_dtd[16];
u32 save_SDVOX;
u8 in_out_map[4];
u8 by_input_wiring;
u32 active_device;
};
/**
* Writes the SDVOB or SDVOC with the given value, but always writes both
* SDVOB and SDVOC to work around apparent hardware issues (according to
* comments in the BIOS).
*/
void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output,
u32 val)
{
struct drm_device *dev = psb_intel_output->base.dev;
struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
u32 bval = val, cval = val;
int i;
if (sdvo_priv->output_device == SDVOB)
cval = REG_READ(SDVOC);
else
bval = REG_READ(SDVOB);
/*
* Write the registers twice for luck. Sometimes,
* writing them only once doesn't appear to 'stick'.
* The BIOS does this too. Yay, magic
*/
for (i = 0; i < 2; i++) {
REG_WRITE(SDVOB, bval);
REG_READ(SDVOB);
REG_WRITE(SDVOC, cval);
REG_READ(SDVOC);
}
}
static bool psb_intel_sdvo_read_byte(
struct psb_intel_output *psb_intel_output,
u8 addr, u8 *ch)
{
struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
u8 out_buf[2];
u8 buf[2];
int ret;
struct i2c_msg msgs[] = {
{
.addr = sdvo_priv->i2c_bus->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = sdvo_priv->i2c_bus->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
}
};
out_buf[0] = addr;
out_buf[1] = 0;
ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
if (ret == 2) {
/* DRM_DEBUG("got back from addr %02X = %02x\n",
* out_buf[0], buf[0]);
*/
*ch = buf[0];
return true;
}
DRM_DEBUG("i2c transfer returned %d\n", ret);
return false;
}
static bool psb_intel_sdvo_write_byte(
struct psb_intel_output *psb_intel_output,
int addr, u8 ch)
{
u8 out_buf[2];
struct i2c_msg msgs[] = {
{
.addr = psb_intel_output->i2c_bus->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
}
};
out_buf[0] = addr;
out_buf[1] = ch;
if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1)
return true;
return false;
}
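/*
 * Usage sketch for the two byte helpers above (hypothetical call site;
 * 'output' and 'cmd' stand in for real values): a register read is a
 * write-then-read i2c transfer, a register write a single two-byte
 * message:
 *
 *	u8 status;
 *
 *	psb_intel_sdvo_write_byte(output, SDVO_I2C_OPCODE, cmd);
 *	if (psb_intel_sdvo_read_byte(output, SDVO_I2C_CMD_STATUS, &status))
 *		;	// status now holds the device's response code
 */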
#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
/** Mapping of command numbers to names, for debug output */
static const struct _sdvo_cmd_name {
u8 cmd;
char *name;
} sdvo_cmd_names[] = {
SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
SDVO_CMD_NAME_ENTRY
(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
SDVO_CMD_NAME_ENTRY
(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
SDVO_CMD_NAME_ENTRY
(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
SDVO_CMD_NAME_ENTRY
(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
SDVO_CMD_NAME_ENTRY
(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
SDVO_CMD_NAME_ENTRY
(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
SDVO_CMD_NAME_ENTRY
(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
SDVO_CMD_NAME_ENTRY
(SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),};
#define SDVO_NAME(dev_priv) \
((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
#define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv)
static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output,
u8 cmd,
void *args,
int args_len)
{
struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
int i;
if (0) {
DRM_DEBUG("%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
for (i = 0; i < args_len; i++)
printk(KERN_INFO"%02X ", ((u8 *) args)[i]);
for (; i < 8; i++)
printk(" ");
for (i = 0;
i <
sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]);
i++) {
if (cmd == sdvo_cmd_names[i].cmd) {
printk("(%s)", sdvo_cmd_names[i].name);
break;
}
}
if (i ==
sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
printk("(%02X)", cmd);
printk("\n");
}
for (i = 0; i < args_len; i++) {
psb_intel_sdvo_write_byte(psb_intel_output,
SDVO_I2C_ARG_0 - i,
((u8 *) args)[i]);
}
psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd);
}
static const char *const cmd_status_names[] = {
"Power on",
"Success",
"Not supported",
"Invalid arg",
"Pending",
"Target not specified",
"Scaling not supported"
};
static u8 psb_intel_sdvo_read_response(
struct psb_intel_output *psb_intel_output,
void *response, int response_len)
{
struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
int i;
u8 status;
u8 retry = 50;
while (retry--) {
/* Read the command response */
for (i = 0; i < response_len; i++) {
psb_intel_sdvo_read_byte(psb_intel_output,
SDVO_I2C_RETURN_0 + i,
&((u8 *) response)[i]);
}
/* read the return status */
psb_intel_sdvo_read_byte(psb_intel_output,
SDVO_I2C_CMD_STATUS,
&status);
if (0) {
DRM_DEBUG("%s: R: ", SDVO_NAME(sdvo_priv));
for (i = 0; i < response_len; i++)
printk(KERN_INFO"%02X ", ((u8 *) response)[i]);
for (; i < 8; i++)
printk(" ");
if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
printk(KERN_INFO"(%s)",
cmd_status_names[status]);
else
printk(KERN_INFO"(??? %d)", status);
printk("\n");
}
if (status != SDVO_CMD_STATUS_PENDING)
return status;
mdelay(50);
}
return status;
}
int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
{
if (mode->clock >= 100000)
return 1;
else if (mode->clock >= 50000)
return 2;
else
return 4;
}
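/*
 * Worked example: a 1024x768@60 mode has a dot clock of about 65000
 * (kHz), which lands in the [50000, 100000) band, so the multiplier is
 * 2; mode_fixup() below then doubles adjusted_mode->clock to 130000
 * before the CRTC code sees it.
 */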
/**
* Don't check status code from this as it switches the bus back to the
* SDVO chips which defeats the purpose of doing a bus switch in the first
* place.
*/
void psb_intel_sdvo_set_control_bus_switch(
struct psb_intel_output *psb_intel_output,
u8 target)
{
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_SET_CONTROL_BUS_SWITCH,
&target,
1);
}
static bool psb_intel_sdvo_set_target_input(
struct psb_intel_output *psb_intel_output,
bool target_0, bool target_1)
{
struct psb_intel_sdvo_set_target_input_args targets = { 0 };
u8 status;
if (target_0 && target_1)
return false;
if (target_1)
targets.target_1 = 1;
psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT,
&targets, sizeof(targets));
status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
return status == SDVO_CMD_STATUS_SUCCESS;
}
/**
* Return whether each input is trained.
*
* This function is making an assumption about the layout of the response,
* which should be checked against the docs.
*/
static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output
*psb_intel_output, bool *input_1,
bool *input_2)
{
struct psb_intel_sdvo_get_trained_inputs_response response;
u8 status;
psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS,
NULL, 0);
status =
psb_intel_sdvo_read_response(psb_intel_output, &response,
sizeof(response));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
*input_1 = response.input0_trained;
*input_2 = response.input1_trained;
return true;
}
static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output
*psb_intel_output, u16 *outputs)
{
u8 status;
psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS,
NULL, 0);
status =
psb_intel_sdvo_read_response(psb_intel_output, outputs,
sizeof(*outputs));
return status == SDVO_CMD_STATUS_SUCCESS;
}
static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output
*psb_intel_output, u16 outputs)
{
u8 status;
psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS,
&outputs, sizeof(outputs));
status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
return status == SDVO_CMD_STATUS_SUCCESS;
}
static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output
*psb_intel_output, int mode)
{
u8 status, state = SDVO_ENCODER_STATE_ON;
switch (mode) {
case DRM_MODE_DPMS_ON:
state = SDVO_ENCODER_STATE_ON;
break;
case DRM_MODE_DPMS_STANDBY:
state = SDVO_ENCODER_STATE_STANDBY;
break;
case DRM_MODE_DPMS_SUSPEND:
state = SDVO_ENCODER_STATE_SUSPEND;
break;
case DRM_MODE_DPMS_OFF:
state = SDVO_ENCODER_STATE_OFF;
break;
}
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
sizeof(state));
status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
return status == SDVO_CMD_STATUS_SUCCESS;
}
static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output
*psb_intel_output,
int *clock_min,
int *clock_max)
{
struct psb_intel_sdvo_pixel_clock_range clocks;
u8 status;
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL,
0);
status =
psb_intel_sdvo_read_response(psb_intel_output, &clocks,
sizeof(clocks));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
/* Convert the values from units of 10 kHz to kHz. */
*clock_min = clocks.min * 10;
*clock_max = clocks.max * 10;
return true;
}
static bool psb_intel_sdvo_set_target_output(
struct psb_intel_output *psb_intel_output,
u16 outputs)
{
u8 status;
psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT,
&outputs, sizeof(outputs));
status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
return status == SDVO_CMD_STATUS_SUCCESS;
}
static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output,
u8 cmd, struct psb_intel_sdvo_dtd *dtd)
{
u8 status;
psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0);
status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
sizeof(dtd->part1));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0);
status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
sizeof(dtd->part2));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
return true;
}
static bool psb_intel_sdvo_get_input_timing(
struct psb_intel_output *psb_intel_output,
struct psb_intel_sdvo_dtd *dtd)
{
return psb_intel_sdvo_get_timing(psb_intel_output,
SDVO_CMD_GET_INPUT_TIMINGS_PART1,
dtd);
}
static bool psb_intel_sdvo_set_timing(
struct psb_intel_output *psb_intel_output,
u8 cmd,
struct psb_intel_sdvo_dtd *dtd)
{
u8 status;
psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1,
sizeof(dtd->part1));
status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2,
sizeof(dtd->part2));
status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
return true;
}
static bool psb_intel_sdvo_set_input_timing(
struct psb_intel_output *psb_intel_output,
struct psb_intel_sdvo_dtd *dtd)
{
return psb_intel_sdvo_set_timing(psb_intel_output,
SDVO_CMD_SET_INPUT_TIMINGS_PART1,
dtd);
}
static bool psb_intel_sdvo_set_output_timing(
struct psb_intel_output *psb_intel_output,
struct psb_intel_sdvo_dtd *dtd)
{
return psb_intel_sdvo_set_timing(psb_intel_output,
SDVO_CMD_SET_OUTPUT_TIMINGS_PART1,
dtd);
}
static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output
*psb_intel_output)
{
u8 response, status;
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_GET_CLOCK_RATE_MULT,
NULL,
0);
status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1);
if (status != SDVO_CMD_STATUS_SUCCESS) {
DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
return SDVO_CLOCK_RATE_MULT_1X;
} else {
DRM_DEBUG("Current clock rate multiplier: %d\n", response);
}
return response;
}
static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output
*psb_intel_output, u8 val)
{
u8 status;
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_SET_CLOCK_RATE_MULT,
&val,
1);
status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
return true;
}
static bool psb_sdvo_set_current_inoutmap(struct psb_intel_output *output,
u32 in0outputmask,
u32 in1outputmask)
{
u8 byArgs[4];
u8 status;
int i;
struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
/* Make all fields of the args/ret to zero */
memset(byArgs, 0, sizeof(byArgs));
/* Fill up the argument values; */
byArgs[0] = (u8) (in0outputmask & 0xFF);
byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
byArgs[2] = (u8) (in1outputmask & 0xFF);
byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
/*save inoutmap arg here*/
for (i = 0; i < 4; i++)
sdvo_priv->in_out_map[i] = byArgs[i];
psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
status = psb_intel_sdvo_read_response(output, NULL, 0);
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
return true;
}
static void psb_intel_sdvo_set_iomap(struct psb_intel_output *output)
{
u32 dwCurrentSDVOIn0 = 0;
u32 dwCurrentSDVOIn1 = 0;
u32 dwDevMask = 0;
struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
/* Please DO NOT change the following code. */
/* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
/* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
if (sdvo_priv->by_input_wiring & (SDVOB_IN0 | SDVOC_IN0)) {
switch (sdvo_priv->active_device) {
case SDVO_DEVICE_LVDS:
dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
break;
case SDVO_DEVICE_TMDS:
dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
break;
case SDVO_DEVICE_TV:
dwDevMask =
SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
break;
case SDVO_DEVICE_CRT:
dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
break;
}
dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
} else if (sdvo_priv->by_input_wiring & (SDVOB_IN1 | SDVOC_IN1)) {
switch (sdvo_priv->active_device) {
case SDVO_DEVICE_LVDS:
dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
break;
case SDVO_DEVICE_TMDS:
dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
break;
case SDVO_DEVICE_TV:
dwDevMask =
SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
break;
case SDVO_DEVICE_CRT:
dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
break;
}
dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
}
psb_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
dwCurrentSDVOIn1);
}
static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
* device will be told of the multiplier during mode_set.
*/
adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
return true;
}
static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
struct psb_intel_output *psb_intel_output =
enc_to_psb_intel_output(encoder);
struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
u16 width, height;
u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
u16 h_sync_offset, v_sync_offset;
u32 sdvox;
struct psb_intel_sdvo_dtd output_dtd;
int sdvo_pixel_multiply;
if (!mode)
return;
psb_intel_sdvo_set_target_output(psb_intel_output, 0);
width = mode->crtc_hdisplay;
height = mode->crtc_vdisplay;
/* do some mode translations */
h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
output_dtd.part1.clock = mode->clock / 10;
output_dtd.part1.h_active = width & 0xff;
output_dtd.part1.h_blank = h_blank_len & 0xff;
output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
((h_blank_len >> 8) & 0xf);
output_dtd.part1.v_active = height & 0xff;
output_dtd.part1.v_blank = v_blank_len & 0xff;
output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
((v_blank_len >> 8) & 0xf);
output_dtd.part2.h_sync_off = h_sync_offset;
output_dtd.part2.h_sync_width = h_sync_len & 0xff;
output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
(v_sync_len & 0xf);
output_dtd.part2.sync_off_width_high =
((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) |
((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4);
output_dtd.part2.dtd_flags = 0x18;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
output_dtd.part2.dtd_flags |= 0x2;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
output_dtd.part2.dtd_flags |= 0x4;
output_dtd.part2.sdvo_flags = 0;
output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
output_dtd.part2.reserved = 0;
/* Set the output timing to the screen */
psb_intel_sdvo_set_target_output(psb_intel_output,
sdvo_priv->active_outputs);
/* Set the input timing to the screen. Assume always input 0. */
psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd);
/* We would like to use i830_sdvo_create_preferred_input_timing() to
* provide the device with a timing it can support, if it supports that
* feature. However, presumably we would need to adjust the CRTC to
* output the preferred timing, and we don't support that currently.
*/
psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd);
switch (psb_intel_sdvo_get_pixel_multiplier(mode)) {
case 1:
psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
SDVO_CLOCK_RATE_MULT_1X);
break;
case 2:
psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
SDVO_CLOCK_RATE_MULT_2X);
break;
case 4:
psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
SDVO_CLOCK_RATE_MULT_4X);
break;
}
/* Set the SDVO control regs. */
sdvox = REG_READ(sdvo_priv->output_device);
switch (sdvo_priv->output_device) {
case SDVOB:
sdvox &= SDVOB_PRESERVE_MASK;
break;
case SDVOC:
sdvox &= SDVOC_PRESERVE_MASK;
break;
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
if (psb_intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode);
psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox);
psb_intel_sdvo_set_iomap(psb_intel_output);
}
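/*
 * DTD packing example for the split fields written above (numbers
 * chosen for illustration): with width = 1024 (0x400) and h_blank_len =
 * 320 (0x140),
 *
 *	part1.h_active = 0x00;		   // low byte of 0x400
 *	part1.h_blank  = 0x40;		   // low byte of 0x140
 *	part1.h_high   = (0x4 << 4) | 0x1; // high nibbles -> 0x41
 *
 * i.e. each field keeps its low 8 bits, and the two high nibbles share
 * one byte.
 */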
static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct psb_intel_output *psb_intel_output =
enc_to_psb_intel_output(encoder);
struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
u32 temp;
if (mode != DRM_MODE_DPMS_ON) {
psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
if (0)
psb_intel_sdvo_set_encoder_power_state(
psb_intel_output,
mode);
if (mode == DRM_MODE_DPMS_OFF) {
temp = REG_READ(sdvo_priv->output_device);
if ((temp & SDVO_ENABLE) != 0) {
psb_intel_sdvo_write_sdvox(psb_intel_output,
temp &
~SDVO_ENABLE);
}
}
} else {
bool input1, input2;
int i;
u8 status;
temp = REG_READ(sdvo_priv->output_device);
if ((temp & SDVO_ENABLE) == 0)
psb_intel_sdvo_write_sdvox(psb_intel_output,
temp | SDVO_ENABLE);
for (i = 0; i < 2; i++)
psb_intel_wait_for_vblank(dev);
status =
psb_intel_sdvo_get_trained_inputs(psb_intel_output,
&input1,
&input2);
/* Warn if the device reported failure to sync.
* A lot of SDVO devices fail to notify of sync, but given
* that the status is a success, we assume it succeeded.
*/
if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
DRM_DEBUG
("First %s output reported failure to sync\n",
SDVO_NAME(sdvo_priv));
}
if (0) /* encoder power-state call disabled in this driver */
psb_intel_sdvo_set_encoder_power_state(
psb_intel_output,
mode);
psb_intel_sdvo_set_active_outputs(psb_intel_output,
sdvo_priv->active_outputs);
}
return;
}
static void psb_intel_sdvo_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct psb_intel_output *psb_intel_output =
to_psb_intel_output(connector);
struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
/*int o;*/
sdvo_priv->save_sdvo_mult =
psb_intel_sdvo_get_clock_rate_mult(psb_intel_output);
psb_intel_sdvo_get_active_outputs(psb_intel_output,
&sdvo_priv->save_active_outputs);
if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
psb_intel_sdvo_set_target_input(psb_intel_output,
true,
false);
psb_intel_sdvo_get_input_timing(psb_intel_output,
&sdvo_priv->save_input_dtd_1);
}
if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
psb_intel_sdvo_set_target_input(psb_intel_output,
false,
true);
psb_intel_sdvo_get_input_timing(psb_intel_output,
&sdvo_priv->save_input_dtd_2);
}
sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device);
/*TODO: save the in_out_map state*/
}
static void psb_intel_sdvo_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct psb_intel_output *psb_intel_output =
to_psb_intel_output(connector);
struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
/*int o;*/
int i;
bool input1, input2;
u8 status;
psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
psb_intel_sdvo_set_input_timing(psb_intel_output,
&sdvo_priv->save_input_dtd_1);
}
if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
psb_intel_sdvo_set_input_timing(psb_intel_output,
&sdvo_priv->save_input_dtd_2);
}
psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
sdvo_priv->save_sdvo_mult);
REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
if (sdvo_priv->save_SDVOX & SDVO_ENABLE) {
for (i = 0; i < 2; i++)
psb_intel_wait_for_vblank(dev);
status =
psb_intel_sdvo_get_trained_inputs(psb_intel_output,
&input1,
&input2);
if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
DRM_DEBUG
("First %s output reported failure to sync\n",
SDVO_NAME(sdvo_priv));
}
psb_intel_sdvo_set_active_outputs(psb_intel_output,
sdvo_priv->save_active_outputs);
/*TODO: restore in_out_map*/
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_SET_IN_OUT_MAP,
sdvo_priv->in_out_map,
4);
psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
}
static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct psb_intel_output *psb_intel_output =
to_psb_intel_output(connector);
struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (sdvo_priv->pixel_clock_min > mode->clock)
return MODE_CLOCK_LOW;
if (sdvo_priv->pixel_clock_max < mode->clock)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
static bool psb_intel_sdvo_get_capabilities(
struct psb_intel_output *psb_intel_output,
struct psb_intel_sdvo_caps *caps)
{
u8 status;
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_GET_DEVICE_CAPS,
NULL,
0);
status = psb_intel_sdvo_read_response(psb_intel_output,
caps,
sizeof(*caps));
if (status != SDVO_CMD_STATUS_SUCCESS)
return false;
return true;
}
struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
{
struct drm_connector *connector = NULL;
struct psb_intel_output *iout = NULL;
struct psb_intel_sdvo_priv *sdvo;
/* find the sdvo connector */
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
iout = to_psb_intel_output(connector);
if (iout->type != INTEL_OUTPUT_SDVO)
continue;
sdvo = iout->dev_priv;
if (sdvo->output_device == SDVOB && sdvoB)
return connector;
if (sdvo->output_device == SDVOC && !sdvoB)
return connector;
}
return NULL;
}
int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
{
u8 response[2];
u8 status;
struct psb_intel_output *psb_intel_output;
DRM_DEBUG("\n");
if (!connector)
return 0;
psb_intel_output = to_psb_intel_output(connector);
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_GET_HOT_PLUG_SUPPORT,
NULL,
0);
status = psb_intel_sdvo_read_response(psb_intel_output,
&response,
2);
if (response[0] != 0)
return 1;
return 0;
}
void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
{
u8 response[2];
u8 status;
struct psb_intel_output *psb_intel_output =
to_psb_intel_output(connector);
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_GET_ACTIVE_HOT_PLUG,
NULL,
0);
psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
if (on) {
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL,
0);
status = psb_intel_sdvo_read_response(psb_intel_output,
&response,
2);
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_SET_ACTIVE_HOT_PLUG,
&response, 2);
} else {
response[0] = 0;
response[1] = 0;
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_SET_ACTIVE_HOT_PLUG,
&response, 2);
}
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_GET_ACTIVE_HOT_PLUG,
NULL,
0);
psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
}
static enum drm_connector_status psb_intel_sdvo_detect(
struct drm_connector *connector, bool force)
{
u8 response[2];
u8 status;
struct psb_intel_output *psb_intel_output =
to_psb_intel_output(connector);
psb_intel_sdvo_write_cmd(psb_intel_output,
SDVO_CMD_GET_ATTACHED_DISPLAYS,
NULL,
0);
status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
if ((response[0] != 0) || (response[1] != 0))
return connector_status_connected;
else
return connector_status_disconnected;
}
static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
{
struct psb_intel_output *psb_intel_output =
to_psb_intel_output(connector);
/* set the bus switch and get the modes */
psb_intel_sdvo_set_control_bus_switch(psb_intel_output,
SDVO_CONTROL_BUS_DDC2);
psb_intel_ddc_get_modes(psb_intel_output);
if (list_empty(&connector->probed_modes))
return 0;
return 1;
}
static void psb_intel_sdvo_destroy(struct drm_connector *connector)
{
struct psb_intel_output *psb_intel_output =
to_psb_intel_output(connector);
if (psb_intel_output->i2c_bus)
psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(psb_intel_output);
}
static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
.dpms = psb_intel_sdvo_dpms,
.mode_fixup = psb_intel_sdvo_mode_fixup,
.prepare = psb_intel_encoder_prepare,
.mode_set = psb_intel_sdvo_mode_set,
.commit = psb_intel_encoder_commit,
};
static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.save = psb_intel_sdvo_save,
.restore = psb_intel_sdvo_restore,
.detect = psb_intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = psb_intel_sdvo_destroy,
};
static const struct drm_connector_helper_funcs
psb_intel_sdvo_connector_helper_funcs = {
.get_modes = psb_intel_sdvo_get_modes,
.mode_valid = psb_intel_sdvo_mode_valid,
.best_encoder = psb_intel_best_encoder,
};
void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
}
static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
.destroy = psb_intel_sdvo_enc_destroy,
};
void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
{
struct drm_connector *connector;
struct psb_intel_output *psb_intel_output;
struct psb_intel_sdvo_priv *sdvo_priv;
struct psb_intel_i2c_chan *i2cbus = NULL;
int connector_type;
u8 ch[0x40];
int i;
int encoder_type, output_id;
psb_intel_output =
kzalloc(sizeof(struct psb_intel_output) +
sizeof(struct psb_intel_sdvo_priv), GFP_KERNEL);
if (!psb_intel_output)
return;
connector = &psb_intel_output->base;
drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs,
DRM_MODE_CONNECTOR_Unknown);
drm_connector_helper_add(connector,
&psb_intel_sdvo_connector_helper_funcs);
sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1);
psb_intel_output->type = INTEL_OUTPUT_SDVO;
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
/* setup the DDC bus. */
if (output_device == SDVOB)
i2cbus =
psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
else
i2cbus =
psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
if (!i2cbus)
goto err_connector;
sdvo_priv->i2c_bus = i2cbus;
if (output_device == SDVOB) {
output_id = 1;
sdvo_priv->by_input_wiring = SDVOB_IN0;
sdvo_priv->i2c_bus->slave_addr = 0x38;
} else {
output_id = 2;
sdvo_priv->i2c_bus->slave_addr = 0x39;
}
sdvo_priv->output_device = output_device;
psb_intel_output->i2c_bus = i2cbus;
psb_intel_output->dev_priv = sdvo_priv;
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
DRM_DEBUG("No SDVO device found on SDVO%c\n",
output_device == SDVOB ? 'B' : 'C');
goto err_i2c;
}
}
psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps);
memset(&sdvo_priv->active_outputs, 0,
sizeof(sdvo_priv->active_outputs));
/* TODO, CVBS, SVID, YPRPB & SCART outputs. */
if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
sdvo_priv->active_device = SDVO_DEVICE_CRT;
connector->display_info.subpixel_order =
SubPixelHorizontalRGB;
encoder_type = DRM_MODE_ENCODER_DAC;
connector_type = DRM_MODE_CONNECTOR_VGA;
} else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
sdvo_priv->active_device = SDVO_DEVICE_CRT;
connector->display_info.subpixel_order =
SubPixelHorizontalRGB;
encoder_type = DRM_MODE_ENCODER_DAC;
connector_type = DRM_MODE_CONNECTOR_VGA;
} else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
sdvo_priv->active_device = SDVO_DEVICE_TMDS;
connector->display_info.subpixel_order =
SubPixelHorizontalRGB;
encoder_type = DRM_MODE_ENCODER_TMDS;
connector_type = DRM_MODE_CONNECTOR_DVID;
} else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
sdvo_priv->active_device = SDVO_DEVICE_TMDS;
connector->display_info.subpixel_order =
SubPixelHorizontalRGB;
encoder_type = DRM_MODE_ENCODER_TMDS;
connector_type = DRM_MODE_CONNECTOR_DVID;
} else {
unsigned char bytes[2];
memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
DRM_DEBUG
("%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
goto err_i2c;
}
drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs,
encoder_type);
drm_encoder_helper_add(&psb_intel_output->enc,
&psb_intel_sdvo_helper_funcs);
connector->connector_type = connector_type;
drm_mode_connector_attach_encoder(&psb_intel_output->base,
&psb_intel_output->enc);
drm_sysfs_connector_add(connector);
/* Set the input timing to the screen. Assume always input 0. */
psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output,
&sdvo_priv->pixel_clock_min,
&sdvo_priv->pixel_clock_max);
DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
"input 1: %c, input 2: %c, "
"output 1: %c, output 2: %c\n",
SDVO_NAME(sdvo_priv),
sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
sdvo_priv->caps.device_rev_id,
sdvo_priv->pixel_clock_min / 1000,
sdvo_priv->pixel_clock_max / 1000,
(sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
(sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
/* check currently supported outputs */
sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
sdvo_priv->caps.output_flags &
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
psb_intel_output->ddc_bus = i2cbus;
return;
err_i2c:
psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
err_connector:
drm_connector_cleanup(connector);
kfree(psb_intel_output);
return;
}
| gpl-2.0 |
MoKee/android_kernel_samsung_espresso10 | arch/powerpc/platforms/chrp/pci.c | 3921 | 10679 | /*
* CHRP pci routines.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/hydra.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/grackle.h>
#include <asm/rtas.h>
#include "chrp.h"
#include "gg2.h"
/* LongTrail */
void __iomem *gg2_pci_config_base;
/*
* The VLSI Golden Gate II has only 512K of PCI configuration space, so we
* limit the bus number to 3 bits
*/
int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
int len, u32 *val)
{
volatile void __iomem *cfg_data;
struct pci_controller *hose = pci_bus_to_host(bus);
if (bus->number > 7)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that off is
* suitably aligned and that len is 1, 2 or 4.
*/
cfg_data = hose->cfg_data + ((bus->number<<16) | (devfn<<8) | off);
switch (len) {
case 1:
*val = in_8(cfg_data);
break;
case 2:
*val = in_le16(cfg_data);
break;
default:
*val = in_le32(cfg_data);
break;
}
return PCIBIOS_SUCCESSFUL;
}
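/*
 * A worked example of the mapping above (illustrative): bus 2, device 3,
 * function 1, offset 0x10 gives
 *
 *	devfn = PCI_DEVFN(3, 1) = 0x19
 *	cfg_data = hose->cfg_data + (2 << 16 | 0x19 << 8 | 0x10)
 *	         = hose->cfg_data + 0x21910
 *
 * With the bus number limited to 3 bits, 8 buses * 256 devfns * 256
 * bytes = 512K, exactly the GG2 configuration window.
 */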
int gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off,
int len, u32 val)
{
volatile void __iomem *cfg_data;
struct pci_controller *hose = pci_bus_to_host(bus);
if (bus->number > 7)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that off is
* suitably aligned and that len is 1, 2 or 4.
*/
cfg_data = hose->cfg_data + ((bus->number<<16) | (devfn<<8) | off);
switch (len) {
case 1:
out_8(cfg_data, val);
break;
case 2:
out_le16(cfg_data, val);
break;
default:
out_le32(cfg_data, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops gg2_pci_ops =
{
.read = gg2_read_config,
.write = gg2_write_config,
};
/*
* Access functions for PCI config space using RTAS calls.
*/
int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 *val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
| (((bus->number - hose->first_busno) & 0xff) << 16)
| (hose->global_number << 24);
int ret = -1;
int rval;
rval = rtas_call(rtas_token("read-pci-config"), 2, 2, &ret, addr, len);
*val = ret;
return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
}
int rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
| (((bus->number - hose->first_busno) & 0xff) << 16)
| (hose->global_number << 24);
int rval;
rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL,
addr, len, val);
return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
}
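/*
 * The RTAS config address packs, from low to high byte: register offset,
 * devfn, bus number relative to the PHB, and the host-bridge number.
 * Illustrative example: bus 1 under PHB 0 (first_busno 0), devfn 0x08,
 * offset 0x04 encodes as 0x00010804.
 */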
static struct pci_ops rtas_pci_ops =
{
.read = rtas_read_config,
.write = rtas_write_config,
};
volatile struct Hydra __iomem *Hydra = NULL;
int __init
hydra_init(void)
{
struct device_node *np;
struct resource r;
np = of_find_node_by_name(NULL, "mac-io");
if (np == NULL || of_address_to_resource(np, 0, &r)) {
of_node_put(np);
return 0;
}
of_node_put(np);
Hydra = ioremap(r.start, resource_size(&r));
printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start);
printk("Hydra Feature_Control was %x",
in_le32(&Hydra->Feature_Control));
out_le32(&Hydra->Feature_Control, (HYDRA_FC_SCC_CELL_EN |
HYDRA_FC_SCSI_CELL_EN |
HYDRA_FC_SCCA_ENABLE |
HYDRA_FC_SCCB_ENABLE |
HYDRA_FC_ARB_BYPASS |
HYDRA_FC_MPIC_ENABLE |
HYDRA_FC_SLOW_SCC_PCLK |
HYDRA_FC_MPIC_IS_MASTER));
printk(", now %x\n", in_le32(&Hydra->Feature_Control));
return 1;
}
#define PRG_CL_RESET_VALID 0x00010000
static void __init
setup_python(struct pci_controller *hose, struct device_node *dev)
{
u32 __iomem *reg;
u32 val;
struct resource r;
if (of_address_to_resource(dev, 0, &r)) {
printk(KERN_ERR "No address for Python PCI controller\n");
return;
}
/* Clear the magic go-slow bit */
reg = ioremap(r.start + 0xf6000, 0x40);
BUG_ON(!reg);
val = in_be32(&reg[12]);
if (val & PRG_CL_RESET_VALID) {
out_be32(&reg[12], val & ~PRG_CL_RESET_VALID);
in_be32(&reg[12]);
}
iounmap(reg);
setup_indirect_pci(hose, r.start + 0xf8000, r.start + 0xf8010, 0);
}
/* Marvell Discovery II based Pegasos 2 */
static void __init setup_peg2(struct pci_controller *hose, struct device_node *dev)
{
struct device_node *root = of_find_node_by_path("/");
struct device_node *rtas;
rtas = of_find_node_by_name(root, "rtas");
if (rtas) {
hose->ops = &rtas_pci_ops;
of_node_put(rtas);
} else {
printk ("RTAS supporting Pegasos OF not found, please upgrade"
" your firmware\n");
}
ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);
/* keep the reference to the root node */
}
void __init
chrp_find_bridges(void)
{
struct device_node *dev;
const int *bus_range;
int len, index = -1;
struct pci_controller *hose;
const unsigned int *dma;
const char *model, *machine;
int is_longtrail = 0, is_mot = 0, is_pegasos = 0;
struct device_node *root = of_find_node_by_path("/");
struct resource r;
/*
* The PCI host bridge nodes on some machines don't have
* properties to adequately identify them, so we have to
* look at what sort of machine this is as well.
*/
machine = of_get_property(root, "model", NULL);
if (machine != NULL) {
is_longtrail = strncmp(machine, "IBM,LongTrail", 13) == 0;
is_mot = strncmp(machine, "MOT", 3) == 0;
if (strncmp(machine, "Pegasos2", 8) == 0)
is_pegasos = 2;
else if (strncmp(machine, "Pegasos", 7) == 0)
is_pegasos = 1;
}
for (dev = root->child; dev != NULL; dev = dev->sibling) {
if (dev->type == NULL || strcmp(dev->type, "pci") != 0)
continue;
++index;
/* The GG2 bridge on the LongTrail doesn't have an address */
if (of_address_to_resource(dev, 0, &r) && !is_longtrail) {
printk(KERN_WARNING "Can't use %s: no address\n",
dev->full_name);
continue;
}
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get bus-range for %s\n",
dev->full_name);
continue;
}
if (bus_range[1] == bus_range[0])
printk(KERN_INFO "PCI bus %d", bus_range[0]);
else
printk(KERN_INFO "PCI buses %d..%d",
bus_range[0], bus_range[1]);
printk(" controlled by %s", dev->full_name);
if (!is_longtrail)
printk(" at %llx", (unsigned long long)r.start);
printk("\n");
hose = pcibios_alloc_controller(dev);
if (!hose) {
printk("Can't allocate PCI controller structure for %s\n",
dev->full_name);
continue;
}
hose->first_busno = hose->self_busno = bus_range[0];
hose->last_busno = bus_range[1];
model = of_get_property(dev, "model", NULL);
if (model == NULL)
model = "<none>";
if (strncmp(model, "IBM, Python", 11) == 0) {
setup_python(hose, dev);
} else if (is_mot
|| strncmp(model, "Motorola, Grackle", 17) == 0) {
setup_grackle(hose);
} else if (is_longtrail) {
void __iomem *p = ioremap(GG2_PCI_CONFIG_BASE, 0x80000);
hose->ops = &gg2_pci_ops;
hose->cfg_data = p;
gg2_pci_config_base = p;
} else if (is_pegasos == 1) {
setup_indirect_pci(hose, 0xfec00cf8, 0xfee00cfc, 0);
} else if (is_pegasos == 2) {
setup_peg2(hose, dev);
} else if (!strncmp(model, "IBM,CPC710", 10)) {
setup_indirect_pci(hose,
r.start + 0x000f8000,
r.start + 0x000f8010,
0);
if (index == 0) {
dma = of_get_property(dev, "system-dma-base",
&len);
if (dma && len >= sizeof(*dma)) {
dma = (unsigned int *)
(((unsigned long)dma) +
len - sizeof(*dma));
pci_dram_offset = *dma;
}
}
} else {
printk("No methods for %s (model %s), using RTAS\n",
dev->full_name, model);
hose->ops = &rtas_pci_ops;
}
pci_process_bridge_OF_ranges(hose, dev, index == 0);
/* check the first bridge for a property that we can
use to set pci_dram_offset */
dma = of_get_property(dev, "ibm,dma-ranges", &len);
if (index == 0 && dma != NULL && len >= 6 * sizeof(*dma)) {
pci_dram_offset = dma[2] - dma[3];
printk("pci_dram_offset = %lx\n", pci_dram_offset);
}
}
of_node_put(root);
}
/* SL82C105 IDE Control/Status Register */
#define SL82C105_IDECSR 0x40
/* Fixup for Winbond ATA quirk, required for briq mostly because the
* 8259 is configured for level sensitive IRQ 14 and so wants the
* ATA controller to be set to fully native mode or bad things
* will happen.
*/
static void __devinit chrp_pci_fixup_winbond_ata(struct pci_dev *sl82c105)
{
u8 progif;
/* If non-briq machines need that fixup too, please speak up */
if (!machine_is(chrp) || _chrp_type != _CHRP_briq)
return;
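/* In the PCI IDE programming interface, bit 0 (primary channel) and
 * bit 2 (secondary channel) indicate native mode, so (class & 5) != 5
 * means at least one channel is still in legacy mode. */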
if ((sl82c105->class & 5) != 5) {
printk("W83C553: Switching SL82C105 IDE to PCI native mode\n");
/* Enable SL82C105 PCI native IDE mode */
pci_read_config_byte(sl82c105, PCI_CLASS_PROG, &progif);
pci_write_config_byte(sl82c105, PCI_CLASS_PROG, progif | 0x05);
sl82c105->class |= 0x05;
/* Disable SL82C105 second port */
pci_write_config_word(sl82c105, SL82C105_IDECSR, 0x0003);
/* Clear IO BARs, they will be reassigned */
pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_0, 0);
pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_1, 0);
pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_2, 0);
pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_3, 0);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
chrp_pci_fixup_winbond_ata);
/* Pegasos2 firmware version 20040810 configures the built-in IDE controller
* in legacy mode, but sets the PCI registers to PCI native mode.
* The chip can only operate in legacy mode, so force the PCI class into legacy
* mode as well. The same fixup must be done to the class-code property in
* the IDE node /pci@80000000/ide@C,1
*/
static void chrp_pci_fixup_vt8231_ata(struct pci_dev *viaide)
{
u8 progif;
struct pci_dev *viaisa;
if (!machine_is(chrp) || _chrp_type != _CHRP_Pegasos)
return;
if (viaide->irq != 14)
return;
viaisa = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
if (!viaisa)
return;
dev_info(&viaide->dev, "Fixing VIA IDE, force legacy mode on\n");
pci_read_config_byte(viaide, PCI_CLASS_PROG, &progif);
pci_write_config_byte(viaide, PCI_CLASS_PROG, progif & ~0x5);
viaide->class &= ~0x5;
pci_dev_put(viaisa);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, chrp_pci_fixup_vt8231_ata);
| gpl-2.0 |
IKGapirov/android_kernel_SM-G800H | drivers/i2c/i2c-dev.c | 4433 | 17957 | /*
i2c-dev.c - i2c-bus driver, char device interface
Copyright (C) 1995-97 Simon G. Vogl
Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl>
Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301 USA.
*/
/* Note that this is a complete rewrite of Simon Vogl's i2c-dev module.
But I have used so much of his original code and ideas that it seems
only fair to recognize him as co-author -- Frodo */
/* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
/*
* An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a
* slave (i2c_client) with which messages will be exchanged. It's coupled
* with a character special file which is accessed by user mode drivers.
*
* The list of i2c_dev structures is parallel to the i2c_adapter lists
* maintained by the driver model, and is updated using bus notifications.
*/
struct i2c_dev {
struct list_head list;
struct i2c_adapter *adap;
struct device *dev;
};
#define I2C_MINORS 256
static LIST_HEAD(i2c_dev_list);
static DEFINE_SPINLOCK(i2c_dev_list_lock);
static struct i2c_dev *i2c_dev_get_by_minor(unsigned index)
{
struct i2c_dev *i2c_dev;
spin_lock(&i2c_dev_list_lock);
list_for_each_entry(i2c_dev, &i2c_dev_list, list) {
if (i2c_dev->adap->nr == index)
goto found;
}
i2c_dev = NULL;
found:
spin_unlock(&i2c_dev_list_lock);
return i2c_dev;
}
static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap)
{
struct i2c_dev *i2c_dev;
if (adap->nr >= I2C_MINORS) {
printk(KERN_ERR "i2c-dev: Out of device minors (%d)\n",
adap->nr);
return ERR_PTR(-ENODEV);
}
i2c_dev = kzalloc(sizeof(*i2c_dev), GFP_KERNEL);
if (!i2c_dev)
return ERR_PTR(-ENOMEM);
i2c_dev->adap = adap;
spin_lock(&i2c_dev_list_lock);
list_add_tail(&i2c_dev->list, &i2c_dev_list);
spin_unlock(&i2c_dev_list_lock);
return i2c_dev;
}
static void return_i2c_dev(struct i2c_dev *i2c_dev)
{
spin_lock(&i2c_dev_list_lock);
list_del(&i2c_dev->list);
spin_unlock(&i2c_dev_list_lock);
kfree(i2c_dev);
}
static ssize_t show_adapter_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(dev->devt));
if (!i2c_dev)
return -ENODEV;
return sprintf(buf, "%s\n", i2c_dev->adap->name);
}
static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
/* ------------------------------------------------------------------------- */
/*
* After opening an instance of this character special file, a file
* descriptor starts out associated only with an i2c_adapter (and bus).
*
* Using the I2C_RDWR ioctl(), you can then *immediately* issue i2c_msg
* traffic to any devices on the bus used by that adapter. That's because
* the i2c_msg vectors embed all the addressing information they need, and
* are submitted directly to an i2c_adapter. However, SMBus-only adapters
* don't support that interface.
*
* To use read()/write() system calls on that file descriptor, or to use
* SMBus interfaces (and work with SMBus-only hosts!), you must first issue
* an I2C_SLAVE (or I2C_SLAVE_FORCE) ioctl. That configures an anonymous
* (never registered) i2c_client so it holds the addressing information
* needed by those system calls and by this SMBus interface.
*/
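/*
 * A minimal user-space sketch of the flow described above (illustrative;
 * error handling omitted, and the EEPROM-like device at address 0x50 on
 * bus 1 is an assumption, as are reg, buf and len):
 *
 *	int fd = open("/dev/i2c-1", O_RDWR);
 *	ioctl(fd, I2C_SLAVE, 0x50);	// bind the fd to one slave address
 *	write(fd, &reg, 1);		// set the device's register pointer
 *	read(fd, buf, len);		// plain reads now hit that device
 */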
static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
loff_t *offset)
{
char *tmp;
int ret;
struct i2c_client *client = file->private_data;
if (count > 8192)
count = 8192;
tmp = kmalloc(count, GFP_KERNEL);
if (tmp == NULL)
return -ENOMEM;
pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n",
iminor(file->f_path.dentry->d_inode), count);
ret = i2c_master_recv(client, tmp, count);
if (ret >= 0)
ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret;
kfree(tmp);
return ret;
}
static ssize_t i2cdev_write(struct file *file, const char __user *buf,
size_t count, loff_t *offset)
{
int ret;
char *tmp;
struct i2c_client *client = file->private_data;
if (count > 8192)
count = 8192;
tmp = memdup_user(buf, count);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n",
iminor(file->f_path.dentry->d_inode), count);
ret = i2c_master_send(client, tmp, count);
kfree(tmp);
return ret;
}
static int i2cdev_check(struct device *dev, void *addrp)
{
struct i2c_client *client = i2c_verify_client(dev);
if (!client || client->addr != *(unsigned int *)addrp)
return 0;
return dev->driver ? -EBUSY : 0;
}
/* walk up mux tree */
static int i2cdev_check_mux_parents(struct i2c_adapter *adapter, int addr)
{
struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
int result;
result = device_for_each_child(&adapter->dev, &addr, i2cdev_check);
if (!result && parent)
result = i2cdev_check_mux_parents(parent, addr);
return result;
}
/* recurse down mux tree */
static int i2cdev_check_mux_children(struct device *dev, void *addrp)
{
int result;
if (dev->type == &i2c_adapter_type)
result = device_for_each_child(dev, addrp,
i2cdev_check_mux_children);
else
result = i2cdev_check(dev, addrp);
return result;
}
/* This address checking function differs from the one in i2c-core
in that it considers an address with a registered device, but no
driver bound to it, as NOT busy. */
static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr)
{
struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
int result = 0;
if (parent)
result = i2cdev_check_mux_parents(parent, addr);
if (!result)
result = device_for_each_child(&adapter->dev, &addr,
i2cdev_check_mux_children);
return result;
}
static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
unsigned long arg)
{
struct i2c_rdwr_ioctl_data rdwr_arg;
struct i2c_msg *rdwr_pa;
u8 __user **data_ptrs;
int i, res;
if (copy_from_user(&rdwr_arg,
(struct i2c_rdwr_ioctl_data __user *)arg,
sizeof(rdwr_arg)))
return -EFAULT;
/* Put an arbitrary limit on the number of messages that can
* be sent at once */
if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS)
return -EINVAL;
rdwr_pa = memdup_user(rdwr_arg.msgs,
rdwr_arg.nmsgs * sizeof(struct i2c_msg));
if (IS_ERR(rdwr_pa))
return PTR_ERR(rdwr_pa);
data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL);
if (data_ptrs == NULL) {
kfree(rdwr_pa);
return -ENOMEM;
}
res = 0;
for (i = 0; i < rdwr_arg.nmsgs; i++) {
/* Limit the size of the message to a sane amount;
* and don't let length change either. */
if ((rdwr_pa[i].len > 8192) ||
(rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
res = -EINVAL;
break;
}
data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
if (IS_ERR(rdwr_pa[i].buf)) {
res = PTR_ERR(rdwr_pa[i].buf);
break;
}
}
if (res < 0) {
int j;
for (j = 0; j < i; ++j)
kfree(rdwr_pa[j].buf);
kfree(data_ptrs);
kfree(rdwr_pa);
return res;
}
res = i2c_transfer(client->adapter, rdwr_pa, rdwr_arg.nmsgs);
while (i-- > 0) {
if (res >= 0 && (rdwr_pa[i].flags & I2C_M_RD)) {
if (copy_to_user(data_ptrs[i], rdwr_pa[i].buf,
rdwr_pa[i].len))
res = -EFAULT;
}
kfree(rdwr_pa[i].buf);
}
kfree(data_ptrs);
kfree(rdwr_pa);
return res;
}
static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
unsigned long arg)
{
struct i2c_smbus_ioctl_data data_arg;
union i2c_smbus_data temp;
int datasize, res;
if (copy_from_user(&data_arg,
(struct i2c_smbus_ioctl_data __user *) arg,
sizeof(struct i2c_smbus_ioctl_data)))
return -EFAULT;
if ((data_arg.size != I2C_SMBUS_BYTE) &&
(data_arg.size != I2C_SMBUS_QUICK) &&
(data_arg.size != I2C_SMBUS_BYTE_DATA) &&
(data_arg.size != I2C_SMBUS_WORD_DATA) &&
(data_arg.size != I2C_SMBUS_PROC_CALL) &&
(data_arg.size != I2C_SMBUS_BLOCK_DATA) &&
(data_arg.size != I2C_SMBUS_I2C_BLOCK_BROKEN) &&
(data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA) &&
(data_arg.size != I2C_SMBUS_BLOCK_PROC_CALL)) {
dev_dbg(&client->adapter->dev,
"size out of range (%x) in ioctl I2C_SMBUS.\n",
data_arg.size);
return -EINVAL;
}
/* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1,
so the check is valid if size==I2C_SMBUS_QUICK too. */
if ((data_arg.read_write != I2C_SMBUS_READ) &&
(data_arg.read_write != I2C_SMBUS_WRITE)) {
dev_dbg(&client->adapter->dev,
"read_write out of range (%x) in ioctl I2C_SMBUS.\n",
data_arg.read_write);
return -EINVAL;
}
/* Note that command values are always valid! */
if ((data_arg.size == I2C_SMBUS_QUICK) ||
((data_arg.size == I2C_SMBUS_BYTE) &&
(data_arg.read_write == I2C_SMBUS_WRITE)))
/* These are special: we do not use data */
return i2c_smbus_xfer(client->adapter, client->addr,
client->flags, data_arg.read_write,
data_arg.command, data_arg.size, NULL);
if (data_arg.data == NULL) {
dev_dbg(&client->adapter->dev,
"data is NULL pointer in ioctl I2C_SMBUS.\n");
return -EINVAL;
}
if ((data_arg.size == I2C_SMBUS_BYTE_DATA) ||
(data_arg.size == I2C_SMBUS_BYTE))
datasize = sizeof(data_arg.data->byte);
else if ((data_arg.size == I2C_SMBUS_WORD_DATA) ||
(data_arg.size == I2C_SMBUS_PROC_CALL))
datasize = sizeof(data_arg.data->word);
else /* size == smbus block, i2c block, or block proc. call */
datasize = sizeof(data_arg.data->block);
if ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
(data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
(data_arg.size == I2C_SMBUS_I2C_BLOCK_DATA) ||
(data_arg.read_write == I2C_SMBUS_WRITE)) {
if (copy_from_user(&temp, data_arg.data, datasize))
return -EFAULT;
}
if (data_arg.size == I2C_SMBUS_I2C_BLOCK_BROKEN) {
/* Convert old I2C block commands to the new
convention. This preserves binary compatibility. */
data_arg.size = I2C_SMBUS_I2C_BLOCK_DATA;
if (data_arg.read_write == I2C_SMBUS_READ)
temp.block[0] = I2C_SMBUS_BLOCK_MAX;
}
res = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
data_arg.read_write, data_arg.command, data_arg.size, &temp);
if (!res && ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
(data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
(data_arg.read_write == I2C_SMBUS_READ))) {
if (copy_to_user(data_arg.data, &temp, datasize))
return -EFAULT;
}
return res;
}
static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct i2c_client *client = file->private_data;
unsigned long funcs;
dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n",
cmd, arg);
switch (cmd) {
case I2C_SLAVE:
case I2C_SLAVE_FORCE:
/* NOTE: devices set up to work with "new style" drivers
* can't use I2C_SLAVE, even when the device node is not
* bound to a driver. Only I2C_SLAVE_FORCE will work.
*
* Setting the PEC flag here won't affect kernel drivers,
* which will be using the i2c_client node registered with
* the driver model core. Likewise, when that client has
* the PEC flag already set, the i2c-dev driver won't see
* (or use) this setting.
*/
if ((arg > 0x3ff) ||
(((client->flags & I2C_M_TEN) == 0) && arg > 0x7f))
return -EINVAL;
if (cmd == I2C_SLAVE && i2cdev_check_addr(client->adapter, arg))
return -EBUSY;
/* REVISIT: address could become busy later */
client->addr = arg;
return 0;
case I2C_TENBIT:
if (arg)
client->flags |= I2C_M_TEN;
else
client->flags &= ~I2C_M_TEN;
return 0;
case I2C_PEC:
if (arg)
client->flags |= I2C_CLIENT_PEC;
else
client->flags &= ~I2C_CLIENT_PEC;
return 0;
case I2C_FUNCS:
funcs = i2c_get_functionality(client->adapter);
return put_user(funcs, (unsigned long __user *)arg);
case I2C_RDWR:
return i2cdev_ioctl_rdrw(client, arg);
case I2C_SMBUS:
return i2cdev_ioctl_smbus(client, arg);
case I2C_RETRIES:
client->adapter->retries = arg;
break;
case I2C_TIMEOUT:
/* For historical reasons, user-space sets the timeout
* value in units of 10 ms.
*/
client->adapter->timeout = msecs_to_jiffies(arg * 10);
break;
default:
/* NOTE: returning a fault code here could cause trouble
* in buggy userspace code. Some old kernel bugs returned
* zero in this case, and userspace code might accidentally
* have depended on that bug.
*/
return -ENOTTY;
}
return 0;
}
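/*
 * Example for the I2C_TIMEOUT case above (illustrative):
 * ioctl(fd, I2C_TIMEOUT, 5) requests 5 * 10 ms = 50 ms, stored in the
 * adapter as msecs_to_jiffies(50).
 */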
static int i2cdev_open(struct inode *inode, struct file *file)
{
unsigned int minor = iminor(inode);
struct i2c_client *client;
struct i2c_adapter *adap;
struct i2c_dev *i2c_dev;
i2c_dev = i2c_dev_get_by_minor(minor);
if (!i2c_dev)
return -ENODEV;
adap = i2c_get_adapter(i2c_dev->adap->nr);
if (!adap)
return -ENODEV;
/* This creates an anonymous i2c_client, which may later be
* pointed to some address using I2C_SLAVE or I2C_SLAVE_FORCE.
*
* This client is ** NEVER REGISTERED ** with the driver model
* or I2C core code!! It just holds private copies of addressing
* information and maybe a PEC flag.
*/
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client) {
i2c_put_adapter(adap);
return -ENOMEM;
}
snprintf(client->name, I2C_NAME_SIZE, "i2c-dev %d", adap->nr);
client->adapter = adap;
file->private_data = client;
return 0;
}
static int i2cdev_release(struct inode *inode, struct file *file)
{
struct i2c_client *client = file->private_data;
i2c_put_adapter(client->adapter);
kfree(client);
file->private_data = NULL;
return 0;
}
static const struct file_operations i2cdev_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = i2cdev_read,
.write = i2cdev_write,
.unlocked_ioctl = i2cdev_ioctl,
.open = i2cdev_open,
.release = i2cdev_release,
};
/* ------------------------------------------------------------------------- */
static struct class *i2c_dev_class;
static int i2cdev_attach_adapter(struct device *dev, void *dummy)
{
struct i2c_adapter *adap;
struct i2c_dev *i2c_dev;
int res;
if (dev->type != &i2c_adapter_type)
return 0;
adap = to_i2c_adapter(dev);
i2c_dev = get_free_i2c_dev(adap);
if (IS_ERR(i2c_dev))
return PTR_ERR(i2c_dev);
/* register this i2c device with the driver core */
i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
MKDEV(I2C_MAJOR, adap->nr), NULL,
"i2c-%d", adap->nr);
if (IS_ERR(i2c_dev->dev)) {
res = PTR_ERR(i2c_dev->dev);
goto error;
}
res = device_create_file(i2c_dev->dev, &dev_attr_name);
if (res)
goto error_destroy;
pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
adap->name, adap->nr);
return 0;
error_destroy:
device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
error:
return_i2c_dev(i2c_dev);
return res;
}
static int i2cdev_detach_adapter(struct device *dev, void *dummy)
{
struct i2c_adapter *adap;
struct i2c_dev *i2c_dev;
if (dev->type != &i2c_adapter_type)
return 0;
adap = to_i2c_adapter(dev);
i2c_dev = i2c_dev_get_by_minor(adap->nr);
if (!i2c_dev) /* attach_adapter must have failed */
return 0;
device_remove_file(i2c_dev->dev, &dev_attr_name);
return_i2c_dev(i2c_dev);
device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
return 0;
}
static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
void *data)
{
struct device *dev = data;
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
return i2cdev_attach_adapter(dev, NULL);
case BUS_NOTIFY_DEL_DEVICE:
return i2cdev_detach_adapter(dev, NULL);
}
return 0;
}
static struct notifier_block i2cdev_notifier = {
.notifier_call = i2cdev_notifier_call,
};
/* ------------------------------------------------------------------------- */
/*
* module load/unload record keeping
*/
static int __init i2c_dev_init(void)
{
int res;
printk(KERN_INFO "i2c /dev entries driver\n");
res = register_chrdev(I2C_MAJOR, "i2c", &i2cdev_fops);
if (res)
goto out;
i2c_dev_class = class_create(THIS_MODULE, "i2c-dev");
if (IS_ERR(i2c_dev_class)) {
res = PTR_ERR(i2c_dev_class);
goto out_unreg_chrdev;
}
/* Keep track of adapters which will be added or removed later */
res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
if (res)
goto out_unreg_class;
/* Bind to already existing adapters right away */
i2c_for_each_dev(NULL, i2cdev_attach_adapter);
return 0;
out_unreg_class:
class_destroy(i2c_dev_class);
out_unreg_chrdev:
unregister_chrdev(I2C_MAJOR, "i2c");
out:
printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__);
return res;
}
static void __exit i2c_dev_exit(void)
{
bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
i2c_for_each_dev(NULL, i2cdev_detach_adapter);
class_destroy(i2c_dev_class);
unregister_chrdev(I2C_MAJOR, "i2c");
}
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
"Simon G. Vogl <simon@tk.uni-linz.ac.at>");
MODULE_DESCRIPTION("I2C /dev entries driver");
MODULE_LICENSE("GPL");
module_init(i2c_dev_init);
module_exit(i2c_dev_exit);
| gpl-2.0 |
vantjnh1991/F160-JB | arch/mips/jz4740/gpio.c | 4945 | 13857 | /*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
* JZ4740 platform GPIO support
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/mach-jz4740/base.h>
#include "irq.h"
#define JZ4740_GPIO_BASE_A (32*0)
#define JZ4740_GPIO_BASE_B (32*1)
#define JZ4740_GPIO_BASE_C (32*2)
#define JZ4740_GPIO_BASE_D (32*3)
#define JZ4740_GPIO_NUM_A 32
#define JZ4740_GPIO_NUM_B 32
#define JZ4740_GPIO_NUM_C 31
#define JZ4740_GPIO_NUM_D 32
#define JZ4740_IRQ_GPIO_BASE_A (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_A)
#define JZ4740_IRQ_GPIO_BASE_B (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_B)
#define JZ4740_IRQ_GPIO_BASE_C (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_C)
#define JZ4740_IRQ_GPIO_BASE_D (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_D)
#define JZ_REG_GPIO_PIN 0x00
#define JZ_REG_GPIO_DATA 0x10
#define JZ_REG_GPIO_DATA_SET 0x14
#define JZ_REG_GPIO_DATA_CLEAR 0x18
#define JZ_REG_GPIO_MASK 0x20
#define JZ_REG_GPIO_MASK_SET 0x24
#define JZ_REG_GPIO_MASK_CLEAR 0x28
#define JZ_REG_GPIO_PULL 0x30
#define JZ_REG_GPIO_PULL_SET 0x34
#define JZ_REG_GPIO_PULL_CLEAR 0x38
#define JZ_REG_GPIO_FUNC 0x40
#define JZ_REG_GPIO_FUNC_SET 0x44
#define JZ_REG_GPIO_FUNC_CLEAR 0x48
#define JZ_REG_GPIO_SELECT 0x50
#define JZ_REG_GPIO_SELECT_SET 0x54
#define JZ_REG_GPIO_SELECT_CLEAR 0x58
#define JZ_REG_GPIO_DIRECTION 0x60
#define JZ_REG_GPIO_DIRECTION_SET 0x64
#define JZ_REG_GPIO_DIRECTION_CLEAR 0x68
#define JZ_REG_GPIO_TRIGGER 0x70
#define JZ_REG_GPIO_TRIGGER_SET 0x74
#define JZ_REG_GPIO_TRIGGER_CLEAR 0x78
#define JZ_REG_GPIO_FLAG 0x80
#define JZ_REG_GPIO_FLAG_CLEAR 0x14
#define GPIO_TO_BIT(gpio) BIT(gpio & 0x1f)
#define GPIO_TO_REG(gpio, reg) (gpio_to_jz_gpio_chip(gpio)->base + (reg))
#define CHIP_TO_REG(chip, reg) (gpio_chip_to_jz_gpio_chip(chip)->base + (reg))
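/*
 * Worked example for the helpers above (illustrative): GPIO 37 is bank B
 * (37 >> 5 == 1), bit 5 (37 & 0x1f), so GPIO_TO_BIT(37) == BIT(5) and
 * GPIO_TO_REG(37, JZ_REG_GPIO_DATA_SET) points at bank B's data-set
 * register. Writing a single bit to a dedicated SET/CLEAR register
 * updates one pin without a read-modify-write, so no locking is needed.
 */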
struct jz_gpio_chip {
unsigned int irq;
unsigned int irq_base;
uint32_t edge_trigger_both;
void __iomem *base;
struct gpio_chip gpio_chip;
};
static struct jz_gpio_chip jz4740_gpio_chips[];
static inline struct jz_gpio_chip *gpio_to_jz_gpio_chip(unsigned int gpio)
{
return &jz4740_gpio_chips[gpio >> 5];
}
static inline struct jz_gpio_chip *gpio_chip_to_jz_gpio_chip(struct gpio_chip *gpio_chip)
{
return container_of(gpio_chip, struct jz_gpio_chip, gpio_chip);
}
static inline struct jz_gpio_chip *irq_to_jz_gpio_chip(struct irq_data *data)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
return gc->private;
}
static inline void jz_gpio_write_bit(unsigned int gpio, unsigned int reg)
{
writel(GPIO_TO_BIT(gpio), GPIO_TO_REG(gpio, reg));
}
int jz_gpio_set_function(int gpio, enum jz_gpio_function function)
{
if (function == JZ_GPIO_FUNC_NONE) {
jz_gpio_write_bit(gpio, JZ_REG_GPIO_FUNC_CLEAR);
jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_CLEAR);
jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_CLEAR);
} else {
jz_gpio_write_bit(gpio, JZ_REG_GPIO_FUNC_SET);
jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_CLEAR);
switch (function) {
case JZ_GPIO_FUNC1:
jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_CLEAR);
break;
case JZ_GPIO_FUNC3:
jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_SET);
case JZ_GPIO_FUNC2: /* Fallthrough */
jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_SET);
break;
default:
BUG();
break;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(jz_gpio_set_function);
int jz_gpio_bulk_request(const struct jz_gpio_bulk_request *request, size_t num)
{
size_t i;
int ret;
for (i = 0; i < num; ++i, ++request) {
ret = gpio_request(request->gpio, request->name);
if (ret)
goto err;
jz_gpio_set_function(request->gpio, request->function);
}
return 0;
err:
for (--request; i > 0; --i, --request) {
gpio_free(request->gpio);
jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
}
return ret;
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_request);
void jz_gpio_bulk_free(const struct jz_gpio_bulk_request *request, size_t num)
{
size_t i;
for (i = 0; i < num; ++i, ++request) {
gpio_free(request->gpio);
jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
}
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_free);
void jz_gpio_bulk_suspend(const struct jz_gpio_bulk_request *request, size_t num)
{
size_t i;
for (i = 0; i < num; ++i, ++request) {
jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
jz_gpio_write_bit(request->gpio, JZ_REG_GPIO_DIRECTION_CLEAR);
jz_gpio_write_bit(request->gpio, JZ_REG_GPIO_PULL_SET);
}
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_suspend);
void jz_gpio_bulk_resume(const struct jz_gpio_bulk_request *request, size_t num)
{
size_t i;
for (i = 0; i < num; ++i, ++request)
jz_gpio_set_function(request->gpio, request->function);
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_resume);
void jz_gpio_enable_pullup(unsigned gpio)
{
jz_gpio_write_bit(gpio, JZ_REG_GPIO_PULL_CLEAR);
}
EXPORT_SYMBOL_GPL(jz_gpio_enable_pullup);
void jz_gpio_disable_pullup(unsigned gpio)
{
jz_gpio_write_bit(gpio, JZ_REG_GPIO_PULL_SET);
}
EXPORT_SYMBOL_GPL(jz_gpio_disable_pullup);
static int jz_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
return !!(readl(CHIP_TO_REG(chip, JZ_REG_GPIO_PIN)) & BIT(gpio));
}
static void jz_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value)
{
uint32_t __iomem *reg = CHIP_TO_REG(chip, JZ_REG_GPIO_DATA_SET);
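/* DATA_CLEAR (0x18) sits one 32-bit word after DATA_SET (0x14), so
 * stepping the pointer by !value selects SET for nonzero and CLEAR
 * for zero. */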
reg += !value;
writel(BIT(gpio), reg);
}
static int jz_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
int value)
{
writel(BIT(gpio), CHIP_TO_REG(chip, JZ_REG_GPIO_DIRECTION_SET));
jz_gpio_set_value(chip, gpio, value);
return 0;
}
static int jz_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
writel(BIT(gpio), CHIP_TO_REG(chip, JZ_REG_GPIO_DIRECTION_CLEAR));
return 0;
}
int jz_gpio_port_direction_input(int port, uint32_t mask)
{
writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_CLEAR));
return 0;
}
EXPORT_SYMBOL(jz_gpio_port_direction_input);
int jz_gpio_port_direction_output(int port, uint32_t mask)
{
writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_SET));
return 0;
}
EXPORT_SYMBOL(jz_gpio_port_direction_output);
void jz_gpio_port_set_value(int port, uint32_t value, uint32_t mask)
{
writel(~value & mask, GPIO_TO_REG(port, JZ_REG_GPIO_DATA_CLEAR));
writel(value & mask, GPIO_TO_REG(port, JZ_REG_GPIO_DATA_SET));
}
EXPORT_SYMBOL(jz_gpio_port_set_value);
uint32_t jz_gpio_port_get_value(int port, uint32_t mask)
{
uint32_t value = readl(GPIO_TO_REG(port, JZ_REG_GPIO_PIN));
return value & mask;
}
EXPORT_SYMBOL(jz_gpio_port_get_value);
int gpio_to_irq(unsigned gpio)
{
return JZ4740_IRQ_GPIO(0) + gpio;
}
EXPORT_SYMBOL_GPL(gpio_to_irq);
int irq_to_gpio(unsigned irq)
{
return irq - JZ4740_IRQ_GPIO(0);
}
EXPORT_SYMBOL_GPL(irq_to_gpio);
#define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f)
static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq)
{
uint32_t value;
void __iomem *reg;
uint32_t mask = IRQ_TO_BIT(irq);
if (!(chip->edge_trigger_both & mask))
return;
reg = chip->base;
value = readl(chip->base + JZ_REG_GPIO_PIN);
if (value & mask)
reg += JZ_REG_GPIO_DIRECTION_CLEAR;
else
reg += JZ_REG_GPIO_DIRECTION_SET;
writel(mask, reg);
}
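/*
 * Note on the above: in interrupt mode this hardware latches only one
 * edge at a time (the direction register selects rising vs falling, as
 * jz_gpio_irq_set_type() shows), so IRQ_TYPE_EDGE_BOTH is emulated by
 * re-arming for the opposite edge after every interrupt, based on the
 * current pin level.
 */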
static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
{
uint32_t flag;
unsigned int gpio_irq;
struct jz_gpio_chip *chip = irq_desc_get_handler_data(desc);
flag = readl(chip->base + JZ_REG_GPIO_FLAG);
if (!flag)
return;
gpio_irq = chip->irq_base + __fls(flag);
jz_gpio_check_trigger_both(chip, gpio_irq);
generic_handle_irq(gpio_irq);
}
static inline void jz_gpio_set_irq_bit(struct irq_data *data, unsigned int reg)
{
struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
writel(IRQ_TO_BIT(data->irq), chip->base + reg);
}
static void jz_gpio_irq_unmask(struct irq_data *data)
{
struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
jz_gpio_check_trigger_both(chip, data->irq);
irq_gc_unmask_enable_reg(data);
}
/* TODO: Check if function is gpio */
static unsigned int jz_gpio_irq_startup(struct irq_data *data)
{
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_SET);
jz_gpio_irq_unmask(data);
return 0;
}
static void jz_gpio_irq_shutdown(struct irq_data *data)
{
irq_gc_mask_disable_reg(data);
/* Set direction to input */
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_CLEAR);
}
static int jz_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
unsigned int irq = data->irq;
if (flow_type == IRQ_TYPE_EDGE_BOTH) {
uint32_t value = readl(chip->base + JZ_REG_GPIO_PIN);
if (value & IRQ_TO_BIT(irq))
flow_type = IRQ_TYPE_EDGE_FALLING;
else
flow_type = IRQ_TYPE_EDGE_RISING;
chip->edge_trigger_both |= IRQ_TO_BIT(irq);
} else {
chip->edge_trigger_both &= ~IRQ_TO_BIT(irq);
}
switch (flow_type) {
case IRQ_TYPE_EDGE_RISING:
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET);
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET);
break;
case IRQ_TYPE_EDGE_FALLING:
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET);
break;
case IRQ_TYPE_LEVEL_HIGH:
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET);
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR);
break;
case IRQ_TYPE_LEVEL_LOW:
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR);
break;
default:
return -EINVAL;
}
return 0;
}
static int jz_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
{
struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
irq_gc_set_wake(data, on);
irq_set_irq_wake(chip->irq, on);
return 0;
}
#define JZ4740_GPIO_CHIP(_bank) { \
.irq_base = JZ4740_IRQ_GPIO_BASE_ ## _bank, \
.gpio_chip = { \
.label = "Bank " # _bank, \
.owner = THIS_MODULE, \
.set = jz_gpio_set_value, \
.get = jz_gpio_get_value, \
.direction_output = jz_gpio_direction_output, \
.direction_input = jz_gpio_direction_input, \
.base = JZ4740_GPIO_BASE_ ## _bank, \
.ngpio = JZ4740_GPIO_NUM_ ## _bank, \
}, \
}
static struct jz_gpio_chip jz4740_gpio_chips[] = {
JZ4740_GPIO_CHIP(A),
JZ4740_GPIO_CHIP(B),
JZ4740_GPIO_CHIP(C),
JZ4740_GPIO_CHIP(D),
};
static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
chip->base = ioremap(JZ4740_GPIO_BASE_ADDR + (id * 0x100), 0x100);
chip->irq = JZ4740_IRQ_INTC_GPIO(id);
irq_set_handler_data(chip->irq, chip);
irq_set_chained_handler(chip->irq, jz_gpio_irq_demux_handler);
gc = irq_alloc_generic_chip(chip->gpio_chip.label, 1, chip->irq_base,
chip->base, handle_level_irq);
gc->wake_enabled = IRQ_MSK(chip->gpio_chip.ngpio);
gc->private = chip;
ct = gc->chip_types;
ct->regs.enable = JZ_REG_GPIO_MASK_CLEAR;
ct->regs.disable = JZ_REG_GPIO_MASK_SET;
ct->regs.ack = JZ_REG_GPIO_FLAG_CLEAR;
ct->chip.name = "GPIO";
ct->chip.irq_mask = irq_gc_mask_disable_reg;
ct->chip.irq_unmask = jz_gpio_irq_unmask;
ct->chip.irq_ack = irq_gc_ack_set_bit;
ct->chip.irq_suspend = jz4740_irq_suspend;
ct->chip.irq_resume = jz4740_irq_resume;
ct->chip.irq_startup = jz_gpio_irq_startup;
ct->chip.irq_shutdown = jz_gpio_irq_shutdown;
ct->chip.irq_set_type = jz_gpio_irq_set_type;
ct->chip.irq_set_wake = jz_gpio_irq_set_wake;
ct->chip.flags = IRQCHIP_SET_TYPE_MASKED;
irq_setup_generic_chip(gc, IRQ_MSK(chip->gpio_chip.ngpio),
IRQ_GC_INIT_NESTED_LOCK, 0, IRQ_NOPROBE | IRQ_LEVEL);
gpiochip_add(&chip->gpio_chip);
}
static int __init jz4740_gpio_init(void)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);
printk(KERN_INFO "JZ4740 GPIO initialized\n");
return 0;
}
arch_initcall(jz4740_gpio_init);
#ifdef CONFIG_DEBUG_FS
static inline void gpio_seq_reg(struct seq_file *s, struct jz_gpio_chip *chip,
const char *name, unsigned int reg)
{
seq_printf(s, "\t%s: %08x\n", name, readl(chip->base + reg));
}
static int gpio_regs_show(struct seq_file *s, void *unused)
{
struct jz_gpio_chip *chip = jz4740_gpio_chips;
int i;
for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i, ++chip) {
seq_printf(s, "==GPIO %d==\n", i);
gpio_seq_reg(s, chip, "Pin", JZ_REG_GPIO_PIN);
gpio_seq_reg(s, chip, "Data", JZ_REG_GPIO_DATA);
gpio_seq_reg(s, chip, "Mask", JZ_REG_GPIO_MASK);
gpio_seq_reg(s, chip, "Pull", JZ_REG_GPIO_PULL);
gpio_seq_reg(s, chip, "Func", JZ_REG_GPIO_FUNC);
gpio_seq_reg(s, chip, "Select", JZ_REG_GPIO_SELECT);
gpio_seq_reg(s, chip, "Direction", JZ_REG_GPIO_DIRECTION);
gpio_seq_reg(s, chip, "Trigger", JZ_REG_GPIO_TRIGGER);
gpio_seq_reg(s, chip, "Flag", JZ_REG_GPIO_FLAG);
}
return 0;
}
static int gpio_regs_open(struct inode *inode, struct file *file)
{
return single_open(file, gpio_regs_show, NULL);
}
static const struct file_operations gpio_regs_operations = {
.open = gpio_regs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init gpio_debugfs_init(void)
{
(void) debugfs_create_file("jz_regs_gpio", S_IFREG | S_IRUGO,
NULL, NULL, &gpio_regs_operations);
return 0;
}
subsys_initcall(gpio_debugfs_init);
#endif
| gpl-2.0 |
CyanogenMod/android_kernel_samsung_jf | arch/mips/jz4740/gpio.c | 4945 | 13857 | /*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
* JZ4740 platform GPIO support
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/mach-jz4740/base.h>
#include "irq.h"
#define JZ4740_GPIO_BASE_A (32*0)
#define JZ4740_GPIO_BASE_B (32*1)
#define JZ4740_GPIO_BASE_C (32*2)
#define JZ4740_GPIO_BASE_D (32*3)
#define JZ4740_GPIO_NUM_A 32
#define JZ4740_GPIO_NUM_B 32
#define JZ4740_GPIO_NUM_C 31
#define JZ4740_GPIO_NUM_D 32
#define JZ4740_IRQ_GPIO_BASE_A (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_A)
#define JZ4740_IRQ_GPIO_BASE_B (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_B)
#define JZ4740_IRQ_GPIO_BASE_C (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_C)
#define JZ4740_IRQ_GPIO_BASE_D (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_D)
#define JZ_REG_GPIO_PIN 0x00
#define JZ_REG_GPIO_DATA 0x10
#define JZ_REG_GPIO_DATA_SET 0x14
#define JZ_REG_GPIO_DATA_CLEAR 0x18
#define JZ_REG_GPIO_MASK 0x20
#define JZ_REG_GPIO_MASK_SET 0x24
#define JZ_REG_GPIO_MASK_CLEAR 0x28
#define JZ_REG_GPIO_PULL 0x30
#define JZ_REG_GPIO_PULL_SET 0x34
#define JZ_REG_GPIO_PULL_CLEAR 0x38
#define JZ_REG_GPIO_FUNC 0x40
#define JZ_REG_GPIO_FUNC_SET 0x44
#define JZ_REG_GPIO_FUNC_CLEAR 0x48
#define JZ_REG_GPIO_SELECT 0x50
#define JZ_REG_GPIO_SELECT_SET 0x54
#define JZ_REG_GPIO_SELECT_CLEAR 0x58
#define JZ_REG_GPIO_DIRECTION 0x60
#define JZ_REG_GPIO_DIRECTION_SET 0x64
#define JZ_REG_GPIO_DIRECTION_CLEAR 0x68
#define JZ_REG_GPIO_TRIGGER 0x70
#define JZ_REG_GPIO_TRIGGER_SET 0x74
#define JZ_REG_GPIO_TRIGGER_CLEAR 0x78
#define JZ_REG_GPIO_FLAG 0x80
#define JZ_REG_GPIO_FLAG_CLEAR 0x14
#define GPIO_TO_BIT(gpio) BIT(gpio & 0x1f)
#define GPIO_TO_REG(gpio, reg) (gpio_to_jz_gpio_chip(gpio)->base + (reg))
#define CHIP_TO_REG(chip, reg) (gpio_chip_to_jz_gpio_chip(chip)->base + (reg))
struct jz_gpio_chip {
unsigned int irq;
unsigned int irq_base;
uint32_t edge_trigger_both;
void __iomem *base;
struct gpio_chip gpio_chip;
};
static struct jz_gpio_chip jz4740_gpio_chips[];
static inline struct jz_gpio_chip *gpio_to_jz_gpio_chip(unsigned int gpio)
{
return &jz4740_gpio_chips[gpio >> 5];
}
static inline struct jz_gpio_chip *gpio_chip_to_jz_gpio_chip(struct gpio_chip *gpio_chip)
{
return container_of(gpio_chip, struct jz_gpio_chip, gpio_chip);
}
static inline struct jz_gpio_chip *irq_to_jz_gpio_chip(struct irq_data *data)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
return gc->private;
}
static inline void jz_gpio_write_bit(unsigned int gpio, unsigned int reg)
{
writel(GPIO_TO_BIT(gpio), GPIO_TO_REG(gpio, reg));
}
int jz_gpio_set_function(int gpio, enum jz_gpio_function function)
{
if (function == JZ_GPIO_FUNC_NONE) {
jz_gpio_write_bit(gpio, JZ_REG_GPIO_FUNC_CLEAR);
jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_CLEAR);
jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_CLEAR);
} else {
jz_gpio_write_bit(gpio, JZ_REG_GPIO_FUNC_SET);
jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_CLEAR);
switch (function) {
case JZ_GPIO_FUNC1:
jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_CLEAR);
break;
case JZ_GPIO_FUNC3:
jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_SET);
case JZ_GPIO_FUNC2: /* Fallthrough */
jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_SET);
break;
default:
BUG();
break;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(jz_gpio_set_function);
int jz_gpio_bulk_request(const struct jz_gpio_bulk_request *request, size_t num)
{
size_t i;
int ret;
for (i = 0; i < num; ++i, ++request) {
ret = gpio_request(request->gpio, request->name);
if (ret)
goto err;
jz_gpio_set_function(request->gpio, request->function);
}
return 0;
err:
for (--request; i > 0; --i, --request) {
gpio_free(request->gpio);
jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
}
return ret;
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_request);
void jz_gpio_bulk_free(const struct jz_gpio_bulk_request *request, size_t num)
{
size_t i;
for (i = 0; i < num; ++i, ++request) {
gpio_free(request->gpio);
jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
}
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_free);
void jz_gpio_bulk_suspend(const struct jz_gpio_bulk_request *request, size_t num)
{
size_t i;
for (i = 0; i < num; ++i, ++request) {
jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
jz_gpio_write_bit(request->gpio, JZ_REG_GPIO_DIRECTION_CLEAR);
jz_gpio_write_bit(request->gpio, JZ_REG_GPIO_PULL_SET);
}
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_suspend);
void jz_gpio_bulk_resume(const struct jz_gpio_bulk_request *request, size_t num)
{
size_t i;
for (i = 0; i < num; ++i, ++request)
jz_gpio_set_function(request->gpio, request->function);
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_resume);
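/*
 * The PULL register has inverted polarity: a set bit disables the pin's
 * pull-up, so enabling the pull-up means clearing the bit and vice versa.
 */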
void jz_gpio_enable_pullup(unsigned gpio)
{
jz_gpio_write_bit(gpio, JZ_REG_GPIO_PULL_CLEAR);
}
EXPORT_SYMBOL_GPL(jz_gpio_enable_pullup);
void jz_gpio_disable_pullup(unsigned gpio)
{
jz_gpio_write_bit(gpio, JZ_REG_GPIO_PULL_SET);
}
EXPORT_SYMBOL_GPL(jz_gpio_disable_pullup);
static int jz_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
return !!(readl(CHIP_TO_REG(chip, JZ_REG_GPIO_PIN)) & BIT(gpio));
}
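/*
 * DATA_SET (0x14) and DATA_CLEAR (0x18) are adjacent 32-bit registers, so
 * bumping the register pointer by one word when value == 0 selects the
 * clear register without a conditional branch.
 */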
static void jz_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value)
{
uint32_t __iomem *reg = CHIP_TO_REG(chip, JZ_REG_GPIO_DATA_SET);
reg += !value;
writel(BIT(gpio), reg);
}
static int jz_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
int value)
{
writel(BIT(gpio), CHIP_TO_REG(chip, JZ_REG_GPIO_DIRECTION_SET));
jz_gpio_set_value(chip, gpio, value);
return 0;
}
static int jz_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
writel(BIT(gpio), CHIP_TO_REG(chip, JZ_REG_GPIO_DIRECTION_CLEAR));
return 0;
}
int jz_gpio_port_direction_input(int port, uint32_t mask)
{
writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_CLEAR));
return 0;
}
EXPORT_SYMBOL(jz_gpio_port_direction_input);
int jz_gpio_port_direction_output(int port, uint32_t mask)
{
writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_SET));
return 0;
}
EXPORT_SYMBOL(jz_gpio_port_direction_output);
void jz_gpio_port_set_value(int port, uint32_t value, uint32_t mask)
{
writel(~value & mask, GPIO_TO_REG(port, JZ_REG_GPIO_DATA_CLEAR));
writel(value & mask, GPIO_TO_REG(port, JZ_REG_GPIO_DATA_SET));
}
EXPORT_SYMBOL(jz_gpio_port_set_value);
uint32_t jz_gpio_port_get_value(int port, uint32_t mask)
{
uint32_t value = readl(GPIO_TO_REG(port, JZ_REG_GPIO_PIN));
return value & mask;
}
EXPORT_SYMBOL(jz_gpio_port_get_value);
int gpio_to_irq(unsigned gpio)
{
return JZ4740_IRQ_GPIO(0) + gpio;
}
EXPORT_SYMBOL_GPL(gpio_to_irq);
int irq_to_gpio(unsigned irq)
{
return irq - JZ4740_IRQ_GPIO(0);
}
EXPORT_SYMBOL_GPL(irq_to_gpio);
#define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f)
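/*
 * The controller can only trigger on one edge at a time, so
 * IRQ_TYPE_EDGE_BOTH is emulated: jz_gpio_irq_set_type() arms the edge
 * opposite to the current pin level, and the helper below flips the
 * armed edge (via the DIRECTION register) each time the interrupt is
 * handled or unmasked.
 */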
static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq)
{
uint32_t value;
void __iomem *reg;
uint32_t mask = IRQ_TO_BIT(irq);
if (!(chip->edge_trigger_both & mask))
return;
reg = chip->base;
value = readl(chip->base + JZ_REG_GPIO_PIN);
if (value & mask)
reg += JZ_REG_GPIO_DIRECTION_CLEAR;
else
reg += JZ_REG_GPIO_DIRECTION_SET;
writel(mask, reg);
}
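/*
 * Chained handler for a bank's upstream interrupt line: dispatch the
 * highest pending flag bit as its own per-GPIO interrupt.
 */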
static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
{
uint32_t flag;
unsigned int gpio_irq;
struct jz_gpio_chip *chip = irq_desc_get_handler_data(desc);
flag = readl(chip->base + JZ_REG_GPIO_FLAG);
if (!flag)
return;
gpio_irq = chip->irq_base + __fls(flag);
jz_gpio_check_trigger_both(chip, gpio_irq);
generic_handle_irq(gpio_irq);
}
static inline void jz_gpio_set_irq_bit(struct irq_data *data, unsigned int reg)
{
struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
writel(IRQ_TO_BIT(data->irq), chip->base + reg);
}
static void jz_gpio_irq_unmask(struct irq_data *data)
{
struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
jz_gpio_check_trigger_both(chip, data->irq);
irq_gc_unmask_enable_reg(data);
}
/* TODO: Check if function is gpio */
static unsigned int jz_gpio_irq_startup(struct irq_data *data)
{
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_SET);
jz_gpio_irq_unmask(data);
return 0;
}
static void jz_gpio_irq_shutdown(struct irq_data *data)
{
irq_gc_mask_disable_reg(data);
/* Set direction to input */
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_CLEAR);
}
static int jz_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
unsigned int irq = data->irq;
if (flow_type == IRQ_TYPE_EDGE_BOTH) {
uint32_t value = readl(chip->base + JZ_REG_GPIO_PIN);
if (value & IRQ_TO_BIT(irq))
flow_type = IRQ_TYPE_EDGE_FALLING;
else
flow_type = IRQ_TYPE_EDGE_RISING;
chip->edge_trigger_both |= IRQ_TO_BIT(irq);
} else {
chip->edge_trigger_both &= ~IRQ_TO_BIT(irq);
}
switch (flow_type) {
case IRQ_TYPE_EDGE_RISING:
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET);
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET);
break;
case IRQ_TYPE_EDGE_FALLING:
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET);
break;
case IRQ_TYPE_LEVEL_HIGH:
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET);
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR);
break;
case IRQ_TYPE_LEVEL_LOW:
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR);
break;
default:
return -EINVAL;
}
return 0;
}
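/*
 * Wake-up needs two levels of plumbing: the per-pin wake mask in the
 * generic chip and wake enable on the bank's parent interrupt line.
 */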
static int jz_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
{
struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
irq_gc_set_wake(data, on);
irq_set_irq_wake(chip->irq, on);
return 0;
}
#define JZ4740_GPIO_CHIP(_bank) { \
.irq_base = JZ4740_IRQ_GPIO_BASE_ ## _bank, \
.gpio_chip = { \
.label = "Bank " # _bank, \
.owner = THIS_MODULE, \
.set = jz_gpio_set_value, \
.get = jz_gpio_get_value, \
.direction_output = jz_gpio_direction_output, \
.direction_input = jz_gpio_direction_input, \
.base = JZ4740_GPIO_BASE_ ## _bank, \
.ngpio = JZ4740_GPIO_NUM_ ## _bank, \
}, \
}
static struct jz_gpio_chip jz4740_gpio_chips[] = {
JZ4740_GPIO_CHIP(A),
JZ4740_GPIO_CHIP(B),
JZ4740_GPIO_CHIP(C),
JZ4740_GPIO_CHIP(D),
};
static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
chip->base = ioremap(JZ4740_GPIO_BASE_ADDR + (id * 0x100), 0x100);
chip->irq = JZ4740_IRQ_INTC_GPIO(id);
irq_set_handler_data(chip->irq, chip);
irq_set_chained_handler(chip->irq, jz_gpio_irq_demux_handler);
gc = irq_alloc_generic_chip(chip->gpio_chip.label, 1, chip->irq_base,
chip->base, handle_level_irq);
gc->wake_enabled = IRQ_MSK(chip->gpio_chip.ngpio);
gc->private = chip;
ct = gc->chip_types;
ct->regs.enable = JZ_REG_GPIO_MASK_CLEAR;
ct->regs.disable = JZ_REG_GPIO_MASK_SET;
ct->regs.ack = JZ_REG_GPIO_FLAG_CLEAR;
ct->chip.name = "GPIO";
ct->chip.irq_mask = irq_gc_mask_disable_reg;
ct->chip.irq_unmask = jz_gpio_irq_unmask;
ct->chip.irq_ack = irq_gc_ack_set_bit;
ct->chip.irq_suspend = jz4740_irq_suspend;
ct->chip.irq_resume = jz4740_irq_resume;
ct->chip.irq_startup = jz_gpio_irq_startup;
ct->chip.irq_shutdown = jz_gpio_irq_shutdown;
ct->chip.irq_set_type = jz_gpio_irq_set_type;
ct->chip.irq_set_wake = jz_gpio_irq_set_wake;
ct->chip.flags = IRQCHIP_SET_TYPE_MASKED;
irq_setup_generic_chip(gc, IRQ_MSK(chip->gpio_chip.ngpio),
IRQ_GC_INIT_NESTED_LOCK, 0, IRQ_NOPROBE | IRQ_LEVEL);
gpiochip_add(&chip->gpio_chip);
}
static int __init jz4740_gpio_init(void)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);
printk(KERN_INFO "JZ4740 GPIO initialized\n");
return 0;
}
arch_initcall(jz4740_gpio_init);
#ifdef CONFIG_DEBUG_FS
static inline void gpio_seq_reg(struct seq_file *s, struct jz_gpio_chip *chip,
const char *name, unsigned int reg)
{
seq_printf(s, "\t%s: %08x\n", name, readl(chip->base + reg));
}
static int gpio_regs_show(struct seq_file *s, void *unused)
{
struct jz_gpio_chip *chip = jz4740_gpio_chips;
int i;
for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i, ++chip) {
seq_printf(s, "==GPIO %d==\n", i);
gpio_seq_reg(s, chip, "Pin", JZ_REG_GPIO_PIN);
gpio_seq_reg(s, chip, "Data", JZ_REG_GPIO_DATA);
gpio_seq_reg(s, chip, "Mask", JZ_REG_GPIO_MASK);
gpio_seq_reg(s, chip, "Pull", JZ_REG_GPIO_PULL);
gpio_seq_reg(s, chip, "Func", JZ_REG_GPIO_FUNC);
gpio_seq_reg(s, chip, "Select", JZ_REG_GPIO_SELECT);
gpio_seq_reg(s, chip, "Direction", JZ_REG_GPIO_DIRECTION);
gpio_seq_reg(s, chip, "Trigger", JZ_REG_GPIO_TRIGGER);
gpio_seq_reg(s, chip, "Flag", JZ_REG_GPIO_FLAG);
}
return 0;
}
static int gpio_regs_open(struct inode *inode, struct file *file)
{
return single_open(file, gpio_regs_show, NULL);
}
static const struct file_operations gpio_regs_operations = {
.open = gpio_regs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init gpio_debugfs_init(void)
{
(void) debugfs_create_file("jz_regs_gpio", S_IFREG | S_IRUGO,
NULL, NULL, &gpio_regs_operations);
return 0;
}
subsys_initcall(gpio_debugfs_init);
#endif
| gpl-2.0 |
rudij7/green_machine_bacon | arch/mips/jz4740/irq.c | 4945 | 4022 | /*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
* JZ4740 platform IRQ support
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/irq_cpu.h>
#include <asm/mach-jz4740/base.h>
static void __iomem *jz_intc_base;
#define JZ_REG_INTC_STATUS 0x00
#define JZ_REG_INTC_MASK 0x04
#define JZ_REG_INTC_SET_MASK 0x08
#define JZ_REG_INTC_CLEAR_MASK 0x0c
#define JZ_REG_INTC_PENDING 0x10
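/*
 * Cascade handler hooked to the CPU interrupt line: dispatch the highest
 * pending INTC source as its own Linux interrupt.
 */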
static irqreturn_t jz4740_cascade(int irq, void *data)
{
uint32_t irq_reg;
irq_reg = readl(jz_intc_base + JZ_REG_INTC_PENDING);
if (irq_reg)
generic_handle_irq(__fls(irq_reg) + JZ4740_IRQ_BASE);
return IRQ_HANDLED;
}
static void jz4740_irq_set_mask(struct irq_chip_generic *gc, uint32_t mask)
{
struct irq_chip_regs *regs = &gc->chip_types->regs;
writel(mask, gc->reg_base + regs->enable);
writel(~mask, gc->reg_base + regs->disable);
}
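/*
 * Across suspend only the wake-enabled sources stay unmasked; resume
 * restores the generic chip's cached mask.
 */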
void jz4740_irq_suspend(struct irq_data *data)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
jz4740_irq_set_mask(gc, gc->wake_active);
}
void jz4740_irq_resume(struct irq_data *data)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
jz4740_irq_set_mask(gc, gc->mask_cache);
}
static struct irqaction jz4740_cascade_action = {
.handler = jz4740_cascade,
.name = "JZ4740 cascade interrupt",
};
void __init arch_init_irq(void)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
mips_cpu_irq_init();
jz_intc_base = ioremap(JZ4740_INTC_BASE_ADDR, 0x14);
/* Mask all irqs */
writel(0xffffffff, jz_intc_base + JZ_REG_INTC_SET_MASK);
gc = irq_alloc_generic_chip("INTC", 1, JZ4740_IRQ_BASE, jz_intc_base,
handle_level_irq);
gc->wake_enabled = IRQ_MSK(32);
ct = gc->chip_types;
ct->regs.enable = JZ_REG_INTC_CLEAR_MASK;
ct->regs.disable = JZ_REG_INTC_SET_MASK;
ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
ct->chip.irq_mask = irq_gc_mask_disable_reg;
ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
ct->chip.irq_set_wake = irq_gc_set_wake;
ct->chip.irq_suspend = jz4740_irq_suspend;
ct->chip.irq_resume = jz4740_irq_resume;
irq_setup_generic_chip(gc, IRQ_MSK(32), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
setup_irq(2, &jz4740_cascade_action);
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
if (pending & STATUSF_IP2)
do_IRQ(2);
else if (pending & STATUSF_IP3)
do_IRQ(3);
else
spurious_interrupt();
}
#ifdef CONFIG_DEBUG_FS
static inline void intc_seq_reg(struct seq_file *s, const char *name,
unsigned int reg)
{
seq_printf(s, "%s:\t\t%08x\n", name, readl(jz_intc_base + reg));
}
static int intc_regs_show(struct seq_file *s, void *unused)
{
intc_seq_reg(s, "Status", JZ_REG_INTC_STATUS);
intc_seq_reg(s, "Mask", JZ_REG_INTC_MASK);
intc_seq_reg(s, "Pending", JZ_REG_INTC_PENDING);
return 0;
}
static int intc_regs_open(struct inode *inode, struct file *file)
{
return single_open(file, intc_regs_show, NULL);
}
static const struct file_operations intc_regs_operations = {
.open = intc_regs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init intc_debugfs_init(void)
{
(void) debugfs_create_file("jz_regs_intc", S_IFREG | S_IRUGO,
NULL, NULL, &intc_regs_operations);
return 0;
}
subsys_initcall(intc_debugfs_init);
#endif
| gpl-2.0 |
whdgmawkd/furnace_kernel_lge_hammerhead | drivers/mmc/host/via-sdmmc.c | 5201 | 36078 | /*
* drivers/mmc/host/via-sdmmc.c - VIA SD/MMC Card Reader driver
* Copyright (c) 2008, VIA Technologies Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/mmc/host.h>
#define DRV_NAME "via_sdmmc"
#define PCI_DEVICE_ID_VIA_9530 0x9530
#define VIA_CRDR_SDC_OFF 0x200
#define VIA_CRDR_DDMA_OFF 0x400
#define VIA_CRDR_PCICTRL_OFF 0x600
#define VIA_CRDR_MIN_CLOCK 375000
#define VIA_CRDR_MAX_CLOCK 48000000
/*
* PCI registers
*/
#define VIA_CRDR_PCI_WORK_MODE 0x40
#define VIA_CRDR_PCI_DBG_MODE 0x41
/*
* SDC MMIO Registers
*/
#define VIA_CRDR_SDCTRL 0x0
#define VIA_CRDR_SDCTRL_START 0x01
#define VIA_CRDR_SDCTRL_WRITE 0x04
#define VIA_CRDR_SDCTRL_SINGLE_WR 0x10
#define VIA_CRDR_SDCTRL_SINGLE_RD 0x20
#define VIA_CRDR_SDCTRL_MULTI_WR 0x30
#define VIA_CRDR_SDCTRL_MULTI_RD 0x40
#define VIA_CRDR_SDCTRL_STOP 0x70
#define VIA_CRDR_SDCTRL_RSP_NONE 0x0
#define VIA_CRDR_SDCTRL_RSP_R1 0x10000
#define VIA_CRDR_SDCTRL_RSP_R2 0x20000
#define VIA_CRDR_SDCTRL_RSP_R3 0x30000
#define VIA_CRDR_SDCTRL_RSP_R1B 0x90000
#define VIA_CRDR_SDCARG 0x4
#define VIA_CRDR_SDBUSMODE 0x8
#define VIA_CRDR_SDMODE_4BIT 0x02
#define VIA_CRDR_SDMODE_CLK_ON 0x40
#define VIA_CRDR_SDBLKLEN 0xc
/*
* Bit 0 - Bit 10 : Block length. So, the maximum block length should be 2048.
* Bit 11 - Bit 13 : Reserved.
* GPIDET : Select GPI pin to detect card, GPI means CR_CD# in top design.
* INTEN : Enable SD host interrupt.
* Bit 16 - Bit 31 : Block count. So, the maximum block count should be 65536.
*/
#define VIA_CRDR_SDBLKLEN_GPIDET 0x2000
#define VIA_CRDR_SDBLKLEN_INTEN 0x8000
#define VIA_CRDR_MAX_BLOCK_COUNT 65536
#define VIA_CRDR_MAX_BLOCK_LENGTH 2048
#define VIA_CRDR_SDRESP0 0x10
#define VIA_CRDR_SDRESP1 0x14
#define VIA_CRDR_SDRESP2 0x18
#define VIA_CRDR_SDRESP3 0x1c
#define VIA_CRDR_SDCURBLKCNT 0x20
#define VIA_CRDR_SDINTMASK 0x24
/*
* MBDIE : Multiple Blocks transfer Done Interrupt Enable
* BDDIE : Block Data transfer Done Interrupt Enable
* CIRIE : Card Insertion or Removal Interrupt Enable
* CRDIE : Command-Response transfer Done Interrupt Enable
* CRTOIE : Command-Response response TimeOut Interrupt Enable
* ASCRDIE : Auto Stop Command-Response transfer Done Interrupt Enable
* DTIE : Data access Timeout Interrupt Enable
* SCIE : reSponse CRC error Interrupt Enable
* RCIE : Read data CRC error Interrupt Enable
* WCIE : Write data CRC error Interrupt Enable
*/
#define VIA_CRDR_SDINTMASK_MBDIE 0x10
#define VIA_CRDR_SDINTMASK_BDDIE 0x20
#define VIA_CRDR_SDINTMASK_CIRIE 0x80
#define VIA_CRDR_SDINTMASK_CRDIE 0x200
#define VIA_CRDR_SDINTMASK_CRTOIE 0x400
#define VIA_CRDR_SDINTMASK_ASCRDIE 0x800
#define VIA_CRDR_SDINTMASK_DTIE 0x1000
#define VIA_CRDR_SDINTMASK_SCIE 0x2000
#define VIA_CRDR_SDINTMASK_RCIE 0x4000
#define VIA_CRDR_SDINTMASK_WCIE 0x8000
#define VIA_CRDR_SDACTIVE_INTMASK \
(VIA_CRDR_SDINTMASK_MBDIE | VIA_CRDR_SDINTMASK_CIRIE \
| VIA_CRDR_SDINTMASK_CRDIE | VIA_CRDR_SDINTMASK_CRTOIE \
| VIA_CRDR_SDINTMASK_DTIE | VIA_CRDR_SDINTMASK_SCIE \
| VIA_CRDR_SDINTMASK_RCIE | VIA_CRDR_SDINTMASK_WCIE)
#define VIA_CRDR_SDSTATUS 0x28
/*
* CECC : Reserved
* WP : SD card Write Protect status
* SLOTD : Reserved
* SLOTG : SD SLOT status(Gpi pin status)
* MBD : Multiple Blocks transfer Done interrupt status
* BDD : Block Data transfer Done interrupt status
* CD : Reserved
* CIR : Card Insertion or Removal interrupt detected on GPI pin
* IO : Reserved
* CRD : Command-Response transfer Done interrupt status
* CRTO : Command-Response response TimeOut interrupt status
* ASCRDIE : Auto Stop Command-Response transfer Done interrupt status
* DT : Data access Timeout interrupt status
* SC : reSponse CRC error interrupt status
* RC : Read data CRC error interrupt status
* WC : Write data CRC error interrupt status
*/
#define VIA_CRDR_SDSTS_CECC 0x01
#define VIA_CRDR_SDSTS_WP 0x02
#define VIA_CRDR_SDSTS_SLOTD 0x04
#define VIA_CRDR_SDSTS_SLOTG 0x08
#define VIA_CRDR_SDSTS_MBD 0x10
#define VIA_CRDR_SDSTS_BDD 0x20
#define VIA_CRDR_SDSTS_CD 0x40
#define VIA_CRDR_SDSTS_CIR 0x80
#define VIA_CRDR_SDSTS_IO 0x100
#define VIA_CRDR_SDSTS_CRD 0x200
#define VIA_CRDR_SDSTS_CRTO 0x400
#define VIA_CRDR_SDSTS_ASCRDIE 0x800
#define VIA_CRDR_SDSTS_DT 0x1000
#define VIA_CRDR_SDSTS_SC 0x2000
#define VIA_CRDR_SDSTS_RC 0x4000
#define VIA_CRDR_SDSTS_WC 0x8000
#define VIA_CRDR_SDSTS_IGN_MASK\
(VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_IO)
#define VIA_CRDR_SDSTS_INT_MASK \
(VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD | VIA_CRDR_SDSTS_CD \
| VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_IO | VIA_CRDR_SDSTS_CRD \
| VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \
| VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
#define VIA_CRDR_SDSTS_W1C_MASK \
(VIA_CRDR_SDSTS_CECC | VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_BDD \
| VIA_CRDR_SDSTS_CD | VIA_CRDR_SDSTS_CIR | VIA_CRDR_SDSTS_CRD \
| VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_ASCRDIE | VIA_CRDR_SDSTS_DT \
| VIA_CRDR_SDSTS_SC | VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
#define VIA_CRDR_SDSTS_CMD_MASK \
(VIA_CRDR_SDSTS_CRD | VIA_CRDR_SDSTS_CRTO | VIA_CRDR_SDSTS_SC)
#define VIA_CRDR_SDSTS_DATA_MASK\
(VIA_CRDR_SDSTS_MBD | VIA_CRDR_SDSTS_DT \
| VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC)
#define VIA_CRDR_SDSTATUS2 0x2a
/*
* CFE : Enable SD host automatic Clock FReezing
*/
#define VIA_CRDR_SDSTS_CFE 0x80
#define VIA_CRDR_SDRSPTMO 0x2C
#define VIA_CRDR_SDCLKSEL 0x30
#define VIA_CRDR_SDEXTCTRL 0x34
#define VIS_CRDR_SDEXTCTRL_AUTOSTOP_SD 0x01
#define VIS_CRDR_SDEXTCTRL_SHIFT_9 0x02
#define VIS_CRDR_SDEXTCTRL_MMC_8BIT 0x04
#define VIS_CRDR_SDEXTCTRL_RELD_BLK 0x08
#define VIS_CRDR_SDEXTCTRL_BAD_CMDA 0x10
#define VIS_CRDR_SDEXTCTRL_BAD_DATA 0x20
#define VIS_CRDR_SDEXTCTRL_AUTOSTOP_SPI 0x40
#define VIA_CRDR_SDEXTCTRL_HISPD 0x80
/* 0x38-0xFF reserved */
/*
* Data DMA Control Registers
*/
#define VIA_CRDR_DMABASEADD 0x0
#define VIA_CRDR_DMACOUNTER 0x4
#define VIA_CRDR_DMACTRL 0x8
/*
* DIR :Transaction Direction
* 0 : From card to memory
* 1 : From memory to card
*/
#define VIA_CRDR_DMACTRL_DIR 0x100
#define VIA_CRDR_DMACTRL_ENIRQ 0x10000
#define VIA_CRDR_DMACTRL_SFTRST 0x1000000
#define VIA_CRDR_DMASTS 0xc
#define VIA_CRDR_DMASTART 0x10
/*0x14-0xFF reserved*/
/*
* PCI Control Registers
*/
/*0x0 - 0x1 reserved*/
#define VIA_CRDR_PCICLKGATT 0x2
/*
* SFTRST :
* 0 : Soft reset all the controller and it will be de-asserted automatically
* 1 : Soft reset is de-asserted
*/
#define VIA_CRDR_PCICLKGATT_SFTRST 0x01
/*
* 3V3 : Pad power select
* 0 : 1.8V
* 1 : 3.3V
* NOTE : No matter what the actual value is, this bit always
* reads as 0. This is a hardware bug.
*/
#define VIA_CRDR_PCICLKGATT_3V3 0x10
/*
* PAD_PWRON : Pad Power on/off select
* 0 : Power off
* 1 : Power on
* NOTE : No matter what the actual value is, this bit always
* reads as 0. This is a hardware bug.
*/
#define VIA_CRDR_PCICLKGATT_PAD_PWRON 0x20
#define VIA_CRDR_PCISDCCLK 0x5
#define VIA_CRDR_PCIDMACLK 0x7
#define VIA_CRDR_PCIDMACLK_SDC 0x2
#define VIA_CRDR_PCIINTCTRL 0x8
#define VIA_CRDR_PCIINTCTRL_SDCIRQEN 0x04
#define VIA_CRDR_PCIINTSTATUS 0x9
#define VIA_CRDR_PCIINTSTATUS_SDC 0x04
#define VIA_CRDR_PCITMOCTRL 0xa
#define VIA_CRDR_PCITMOCTRL_NO 0x0
#define VIA_CRDR_PCITMOCTRL_32US 0x1
#define VIA_CRDR_PCITMOCTRL_256US 0x2
#define VIA_CRDR_PCITMOCTRL_1024US 0x3
#define VIA_CRDR_PCITMOCTRL_256MS 0x4
#define VIA_CRDR_PCITMOCTRL_512MS 0x5
#define VIA_CRDR_PCITMOCTRL_1024MS 0x6
/*0xB-0xFF reserved*/
enum PCI_HOST_CLK_CONTROL {
PCI_CLK_375K = 0x03,
PCI_CLK_8M = 0x04,
PCI_CLK_12M = 0x00,
PCI_CLK_16M = 0x05,
PCI_CLK_24M = 0x01,
PCI_CLK_33M = 0x06,
PCI_CLK_48M = 0x02
};
struct sdhcreg {
u32 sdcontrol_reg;
u32 sdcmdarg_reg;
u32 sdbusmode_reg;
u32 sdblklen_reg;
u32 sdresp_reg[4];
u32 sdcurblkcnt_reg;
u32 sdintmask_reg;
u32 sdstatus_reg;
u32 sdrsptmo_reg;
u32 sdclksel_reg;
u32 sdextctrl_reg;
};
struct pcictrlreg {
u8 reserve[2];
u8 pciclkgat_reg;
u8 pcinfcclk_reg;
u8 pcimscclk_reg;
u8 pcisdclk_reg;
u8 pcicaclk_reg;
u8 pcidmaclk_reg;
u8 pciintctrl_reg;
u8 pciintstatus_reg;
u8 pcitmoctrl_reg;
u8 Resv;
};
struct via_crdr_mmc_host {
struct mmc_host *mmc;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
void __iomem *mmiobase;
void __iomem *sdhc_mmiobase;
void __iomem *ddma_mmiobase;
void __iomem *pcictrl_mmiobase;
struct pcictrlreg pm_pcictrl_reg;
struct sdhcreg pm_sdhc_reg;
struct work_struct carddet_work;
struct tasklet_struct finish_tasklet;
struct timer_list timer;
spinlock_t lock;
u8 power;
int reject;
unsigned int quirks;
};
/* some devices need a very long delay for power to stabilize */
#define VIA_CRDR_QUIRK_300MS_PWRDELAY 0x0001
static struct pci_device_id via_ids[] = {
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_9530,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0,},
{0,}
};
MODULE_DEVICE_TABLE(pci, via_ids);
static void via_print_sdchc(struct via_crdr_mmc_host *host)
{
void __iomem *addrbase = host->sdhc_mmiobase;
pr_debug("SDC MMIO Registers:\n");
pr_debug("SDCONTROL=%08x, SDCMDARG=%08x, SDBUSMODE=%08x\n",
readl(addrbase + VIA_CRDR_SDCTRL),
readl(addrbase + VIA_CRDR_SDCARG),
readl(addrbase + VIA_CRDR_SDBUSMODE));
pr_debug("SDBLKLEN=%08x, SDCURBLKCNT=%08x, SDINTMASK=%08x\n",
readl(addrbase + VIA_CRDR_SDBLKLEN),
readl(addrbase + VIA_CRDR_SDCURBLKCNT),
readl(addrbase + VIA_CRDR_SDINTMASK));
pr_debug("SDSTATUS=%08x, SDCLKSEL=%08x, SDEXTCTRL=%08x\n",
readl(addrbase + VIA_CRDR_SDSTATUS),
readl(addrbase + VIA_CRDR_SDCLKSEL),
readl(addrbase + VIA_CRDR_SDEXTCTRL));
}
static void via_print_pcictrl(struct via_crdr_mmc_host *host)
{
void __iomem *addrbase = host->pcictrl_mmiobase;
pr_debug("PCI Control Registers:\n");
pr_debug("PCICLKGATT=%02x, PCISDCCLK=%02x, PCIDMACLK=%02x\n",
readb(addrbase + VIA_CRDR_PCICLKGATT),
readb(addrbase + VIA_CRDR_PCISDCCLK),
readb(addrbase + VIA_CRDR_PCIDMACLK));
pr_debug("PCIINTCTRL=%02x, PCIINTSTATUS=%02x\n",
readb(addrbase + VIA_CRDR_PCIINTCTRL),
readb(addrbase + VIA_CRDR_PCIINTSTATUS));
}
static void via_save_pcictrlreg(struct via_crdr_mmc_host *host)
{
struct pcictrlreg *pm_pcictrl_reg;
void __iomem *addrbase;
pm_pcictrl_reg = &(host->pm_pcictrl_reg);
addrbase = host->pcictrl_mmiobase;
pm_pcictrl_reg->pciclkgat_reg = readb(addrbase + VIA_CRDR_PCICLKGATT);
pm_pcictrl_reg->pciclkgat_reg |=
VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON;
pm_pcictrl_reg->pcisdclk_reg = readb(addrbase + VIA_CRDR_PCISDCCLK);
pm_pcictrl_reg->pcidmaclk_reg = readb(addrbase + VIA_CRDR_PCIDMACLK);
pm_pcictrl_reg->pciintctrl_reg = readb(addrbase + VIA_CRDR_PCIINTCTRL);
pm_pcictrl_reg->pciintstatus_reg =
readb(addrbase + VIA_CRDR_PCIINTSTATUS);
pm_pcictrl_reg->pcitmoctrl_reg = readb(addrbase + VIA_CRDR_PCITMOCTRL);
}
static void via_restore_pcictrlreg(struct via_crdr_mmc_host *host)
{
struct pcictrlreg *pm_pcictrl_reg;
void __iomem *addrbase;
pm_pcictrl_reg = &(host->pm_pcictrl_reg);
addrbase = host->pcictrl_mmiobase;
writeb(pm_pcictrl_reg->pciclkgat_reg, addrbase + VIA_CRDR_PCICLKGATT);
writeb(pm_pcictrl_reg->pcisdclk_reg, addrbase + VIA_CRDR_PCISDCCLK);
writeb(pm_pcictrl_reg->pcidmaclk_reg, addrbase + VIA_CRDR_PCIDMACLK);
writeb(pm_pcictrl_reg->pciintctrl_reg, addrbase + VIA_CRDR_PCIINTCTRL);
writeb(pm_pcictrl_reg->pciintstatus_reg,
addrbase + VIA_CRDR_PCIINTSTATUS);
writeb(pm_pcictrl_reg->pcitmoctrl_reg, addrbase + VIA_CRDR_PCITMOCTRL);
}
static void via_save_sdcreg(struct via_crdr_mmc_host *host)
{
struct sdhcreg *pm_sdhc_reg;
void __iomem *addrbase;
pm_sdhc_reg = &(host->pm_sdhc_reg);
addrbase = host->sdhc_mmiobase;
pm_sdhc_reg->sdcontrol_reg = readl(addrbase + VIA_CRDR_SDCTRL);
pm_sdhc_reg->sdcmdarg_reg = readl(addrbase + VIA_CRDR_SDCARG);
pm_sdhc_reg->sdbusmode_reg = readl(addrbase + VIA_CRDR_SDBUSMODE);
pm_sdhc_reg->sdblklen_reg = readl(addrbase + VIA_CRDR_SDBLKLEN);
pm_sdhc_reg->sdcurblkcnt_reg = readl(addrbase + VIA_CRDR_SDCURBLKCNT);
pm_sdhc_reg->sdintmask_reg = readl(addrbase + VIA_CRDR_SDINTMASK);
pm_sdhc_reg->sdstatus_reg = readl(addrbase + VIA_CRDR_SDSTATUS);
pm_sdhc_reg->sdrsptmo_reg = readl(addrbase + VIA_CRDR_SDRSPTMO);
pm_sdhc_reg->sdclksel_reg = readl(addrbase + VIA_CRDR_SDCLKSEL);
pm_sdhc_reg->sdextctrl_reg = readl(addrbase + VIA_CRDR_SDEXTCTRL);
}
static void via_restore_sdcreg(struct via_crdr_mmc_host *host)
{
struct sdhcreg *pm_sdhc_reg;
void __iomem *addrbase;
pm_sdhc_reg = &(host->pm_sdhc_reg);
addrbase = host->sdhc_mmiobase;
writel(pm_sdhc_reg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL);
writel(pm_sdhc_reg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG);
writel(pm_sdhc_reg->sdbusmode_reg, addrbase + VIA_CRDR_SDBUSMODE);
writel(pm_sdhc_reg->sdblklen_reg, addrbase + VIA_CRDR_SDBLKLEN);
writel(pm_sdhc_reg->sdcurblkcnt_reg, addrbase + VIA_CRDR_SDCURBLKCNT);
writel(pm_sdhc_reg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK);
writel(pm_sdhc_reg->sdstatus_reg, addrbase + VIA_CRDR_SDSTATUS);
writel(pm_sdhc_reg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO);
writel(pm_sdhc_reg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL);
writel(pm_sdhc_reg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL);
}
static void via_pwron_sleep(struct via_crdr_mmc_host *sdhost)
{
if (sdhost->quirks & VIA_CRDR_QUIRK_300MS_PWRDELAY)
msleep(300);
else
msleep(3);
}
static void via_set_ddma(struct via_crdr_mmc_host *host,
dma_addr_t dmaaddr, u32 count, int dir, int enirq)
{
void __iomem *addrbase;
u32 ctrl_data = 0;
if (enirq)
ctrl_data |= VIA_CRDR_DMACTRL_ENIRQ;
if (dir)
ctrl_data |= VIA_CRDR_DMACTRL_DIR;
addrbase = host->ddma_mmiobase;
writel(dmaaddr, addrbase + VIA_CRDR_DMABASEADD);
writel(count, addrbase + VIA_CRDR_DMACOUNTER);
writel(ctrl_data, addrbase + VIA_CRDR_DMACTRL);
writel(0x01, addrbase + VIA_CRDR_DMASTART);
/* It seems that our DMA can not work normally with 375kHz clock */
/* FIXME: don't brute-force 8MHz but use PIO at 375kHz !! */
addrbase = host->pcictrl_mmiobase;
if (readb(addrbase + VIA_CRDR_PCISDCCLK) == PCI_CLK_375K) {
dev_info(host->mmc->parent, "forcing card speed to 8MHz\n");
writeb(PCI_CLK_8M, addrbase + VIA_CRDR_PCISDCCLK);
}
}
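/*
 * The DMA engine transfers exactly one contiguous buffer per request,
 * which is why the host advertises max_segs = 1 and the dma_map_sg()
 * below must collapse to a single mapping.
 */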
static void via_sdc_preparedata(struct via_crdr_mmc_host *host,
struct mmc_data *data)
{
void __iomem *addrbase;
u32 blk_reg;
int count;
WARN_ON(host->data);
/* Sanity checks */
BUG_ON(data->blksz > host->mmc->max_blk_size);
BUG_ON(data->blocks > host->mmc->max_blk_count);
host->data = data;
count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
((data->flags & MMC_DATA_READ) ?
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
BUG_ON(count != 1);
via_set_ddma(host, sg_dma_address(data->sg), sg_dma_len(data->sg),
(data->flags & MMC_DATA_WRITE) ? 1 : 0, 1);
addrbase = host->sdhc_mmiobase;
blk_reg = data->blksz - 1;
blk_reg |= VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
blk_reg |= (data->blocks) << 16;
writel(blk_reg, addrbase + VIA_CRDR_SDBLKLEN);
}
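/*
 * The raw response is spread byte-reversed across the four SDRESP
 * registers; reassemble it into the layout the mmc core expects in
 * cmd->resp[].
 */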
static void via_sdc_get_response(struct via_crdr_mmc_host *host,
struct mmc_command *cmd)
{
void __iomem *addrbase = host->sdhc_mmiobase;
u32 dwdata0 = readl(addrbase + VIA_CRDR_SDRESP0);
u32 dwdata1 = readl(addrbase + VIA_CRDR_SDRESP1);
u32 dwdata2 = readl(addrbase + VIA_CRDR_SDRESP2);
u32 dwdata3 = readl(addrbase + VIA_CRDR_SDRESP3);
if (cmd->flags & MMC_RSP_136) {
cmd->resp[0] = ((u8) (dwdata1)) |
(((u8) (dwdata0 >> 24)) << 8) |
(((u8) (dwdata0 >> 16)) << 16) |
(((u8) (dwdata0 >> 8)) << 24);
cmd->resp[1] = ((u8) (dwdata2)) |
(((u8) (dwdata1 >> 24)) << 8) |
(((u8) (dwdata1 >> 16)) << 16) |
(((u8) (dwdata1 >> 8)) << 24);
cmd->resp[2] = ((u8) (dwdata3)) |
(((u8) (dwdata2 >> 24)) << 8) |
(((u8) (dwdata2 >> 16)) << 16) |
(((u8) (dwdata2 >> 8)) << 24);
cmd->resp[3] = 0xff |
((((u8) (dwdata3 >> 24))) << 8) |
(((u8) (dwdata3 >> 16)) << 16) |
(((u8) (dwdata3 >> 8)) << 24);
} else {
dwdata0 >>= 8;
cmd->resp[0] = ((dwdata0 & 0xff) << 24) |
(((dwdata0 >> 8) & 0xff) << 16) |
(((dwdata0 >> 16) & 0xff) << 8) | (dwdata1 & 0xff);
dwdata1 >>= 8;
cmd->resp[1] = ((dwdata1 & 0xff) << 24) |
(((dwdata1 >> 8) & 0xff) << 16) |
(((dwdata1 >> 16) & 0xff) << 8);
}
}
static void via_sdc_send_command(struct via_crdr_mmc_host *host,
struct mmc_command *cmd)
{
void __iomem *addrbase;
struct mmc_data *data;
u32 cmdctrl = 0;
WARN_ON(host->cmd);
data = cmd->data;
mod_timer(&host->timer, jiffies + HZ);
host->cmd = cmd;
/*Command index*/
cmdctrl = cmd->opcode << 8;
/*Response type*/
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE:
cmdctrl |= VIA_CRDR_SDCTRL_RSP_NONE;
break;
case MMC_RSP_R1:
cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1;
break;
case MMC_RSP_R1B:
cmdctrl |= VIA_CRDR_SDCTRL_RSP_R1B;
break;
case MMC_RSP_R2:
cmdctrl |= VIA_CRDR_SDCTRL_RSP_R2;
break;
case MMC_RSP_R3:
cmdctrl |= VIA_CRDR_SDCTRL_RSP_R3;
break;
default:
pr_err("%s: cmd->flag is not valid\n", mmc_hostname(host->mmc));
break;
}
if (!(cmd->data))
goto nodata;
via_sdc_preparedata(host, data);
/*Command control*/
if (data->blocks > 1) {
if (data->flags & MMC_DATA_WRITE) {
cmdctrl |= VIA_CRDR_SDCTRL_WRITE;
cmdctrl |= VIA_CRDR_SDCTRL_MULTI_WR;
} else {
cmdctrl |= VIA_CRDR_SDCTRL_MULTI_RD;
}
} else {
if (data->flags & MMC_DATA_WRITE) {
cmdctrl |= VIA_CRDR_SDCTRL_WRITE;
cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_WR;
} else {
cmdctrl |= VIA_CRDR_SDCTRL_SINGLE_RD;
}
}
nodata:
if (cmd == host->mrq->stop)
cmdctrl |= VIA_CRDR_SDCTRL_STOP;
cmdctrl |= VIA_CRDR_SDCTRL_START;
addrbase = host->sdhc_mmiobase;
writel(cmd->arg, addrbase + VIA_CRDR_SDCARG);
writel(cmdctrl, addrbase + VIA_CRDR_SDCTRL);
}
static void via_sdc_finish_data(struct via_crdr_mmc_host *host)
{
struct mmc_data *data;
BUG_ON(!host->data);
data = host->data;
host->data = NULL;
if (data->error)
data->bytes_xfered = 0;
else
data->bytes_xfered = data->blocks * data->blksz;
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
((data->flags & MMC_DATA_READ) ?
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
if (data->stop)
via_sdc_send_command(host, data->stop);
else
tasklet_schedule(&host->finish_tasklet);
}
static void via_sdc_finish_command(struct via_crdr_mmc_host *host)
{
via_sdc_get_response(host, host->cmd);
host->cmd->error = 0;
if (!host->cmd->data)
tasklet_schedule(&host->finish_tasklet);
host->cmd = NULL;
}
static void via_sdc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
void __iomem *addrbase;
struct via_crdr_mmc_host *host;
unsigned long flags;
u16 status;
host = mmc_priv(mmc);
spin_lock_irqsave(&host->lock, flags);
addrbase = host->pcictrl_mmiobase;
writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK);
status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
status &= VIA_CRDR_SDSTS_W1C_MASK;
writew(status, host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
WARN_ON(host->mrq != NULL);
host->mrq = mrq;
status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
if (!(status & VIA_CRDR_SDSTS_SLOTG) || host->reject) {
host->mrq->cmd->error = -ENOMEDIUM;
tasklet_schedule(&host->finish_tasklet);
} else {
via_sdc_send_command(host, mrq->cmd);
}
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
static void via_sdc_set_power(struct via_crdr_mmc_host *host,
unsigned short power, unsigned int on)
{
unsigned long flags;
u8 gatt;
spin_lock_irqsave(&host->lock, flags);
host->power = (1 << power);
gatt = readb(host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
if (host->power == MMC_VDD_165_195)
gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
else
gatt |= VIA_CRDR_PCICLKGATT_3V3;
if (on)
gatt |= VIA_CRDR_PCICLKGATT_PAD_PWRON;
else
gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
via_pwron_sleep(host);
}
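/*
 * Clock selection below maps the requested rate onto the closest
 * supported host clock, rounding down; anything below 8 MHz falls back
 * to the 375 kHz identification rate.
 */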
static void via_sdc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct via_crdr_mmc_host *host;
unsigned long flags;
void __iomem *addrbase;
u32 org_data, sdextctrl;
u8 clock;
host = mmc_priv(mmc);
spin_lock_irqsave(&host->lock, flags);
addrbase = host->sdhc_mmiobase;
org_data = readl(addrbase + VIA_CRDR_SDBUSMODE);
sdextctrl = readl(addrbase + VIA_CRDR_SDEXTCTRL);
if (ios->bus_width == MMC_BUS_WIDTH_1)
org_data &= ~VIA_CRDR_SDMODE_4BIT;
else
org_data |= VIA_CRDR_SDMODE_4BIT;
if (ios->power_mode == MMC_POWER_OFF)
org_data &= ~VIA_CRDR_SDMODE_CLK_ON;
else
org_data |= VIA_CRDR_SDMODE_CLK_ON;
if (ios->timing == MMC_TIMING_SD_HS)
sdextctrl |= VIA_CRDR_SDEXTCTRL_HISPD;
else
sdextctrl &= ~VIA_CRDR_SDEXTCTRL_HISPD;
writel(org_data, addrbase + VIA_CRDR_SDBUSMODE);
writel(sdextctrl, addrbase + VIA_CRDR_SDEXTCTRL);
if (ios->clock >= 48000000)
clock = PCI_CLK_48M;
else if (ios->clock >= 33000000)
clock = PCI_CLK_33M;
else if (ios->clock >= 24000000)
clock = PCI_CLK_24M;
else if (ios->clock >= 16000000)
clock = PCI_CLK_16M;
else if (ios->clock >= 12000000)
clock = PCI_CLK_12M;
else if (ios->clock >= 8000000)
clock = PCI_CLK_8M;
else
clock = PCI_CLK_375K;
addrbase = host->pcictrl_mmiobase;
if (readb(addrbase + VIA_CRDR_PCISDCCLK) != clock)
writeb(clock, addrbase + VIA_CRDR_PCISDCCLK);
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
if (ios->power_mode != MMC_POWER_OFF)
via_sdc_set_power(host, ios->vdd, 1);
else
via_sdc_set_power(host, ios->vdd, 0);
}
static int via_sdc_get_ro(struct mmc_host *mmc)
{
struct via_crdr_mmc_host *host;
unsigned long flags;
u16 status;
host = mmc_priv(mmc);
spin_lock_irqsave(&host->lock, flags);
status = readw(host->sdhc_mmiobase + VIA_CRDR_SDSTATUS);
spin_unlock_irqrestore(&host->lock, flags);
return !(status & VIA_CRDR_SDSTS_WP);
}
static const struct mmc_host_ops via_sdc_ops = {
.request = via_sdc_request,
.set_ios = via_sdc_set_ios,
.get_ro = via_sdc_get_ro,
};
static void via_reset_pcictrl(struct via_crdr_mmc_host *host)
{
unsigned long flags;
u8 gatt;
spin_lock_irqsave(&host->lock, flags);
via_save_pcictrlreg(host);
via_save_sdcreg(host);
spin_unlock_irqrestore(&host->lock, flags);
gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON;
if (host->power == MMC_VDD_165_195)
gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
else
gatt |= VIA_CRDR_PCICLKGATT_3V3;
writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
via_pwron_sleep(host);
gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
writeb(gatt, host->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
msleep(3);
spin_lock_irqsave(&host->lock, flags);
via_restore_pcictrlreg(host);
via_restore_sdcreg(host);
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
static void via_sdc_cmd_isr(struct via_crdr_mmc_host *host, u16 intmask)
{
BUG_ON(intmask == 0);
if (!host->cmd) {
pr_err("%s: Got command interrupt 0x%x even "
"though no command operation was in progress.\n",
mmc_hostname(host->mmc), intmask);
return;
}
if (intmask & VIA_CRDR_SDSTS_CRTO)
host->cmd->error = -ETIMEDOUT;
else if (intmask & VIA_CRDR_SDSTS_SC)
host->cmd->error = -EILSEQ;
if (host->cmd->error)
tasklet_schedule(&host->finish_tasklet);
else if (intmask & VIA_CRDR_SDSTS_CRD)
via_sdc_finish_command(host);
}
static void via_sdc_data_isr(struct via_crdr_mmc_host *host, u16 intmask)
{
BUG_ON(intmask == 0);
if (intmask & VIA_CRDR_SDSTS_DT)
host->data->error = -ETIMEDOUT;
else if (intmask & (VIA_CRDR_SDSTS_RC | VIA_CRDR_SDSTS_WC))
host->data->error = -EILSEQ;
via_sdc_finish_data(host);
}
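/*
 * Shared PCI interrupt handler: bail out early unless the PCI-level
 * status flags this device, then ack and dispatch card-detect, command
 * and data events separately.
 */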
static irqreturn_t via_sdc_isr(int irq, void *dev_id)
{
struct via_crdr_mmc_host *sdhost = dev_id;
void __iomem *addrbase;
u8 pci_status;
u16 sd_status;
irqreturn_t result;
if (!sdhost)
return IRQ_NONE;
spin_lock(&sdhost->lock);
addrbase = sdhost->pcictrl_mmiobase;
pci_status = readb(addrbase + VIA_CRDR_PCIINTSTATUS);
if (!(pci_status & VIA_CRDR_PCIINTSTATUS_SDC)) {
result = IRQ_NONE;
goto out;
}
addrbase = sdhost->sdhc_mmiobase;
sd_status = readw(addrbase + VIA_CRDR_SDSTATUS);
sd_status &= VIA_CRDR_SDSTS_INT_MASK;
sd_status &= ~VIA_CRDR_SDSTS_IGN_MASK;
if (!sd_status) {
result = IRQ_NONE;
goto out;
}
if (sd_status & VIA_CRDR_SDSTS_CIR) {
writew(sd_status & VIA_CRDR_SDSTS_CIR,
addrbase + VIA_CRDR_SDSTATUS);
schedule_work(&sdhost->carddet_work);
}
sd_status &= ~VIA_CRDR_SDSTS_CIR;
if (sd_status & VIA_CRDR_SDSTS_CMD_MASK) {
writew(sd_status & VIA_CRDR_SDSTS_CMD_MASK,
addrbase + VIA_CRDR_SDSTATUS);
via_sdc_cmd_isr(sdhost, sd_status & VIA_CRDR_SDSTS_CMD_MASK);
}
if (sd_status & VIA_CRDR_SDSTS_DATA_MASK) {
writew(sd_status & VIA_CRDR_SDSTS_DATA_MASK,
addrbase + VIA_CRDR_SDSTATUS);
via_sdc_data_isr(sdhost, sd_status & VIA_CRDR_SDSTS_DATA_MASK);
}
sd_status &= ~(VIA_CRDR_SDSTS_CMD_MASK | VIA_CRDR_SDSTS_DATA_MASK);
if (sd_status) {
pr_err("%s: Unexpected interrupt 0x%x\n",
mmc_hostname(sdhost->mmc), sd_status);
writew(sd_status, addrbase + VIA_CRDR_SDSTATUS);
}
result = IRQ_HANDLED;
mmiowb();
out:
spin_unlock(&sdhost->lock);
return result;
}
static void via_sdc_timeout(unsigned long ulongdata)
{
struct via_crdr_mmc_host *sdhost;
unsigned long flags;
sdhost = (struct via_crdr_mmc_host *)ulongdata;
spin_lock_irqsave(&sdhost->lock, flags);
if (sdhost->mrq) {
pr_err("%s: Timeout waiting for hardware interrupt."
"cmd:0x%x\n", mmc_hostname(sdhost->mmc),
sdhost->mrq->cmd->opcode);
if (sdhost->data) {
writel(VIA_CRDR_DMACTRL_SFTRST,
sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
sdhost->data->error = -ETIMEDOUT;
via_sdc_finish_data(sdhost);
} else {
if (sdhost->cmd)
sdhost->cmd->error = -ETIMEDOUT;
else
sdhost->mrq->cmd->error = -ETIMEDOUT;
tasklet_schedule(&sdhost->finish_tasklet);
}
}
mmiowb();
spin_unlock_irqrestore(&sdhost->lock, flags);
}
static void via_sdc_tasklet_finish(unsigned long param)
{
struct via_crdr_mmc_host *host;
unsigned long flags;
struct mmc_request *mrq;
host = (struct via_crdr_mmc_host *)param;
spin_lock_irqsave(&host->lock, flags);
del_timer(&host->timer);
mrq = host->mrq;
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
spin_unlock_irqrestore(&host->lock, flags);
mmc_request_done(host->mmc, mrq);
}
static void via_sdc_card_detect(struct work_struct *work)
{
struct via_crdr_mmc_host *host;
void __iomem *addrbase;
unsigned long flags;
u16 status;
host = container_of(work, struct via_crdr_mmc_host, carddet_work);
addrbase = host->ddma_mmiobase;
writel(VIA_CRDR_DMACTRL_SFTRST, addrbase + VIA_CRDR_DMACTRL);
spin_lock_irqsave(&host->lock, flags);
addrbase = host->pcictrl_mmiobase;
writeb(VIA_CRDR_PCIDMACLK_SDC, addrbase + VIA_CRDR_PCIDMACLK);
addrbase = host->sdhc_mmiobase;
status = readw(addrbase + VIA_CRDR_SDSTATUS);
if (!(status & VIA_CRDR_SDSTS_SLOTG)) {
if (host->mrq) {
pr_err("%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
host->mrq->cmd->error = -ENOMEDIUM;
tasklet_schedule(&host->finish_tasklet);
}
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
via_reset_pcictrl(host);
spin_lock_irqsave(&host->lock, flags);
}
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
via_print_pcictrl(host);
via_print_sdchc(host);
mmc_detect_change(host->mmc, msecs_to_jiffies(500));
}
static void via_init_mmc_host(struct via_crdr_mmc_host *host)
{
struct mmc_host *mmc = host->mmc;
void __iomem *addrbase;
u32 lenreg;
u32 status;
init_timer(&host->timer);
host->timer.data = (unsigned long)host;
host->timer.function = via_sdc_timeout;
spin_lock_init(&host->lock);
mmc->f_min = VIA_CRDR_MIN_CLOCK;
mmc->f_max = VIA_CRDR_MAX_CLOCK;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED;
mmc->ops = &via_sdc_ops;
/*Hardware cannot do scatter lists*/
mmc->max_segs = 1;
mmc->max_blk_size = VIA_CRDR_MAX_BLOCK_LENGTH;
mmc->max_blk_count = VIA_CRDR_MAX_BLOCK_COUNT;
mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_req_size = mmc->max_seg_size;
INIT_WORK(&host->carddet_work, via_sdc_card_detect);
tasklet_init(&host->finish_tasklet, via_sdc_tasklet_finish,
(unsigned long)host);
addrbase = host->sdhc_mmiobase;
writel(0x0, addrbase + VIA_CRDR_SDINTMASK);
msleep(1);
lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN);
status = readw(addrbase + VIA_CRDR_SDSTATUS);
status &= VIA_CRDR_SDSTS_W1C_MASK;
writew(status, addrbase + VIA_CRDR_SDSTATUS);
status = readw(addrbase + VIA_CRDR_SDSTATUS2);
status |= VIA_CRDR_SDSTS_CFE;
writew(status, addrbase + VIA_CRDR_SDSTATUS2);
writeb(0x0, addrbase + VIA_CRDR_SDEXTCTRL);
writel(VIA_CRDR_SDACTIVE_INTMASK, addrbase + VIA_CRDR_SDINTMASK);
msleep(1);
}
static int __devinit via_sd_probe(struct pci_dev *pcidev,
const struct pci_device_id *id)
{
struct mmc_host *mmc;
struct via_crdr_mmc_host *sdhost;
u32 base, len;
u8 gatt;
int ret;
pr_info(DRV_NAME
": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n",
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
(int)pcidev->revision);
ret = pci_enable_device(pcidev);
if (ret)
return ret;
ret = pci_request_regions(pcidev, DRV_NAME);
if (ret)
goto disable;
pci_write_config_byte(pcidev, VIA_CRDR_PCI_WORK_MODE, 0);
pci_write_config_byte(pcidev, VIA_CRDR_PCI_DBG_MODE, 0);
mmc = mmc_alloc_host(sizeof(struct via_crdr_mmc_host), &pcidev->dev);
if (!mmc) {
ret = -ENOMEM;
goto release;
}
sdhost = mmc_priv(mmc);
sdhost->mmc = mmc;
dev_set_drvdata(&pcidev->dev, sdhost);
len = pci_resource_len(pcidev, 0);
base = pci_resource_start(pcidev, 0);
sdhost->mmiobase = ioremap_nocache(base, len);
if (!sdhost->mmiobase) {
ret = -ENOMEM;
goto free_mmc_host;
}
sdhost->sdhc_mmiobase =
sdhost->mmiobase + VIA_CRDR_SDC_OFF;
sdhost->ddma_mmiobase =
sdhost->mmiobase + VIA_CRDR_DDMA_OFF;
sdhost->pcictrl_mmiobase =
sdhost->mmiobase + VIA_CRDR_PCICTRL_OFF;
sdhost->power = MMC_VDD_165_195;
gatt = VIA_CRDR_PCICLKGATT_3V3 | VIA_CRDR_PCICLKGATT_PAD_PWRON;
writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
via_pwron_sleep(sdhost);
gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
msleep(3);
via_init_mmc_host(sdhost);
ret =
request_irq(pcidev->irq, via_sdc_isr, IRQF_SHARED, DRV_NAME,
sdhost);
if (ret)
goto unmap;
writeb(VIA_CRDR_PCIINTCTRL_SDCIRQEN,
sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
writeb(VIA_CRDR_PCITMOCTRL_1024MS,
sdhost->pcictrl_mmiobase + VIA_CRDR_PCITMOCTRL);
/* device-specific quirks */
if (pcidev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
pcidev->subsystem_device == 0x3891)
sdhost->quirks = VIA_CRDR_QUIRK_300MS_PWRDELAY;
mmc_add_host(mmc);
return 0;
unmap:
iounmap(sdhost->mmiobase);
free_mmc_host:
dev_set_drvdata(&pcidev->dev, NULL);
mmc_free_host(mmc);
release:
pci_release_regions(pcidev);
disable:
pci_disable_device(pcidev);
return ret;
}
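/*
 * Illustrative sketch, not part of the driver: via_sd_probe() above
 * unwinds with a label ladder that releases resources in the reverse
 * order of acquisition. The miniature below shows the same shape;
 * example_acquire_a/b and example_release_a are placeholder stubs, not
 * real kernel APIs.
 */
static int example_acquire_a(void) { return 0; }	/* placeholder */
static int example_acquire_b(void) { return -1; }	/* placeholder */
static void example_release_a(void) { }			/* placeholder */
static int example_probe_unwind(void)
{
	int ret;
	ret = example_acquire_a();	/* cf. pci_enable_device() */
	if (ret)
		return ret;		/* nothing to undo yet */
	ret = example_acquire_b();	/* cf. pci_request_regions() */
	if (ret)
		goto undo_a;		/* undo only what succeeded */
	return 0;
undo_a:
	example_release_a();		/* cf. pci_disable_device() */
	return ret;
}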
static void __devexit via_sd_remove(struct pci_dev *pcidev)
{
struct via_crdr_mmc_host *sdhost = pci_get_drvdata(pcidev);
unsigned long flags;
u8 gatt;
spin_lock_irqsave(&sdhost->lock, flags);
/* Ensure we don't accept more commands from mmc layer */
sdhost->reject = 1;
/* Disable generating further interrupts */
writeb(0x0, sdhost->pcictrl_mmiobase + VIA_CRDR_PCIINTCTRL);
mmiowb();
if (sdhost->mrq) {
pr_err("%s: Controller removed during "
"transfer\n", mmc_hostname(sdhost->mmc));
/* make sure all DMA is stopped */
writel(VIA_CRDR_DMACTRL_SFTRST,
sdhost->ddma_mmiobase + VIA_CRDR_DMACTRL);
mmiowb();
sdhost->mrq->cmd->error = -ENOMEDIUM;
if (sdhost->mrq->stop)
sdhost->mrq->stop->error = -ENOMEDIUM;
tasklet_schedule(&sdhost->finish_tasklet);
}
spin_unlock_irqrestore(&sdhost->lock, flags);
mmc_remove_host(sdhost->mmc);
free_irq(pcidev->irq, sdhost);
del_timer_sync(&sdhost->timer);
tasklet_kill(&sdhost->finish_tasklet);
/* switch off power */
gatt = readb(sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
gatt &= ~VIA_CRDR_PCICLKGATT_PAD_PWRON;
writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
iounmap(sdhost->mmiobase);
dev_set_drvdata(&pcidev->dev, NULL);
mmc_free_host(sdhost->mmc);
pci_release_regions(pcidev);
pci_disable_device(pcidev);
pr_info(DRV_NAME
": VIA SDMMC controller at %s [%04x:%04x] has been removed\n",
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}
#ifdef CONFIG_PM
static void via_init_sdc_pm(struct via_crdr_mmc_host *host)
{
struct sdhcreg *pm_sdhcreg;
void __iomem *addrbase;
u32 lenreg;
u16 status;
pm_sdhcreg = &(host->pm_sdhc_reg);
addrbase = host->sdhc_mmiobase;
writel(0x0, addrbase + VIA_CRDR_SDINTMASK);
lenreg = VIA_CRDR_SDBLKLEN_GPIDET | VIA_CRDR_SDBLKLEN_INTEN;
writel(lenreg, addrbase + VIA_CRDR_SDBLKLEN);
status = readw(addrbase + VIA_CRDR_SDSTATUS);
status &= VIA_CRDR_SDSTS_W1C_MASK;
writew(status, addrbase + VIA_CRDR_SDSTATUS);
status = readw(addrbase + VIA_CRDR_SDSTATUS2);
status |= VIA_CRDR_SDSTS_CFE;
writew(status, addrbase + VIA_CRDR_SDSTATUS2);
writel(pm_sdhcreg->sdcontrol_reg, addrbase + VIA_CRDR_SDCTRL);
writel(pm_sdhcreg->sdcmdarg_reg, addrbase + VIA_CRDR_SDCARG);
writel(pm_sdhcreg->sdintmask_reg, addrbase + VIA_CRDR_SDINTMASK);
writel(pm_sdhcreg->sdrsptmo_reg, addrbase + VIA_CRDR_SDRSPTMO);
writel(pm_sdhcreg->sdclksel_reg, addrbase + VIA_CRDR_SDCLKSEL);
writel(pm_sdhcreg->sdextctrl_reg, addrbase + VIA_CRDR_SDEXTCTRL);
via_print_pcictrl(host);
via_print_sdchc(host);
}
static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)
{
struct via_crdr_mmc_host *host;
int ret = 0;
host = pci_get_drvdata(pcidev);
via_save_pcictrlreg(host);
via_save_sdcreg(host);
ret = mmc_suspend_host(host->mmc);
pci_save_state(pcidev);
pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);
pci_disable_device(pcidev);
pci_set_power_state(pcidev, pci_choose_state(pcidev, state));
return ret;
}
static int via_sd_resume(struct pci_dev *pcidev)
{
struct via_crdr_mmc_host *sdhost;
int ret = 0;
u8 gatt;
sdhost = pci_get_drvdata(pcidev);
gatt = VIA_CRDR_PCICLKGATT_PAD_PWRON;
if (sdhost->power == MMC_VDD_165_195)
gatt &= ~VIA_CRDR_PCICLKGATT_3V3;
else
gatt |= VIA_CRDR_PCICLKGATT_3V3;
writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
via_pwron_sleep(sdhost);
gatt |= VIA_CRDR_PCICLKGATT_SFTRST;
writeb(gatt, sdhost->pcictrl_mmiobase + VIA_CRDR_PCICLKGATT);
msleep(3);
msleep(100);
pci_set_power_state(pcidev, PCI_D0);
pci_restore_state(pcidev);
ret = pci_enable_device(pcidev);
if (ret)
return ret;
via_restore_pcictrlreg(sdhost);
via_init_sdc_pm(sdhost);
ret = mmc_resume_host(sdhost->mmc);
return ret;
}
#else /* CONFIG_PM */
#define via_sd_suspend NULL
#define via_sd_resume NULL
#endif /* CONFIG_PM */
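/*
 * Illustrative sketch, not part of the driver: when CONFIG_PM is off,
 * the suspend/resume symbols above are defined to NULL so the pci_driver
 * table below stays valid in both configurations. The same idiom in
 * miniature, with hypothetical names:
 */
struct example_drv_ops {
	int (*suspend)(void);
	int (*resume)(void);
};
#ifdef EXAMPLE_PM			/* stand-in for CONFIG_PM */
static int example_suspend(void) { return 0; }
static int example_resume(void) { return 0; }
#else
#define example_suspend NULL
#define example_resume NULL
#endif
static const struct example_drv_ops example_ops = {
	.suspend = example_suspend,	/* NULL when PM is compiled out */
	.resume = example_resume,
};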
static struct pci_driver via_sd_driver = {
.name = DRV_NAME,
.id_table = via_ids,
.probe = via_sd_probe,
.remove = __devexit_p(via_sd_remove),
.suspend = via_sd_suspend,
.resume = via_sd_resume,
};
static int __init via_sd_drv_init(void)
{
pr_info(DRV_NAME ": VIA SD/MMC Card Reader driver "
"(C) 2008 VIA Technologies, Inc.\n");
return pci_register_driver(&via_sd_driver);
}
static void __exit via_sd_drv_exit(void)
{
pci_unregister_driver(&via_sd_driver);
}
module_init(via_sd_drv_init);
module_exit(via_sd_drv_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("VIA Technologies Inc.");
MODULE_DESCRIPTION("VIA SD/MMC Card Interface driver");
| gpl-2.0 |
imoseyon/leanKernel-note3 | net/sched/cls_route.c | 5457 | 12512 | /*
* net/sched/cls_route.c ROUTE4 classifier.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 * This allows us to use direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 * are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX"
 */
struct route4_fastmap {
struct route4_filter *filter;
u32 id;
int iif;
};
struct route4_head {
struct route4_fastmap fastmap[16];
struct route4_bucket *table[256 + 1];
};
struct route4_bucket {
/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
struct route4_filter *ht[16 + 16 + 1];
};
struct route4_filter {
struct route4_filter *next;
u32 id;
int iif;
struct tcf_result res;
struct tcf_exts exts;
u32 handle;
struct route4_bucket *bkt;
};
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
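/*
 * Illustrative sketch, not part of the classifier: ROUTE4_FAILURE is a
 * sentinel pointer cached in the fastmap so that repeated failed lookups
 * short-circuit. The hypothetical predicate below just names that check.
 */
static inline bool route4_example_is_negative_cache(const struct route4_filter *f)
{
	return f == ROUTE4_FAILURE;	/* cached miss, not a real filter */
}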
static const struct tcf_ext_map route_ext_map = {
.police = TCA_ROUTE4_POLICE,
.action = TCA_ROUTE4_ACT
};
static inline int route4_fastmap_hash(u32 id, int iif)
{
return id & 0xF;
}
static void
route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
spin_lock_bh(root_lock);
memset(head->fastmap, 0, sizeof(head->fastmap));
spin_unlock_bh(root_lock);
}
static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
struct route4_filter *f)
{
int h = route4_fastmap_hash(id, iif);
head->fastmap[h].id = id;
head->fastmap[h].iif = iif;
head->fastmap[h].filter = f;
}
static inline int route4_hash_to(u32 id)
{
return id & 0xFF;
}
static inline int route4_hash_from(u32 id)
{
return (id >> 16) & 0xF;
}
static inline int route4_hash_iif(int iif)
{
return 16 + ((iif >> 16) & 0xF);
}
static inline int route4_hash_wild(void)
{
return 32;
}
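/*
 * Illustrative sketch, not part of the classifier: a hypothetical helper
 * that only calls the real hash functions above, to make the two-level
 * layout concrete (256+1 outer buckets, 16+16+1 chains per bucket).
 */
static inline void route4_example_buckets(u32 id, int iif,
	int *to, int *from, int *ifh)
{
	*to = route4_hash_to(id);	/* outer table index, 0..255 */
	*from = route4_hash_from(id);	/* FROM chains, slots 0..15 */
	*ifh = route4_hash_iif(iif);	/* IIF chains, slots 16..31 */
}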
#define ROUTE4_APPLY_RESULT() \
{ \
*res = f->res; \
if (tcf_exts_is_available(&f->exts)) { \
int r = tcf_exts_exec(skb, &f->exts, res); \
if (r < 0) { \
dont_cache = 1; \
continue; \
} \
return r; \
} else if (!dont_cache) \
route4_set_fastmap(head, id, iif, f); \
return 0; \
}
static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
struct route4_head *head = (struct route4_head *)tp->root;
struct dst_entry *dst;
struct route4_bucket *b;
struct route4_filter *f;
u32 id, h;
int iif, dont_cache = 0;
dst = skb_dst(skb);
if (!dst)
goto failure;
id = dst->tclassid;
if (head == NULL)
goto old_method;
iif = ((struct rtable *)dst)->rt_iif;
h = route4_fastmap_hash(id, iif);
if (id == head->fastmap[h].id &&
iif == head->fastmap[h].iif &&
(f = head->fastmap[h].filter) != NULL) {
if (f == ROUTE4_FAILURE)
goto failure;
*res = f->res;
return 0;
}
h = route4_hash_to(id);
restart:
b = head->table[h];
if (b) {
for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
if (f->id == id)
ROUTE4_APPLY_RESULT();
for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
if (f->iif == iif)
ROUTE4_APPLY_RESULT();
for (f = b->ht[route4_hash_wild()]; f; f = f->next)
ROUTE4_APPLY_RESULT();
}
if (h < 256) {
h = 256;
id &= ~0xFFFF;
goto restart;
}
if (!dont_cache)
route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
return -1;
old_method:
if (id && (TC_H_MAJ(id) == 0 ||
!(TC_H_MAJ(id^tp->q->handle)))) {
res->classid = id;
res->class = 0;
return 0;
}
return -1;
}
static inline u32 to_hash(u32 id)
{
u32 h = id & 0xFF;
if (id & 0x8000)
h += 256;
return h;
}
static inline u32 from_hash(u32 id)
{
id &= 0xFFFF;
if (id == 0xFFFF)
return 32;
if (!(id & 0x8000)) {
if (id > 255)
return 256;
return id & 0xF;
}
return 16 + (id & 0xF);
}
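/*
 * Illustrative sketch, not part of the classifier: a filter handle packs
 * the FROM/IIF selector in its upper 16 bits and the TO tag below; the
 * hypothetical helper restates the decode used by route4_get() just
 * below.
 */
static inline void route4_example_decode_handle(u32 handle, u32 *h1, u32 *h2)
{
	*h1 = to_hash(handle);		/* 0..256: TO bucket (256 = IIF/wild) */
	*h2 = from_hash(handle >> 16);	/* 0..32: chain within the bucket */
}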
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
struct route4_head *head = (struct route4_head *)tp->root;
struct route4_bucket *b;
struct route4_filter *f;
unsigned int h1, h2;
if (!head)
return 0;
h1 = to_hash(handle);
if (h1 > 256)
return 0;
h2 = from_hash(handle >> 16);
if (h2 > 32)
return 0;
b = head->table[h1];
if (b) {
for (f = b->ht[h2]; f; f = f->next)
if (f->handle == handle)
return (unsigned long)f;
}
return 0;
}
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}
static int route4_init(struct tcf_proto *tp)
{
return 0;
}
static void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
tcf_unbind_filter(tp, &f->res);
tcf_exts_destroy(tp, &f->exts);
kfree(f);
}
static void route4_destroy(struct tcf_proto *tp)
{
struct route4_head *head = tp->root;
int h1, h2;
if (head == NULL)
return;
for (h1 = 0; h1 <= 256; h1++) {
struct route4_bucket *b;
b = head->table[h1];
if (b) {
for (h2 = 0; h2 <= 32; h2++) {
struct route4_filter *f;
while ((f = b->ht[h2]) != NULL) {
b->ht[h2] = f->next;
route4_delete_filter(tp, f);
}
}
kfree(b);
}
}
kfree(head);
}
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
struct route4_head *head = (struct route4_head *)tp->root;
struct route4_filter **fp, *f = (struct route4_filter *)arg;
unsigned int h = 0;
struct route4_bucket *b;
int i;
if (!head || !f)
return -EINVAL;
h = f->handle;
b = f->bkt;
for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
tcf_tree_lock(tp);
*fp = f->next;
tcf_tree_unlock(tp);
route4_reset_fastmap(tp->q, head, f->id);
route4_delete_filter(tp, f);
/* Strip tree */
for (i = 0; i <= 32; i++)
if (b->ht[i])
return 0;
/* OK, session has no flows */
tcf_tree_lock(tp);
head->table[to_hash(h)] = NULL;
tcf_tree_unlock(tp);
kfree(b);
return 0;
}
}
return 0;
}
static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
[TCA_ROUTE4_CLASSID] = { .type = NLA_U32 },
[TCA_ROUTE4_TO] = { .type = NLA_U32 },
[TCA_ROUTE4_FROM] = { .type = NLA_U32 },
[TCA_ROUTE4_IIF] = { .type = NLA_U32 },
};
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
struct route4_filter *f, u32 handle, struct route4_head *head,
struct nlattr **tb, struct nlattr *est, int new)
{
int err;
u32 id = 0, to = 0, nhandle = 0x8000;
struct route4_filter *fp;
unsigned int h1;
struct route4_bucket *b;
struct tcf_exts e;
err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
if (err < 0)
return err;
err = -EINVAL;
if (tb[TCA_ROUTE4_TO]) {
if (new && handle & 0x8000)
goto errout;
to = nla_get_u32(tb[TCA_ROUTE4_TO]);
if (to > 0xFF)
goto errout;
nhandle = to;
}
if (tb[TCA_ROUTE4_FROM]) {
if (tb[TCA_ROUTE4_IIF])
goto errout;
id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
if (id > 0xFF)
goto errout;
nhandle |= id << 16;
} else if (tb[TCA_ROUTE4_IIF]) {
id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
if (id > 0x7FFF)
goto errout;
nhandle |= (id | 0x8000) << 16;
} else
nhandle |= 0xFFFF << 16;
if (handle && new) {
nhandle |= handle & 0x7F00;
if (nhandle != handle)
goto errout;
}
h1 = to_hash(nhandle);
b = head->table[h1];
if (!b) {
err = -ENOBUFS;
b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
if (b == NULL)
goto errout;
tcf_tree_lock(tp);
head->table[h1] = b;
tcf_tree_unlock(tp);
} else {
unsigned int h2 = from_hash(nhandle >> 16);
err = -EEXIST;
for (fp = b->ht[h2]; fp; fp = fp->next)
if (fp->handle == f->handle)
goto errout;
}
tcf_tree_lock(tp);
if (tb[TCA_ROUTE4_TO])
f->id = to;
if (tb[TCA_ROUTE4_FROM])
f->id = to | id<<16;
else if (tb[TCA_ROUTE4_IIF])
f->iif = id;
f->handle = nhandle;
f->bkt = b;
tcf_tree_unlock(tp);
if (tb[TCA_ROUTE4_CLASSID]) {
f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
tcf_bind_filter(tp, &f->res, base);
}
tcf_exts_change(tp, &f->exts, &e);
return 0;
errout:
tcf_exts_destroy(tp, &e);
return err;
}
static int route4_change(struct tcf_proto *tp, unsigned long base,
u32 handle,
struct nlattr **tca,
unsigned long *arg)
{
struct route4_head *head = tp->root;
struct route4_filter *f, *f1, **fp;
struct route4_bucket *b;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_ROUTE4_MAX + 1];
unsigned int h, th;
u32 old_handle = 0;
int err;
if (opt == NULL)
return handle ? -EINVAL : 0;
err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
if (err < 0)
return err;
f = (struct route4_filter *)*arg;
if (f) {
if (f->handle != handle && handle)
return -EINVAL;
if (f->bkt)
old_handle = f->handle;
err = route4_set_parms(tp, base, f, handle, head, tb,
tca[TCA_RATE], 0);
if (err < 0)
return err;
goto reinsert;
}
err = -ENOBUFS;
if (head == NULL) {
head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
if (head == NULL)
goto errout;
tcf_tree_lock(tp);
tp->root = head;
tcf_tree_unlock(tp);
}
f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
if (f == NULL)
goto errout;
err = route4_set_parms(tp, base, f, handle, head, tb,
tca[TCA_RATE], 1);
if (err < 0)
goto errout;
reinsert:
h = from_hash(f->handle >> 16);
for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
if (f->handle < f1->handle)
break;
f->next = f1;
tcf_tree_lock(tp);
*fp = f;
if (old_handle && f->handle != old_handle) {
th = to_hash(old_handle);
h = from_hash(old_handle >> 16);
b = head->table[th];
if (b) {
for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
if (*fp == f) {
*fp = f->next;
break;
}
}
}
}
tcf_tree_unlock(tp);
route4_reset_fastmap(tp->q, head, f->id);
*arg = (unsigned long)f;
return 0;
errout:
kfree(f);
return err;
}
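/*
 * Illustrative sketch, not part of the classifier: the reinsert path in
 * route4_change() above keeps every chain sorted by ascending handle.
 * The hypothetical helper below is that insertion in isolation (caller
 * holds the tree lock).
 */
static void route4_example_sorted_insert(struct route4_filter **chain,
	struct route4_filter *nf)
{
	struct route4_filter **fp, *f1;
	for (fp = chain; (f1 = *fp) != NULL; fp = &f1->next)
		if (nf->handle < f1->handle)
			break;		/* insert before the first larger handle */
	nf->next = f1;
	*fp = nf;
}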
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
struct route4_head *head = tp->root;
unsigned int h, h1;
if (head == NULL)
arg->stop = 1;
if (arg->stop)
return;
for (h = 0; h <= 256; h++) {
struct route4_bucket *b = head->table[h];
if (b) {
for (h1 = 0; h1 <= 32; h1++) {
struct route4_filter *f;
for (f = b->ht[h1]; f; f = f->next) {
if (arg->count < arg->skip) {
arg->count++;
continue;
}
if (arg->fn(tp, (unsigned long)f, arg) < 0) {
arg->stop = 1;
return;
}
arg->count++;
}
}
}
}
}
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
struct route4_filter *f = (struct route4_filter *)fh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
u32 id;
if (f == NULL)
return skb->len;
t->tcm_handle = f->handle;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
if (!(f->handle & 0x8000)) {
id = f->id & 0xFF;
NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
}
if (f->handle & 0x80000000) {
if ((f->handle >> 16) != 0xFFFF)
NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
} else {
id = f->id >> 16;
NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
}
if (f->res.classid)
NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);
if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
goto nla_put_failure;
nla_nest_end(skb, nest);
if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
goto nla_put_failure;
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
static struct tcf_proto_ops cls_route4_ops __read_mostly = {
.kind = "route",
.classify = route4_classify,
.init = route4_init,
.destroy = route4_destroy,
.get = route4_get,
.put = route4_put,
.change = route4_change,
.delete = route4_delete,
.walk = route4_walk,
.dump = route4_dump,
.owner = THIS_MODULE,
};
static int __init init_route4(void)
{
return register_tcf_proto_ops(&cls_route4_ops);
}
static void __exit exit_route4(void)
{
unregister_tcf_proto_ops(&cls_route4_ops);
}
module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");
| gpl-2.0 |
RenderBroken/msm8974_G2_render_kernel | arch/sh/kernel/cpu/sh2a/setup-mxg.c | 7505 | 6585 | /*
* Renesas MX-G (R8A03022BG) Setup
*
* Copyright (C) 2008, 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/sh_timer.h>
enum {
UNUSED = 0,
/* interrupt sources */
IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7,
IRQ8, IRQ9, IRQ10, IRQ11, IRQ12, IRQ13, IRQ14, IRQ15,
PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7,
SINT8, SINT7, SINT6, SINT5, SINT4, SINT3, SINT2, SINT1,
SCIF0, SCIF1,
MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3, MTU2_GROUP4, MTU2_GROUP5,
MTU2_TGI3B, MTU2_TGI3C,
/* interrupt groups */
PINT,
};
static struct intc_vect vectors[] __initdata = {
INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65),
INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67),
INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69),
INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71),
INTC_IRQ(IRQ8, 72), INTC_IRQ(IRQ9, 73),
INTC_IRQ(IRQ10, 74), INTC_IRQ(IRQ11, 75),
INTC_IRQ(IRQ12, 76), INTC_IRQ(IRQ13, 77),
INTC_IRQ(IRQ14, 78), INTC_IRQ(IRQ15, 79),
INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81),
INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83),
INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85),
INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87),
INTC_IRQ(SINT8, 94), INTC_IRQ(SINT7, 95),
INTC_IRQ(SINT6, 96), INTC_IRQ(SINT5, 97),
INTC_IRQ(SINT4, 98), INTC_IRQ(SINT3, 99),
INTC_IRQ(SINT2, 100), INTC_IRQ(SINT1, 101),
INTC_IRQ(SCIF0, 220), INTC_IRQ(SCIF0, 221),
INTC_IRQ(SCIF0, 222), INTC_IRQ(SCIF0, 223),
INTC_IRQ(SCIF1, 224), INTC_IRQ(SCIF1, 225),
INTC_IRQ(SCIF1, 226), INTC_IRQ(SCIF1, 227),
INTC_IRQ(MTU2_GROUP1, 228), INTC_IRQ(MTU2_GROUP1, 229),
INTC_IRQ(MTU2_GROUP1, 230), INTC_IRQ(MTU2_GROUP1, 231),
INTC_IRQ(MTU2_GROUP1, 232), INTC_IRQ(MTU2_GROUP1, 233),
INTC_IRQ(MTU2_GROUP2, 234), INTC_IRQ(MTU2_GROUP2, 235),
INTC_IRQ(MTU2_GROUP2, 236), INTC_IRQ(MTU2_GROUP2, 237),
INTC_IRQ(MTU2_GROUP2, 238), INTC_IRQ(MTU2_GROUP2, 239),
INTC_IRQ(MTU2_GROUP3, 240), INTC_IRQ(MTU2_GROUP3, 241),
INTC_IRQ(MTU2_GROUP3, 242), INTC_IRQ(MTU2_GROUP3, 243),
INTC_IRQ(MTU2_TGI3B, 244),
INTC_IRQ(MTU2_TGI3C, 245),
INTC_IRQ(MTU2_GROUP4, 246), INTC_IRQ(MTU2_GROUP4, 247),
INTC_IRQ(MTU2_GROUP4, 248), INTC_IRQ(MTU2_GROUP4, 249),
INTC_IRQ(MTU2_GROUP4, 250), INTC_IRQ(MTU2_GROUP4, 251),
INTC_IRQ(MTU2_GROUP5, 252), INTC_IRQ(MTU2_GROUP5, 253),
INTC_IRQ(MTU2_GROUP5, 254), INTC_IRQ(MTU2_GROUP5, 255),
};
static struct intc_group groups[] __initdata = {
INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3,
PINT4, PINT5, PINT6, PINT7),
};
static struct intc_prio_reg prio_registers[] __initdata = {
{ 0xfffd9418, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } },
{ 0xfffd941a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } },
{ 0xfffd941c, 0, 16, 4, /* IPR03 */ { IRQ8, IRQ9, IRQ10, IRQ11 } },
{ 0xfffd941e, 0, 16, 4, /* IPR04 */ { IRQ12, IRQ13, IRQ14, IRQ15 } },
{ 0xfffd9420, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } },
{ 0xfffd9800, 0, 16, 4, /* IPR06 */ { } },
{ 0xfffd9802, 0, 16, 4, /* IPR07 */ { } },
{ 0xfffd9804, 0, 16, 4, /* IPR08 */ { } },
{ 0xfffd9806, 0, 16, 4, /* IPR09 */ { } },
{ 0xfffd9808, 0, 16, 4, /* IPR10 */ { } },
{ 0xfffd980a, 0, 16, 4, /* IPR11 */ { } },
{ 0xfffd980c, 0, 16, 4, /* IPR12 */ { } },
{ 0xfffd980e, 0, 16, 4, /* IPR13 */ { } },
{ 0xfffd9810, 0, 16, 4, /* IPR14 */ { 0, 0, 0, SCIF0 } },
{ 0xfffd9812, 0, 16, 4, /* IPR15 */
{ SCIF1, MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3 } },
{ 0xfffd9814, 0, 16, 4, /* IPR16 */
{ MTU2_TGI3B, MTU2_TGI3C, MTU2_GROUP4, MTU2_GROUP5 } },
};
static struct intc_mask_reg mask_registers[] __initdata = {
{ 0xfffd9408, 0, 16, /* PINTER */
{ 0, 0, 0, 0, 0, 0, 0, 0,
PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } },
};
static DECLARE_INTC_DESC(intc_desc, "mxg", vectors, groups,
mask_registers, prio_registers, NULL);
static struct sh_timer_config mtu2_0_platform_data = {
.channel_offset = -0x80,
.timer_bit = 0,
.clockevent_rating = 200,
};
static struct resource mtu2_0_resources[] = {
[0] = {
.start = 0xff801300,
.end = 0xff801326,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 228,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device mtu2_0_device = {
.name = "sh_mtu2",
.id = 0,
.dev = {
.platform_data = &mtu2_0_platform_data,
},
.resource = mtu2_0_resources,
.num_resources = ARRAY_SIZE(mtu2_0_resources),
};
static struct sh_timer_config mtu2_1_platform_data = {
.channel_offset = -0x100,
.timer_bit = 1,
.clockevent_rating = 200,
};
static struct resource mtu2_1_resources[] = {
[0] = {
.start = 0xff801380,
.end = 0xff801390,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 234,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device mtu2_1_device = {
.name = "sh_mtu2",
.id = 1,
.dev = {
.platform_data = &mtu2_1_platform_data,
},
.resource = mtu2_1_resources,
.num_resources = ARRAY_SIZE(mtu2_1_resources),
};
static struct sh_timer_config mtu2_2_platform_data = {
.channel_offset = 0x80,
.timer_bit = 2,
.clockevent_rating = 200,
};
static struct resource mtu2_2_resources[] = {
[0] = {
.start = 0xff801000,
.end = 0xff80100a,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = 240,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device mtu2_2_device = {
.name = "sh_mtu2",
.id = 2,
.dev = {
.platform_data = &mtu2_2_platform_data,
},
.resource = mtu2_2_resources,
.num_resources = ARRAY_SIZE(mtu2_2_resources),
};
static struct plat_sci_port scif0_platform_data = {
.mapbase = 0xff804000,
.flags = UPF_BOOT_AUTOCONF,
.scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE,
.scbrr_algo_id = SCBRR_ALGO_2,
.type = PORT_SCIF,
.irqs = { 220, 220, 220, 220 },
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct platform_device *mxg_devices[] __initdata = {
&scif0_device,
&mtu2_0_device,
&mtu2_1_device,
&mtu2_2_device,
};
static int __init mxg_devices_setup(void)
{
return platform_add_devices(mxg_devices,
ARRAY_SIZE(mxg_devices));
}
arch_initcall(mxg_devices_setup);
void __init plat_irq_setup(void)
{
register_intc_controller(&intc_desc);
}
static struct platform_device *mxg_early_devices[] __initdata = {
&scif0_device,
&mtu2_0_device,
&mtu2_1_device,
&mtu2_2_device,
};
void __init plat_early_device_setup(void)
{
early_platform_add_devices(mxg_early_devices,
ARRAY_SIZE(mxg_early_devices));
}
| gpl-2.0 |
boa19861105/android_443_KitKat_kernel_htc_dlxpul | drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c | 8273 | 70692 | /**
@verbatim
Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
ADDI-DATA GmbH
Dieselstrasse 3
D-77833 Ottersweier
Tel: +49(0)7223/9493-0
Fax: +49(0)7223/9493-92
http://www.addi-data.com
info@addi-data.com
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
You should also find the complete GPL in the COPYING file accompanying this source code.
@endverbatim
*/
/*
+-----------------------------------------------------------------------+
| (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
+-----------------------------------------------------------------------+
| Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
| Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
+-----------------------------------------------------------------------+
| Project : API APCI1710 | Compiler : gcc |
| Module name : TOR.C | Version : 2.96 |
+-------------------------------+---------------------------------------+
| Project manager: Eric Stolz | Date : 02/12/2002 |
+-----------------------------------------------------------------------+
| Description : APCI-1710 tor counter module |
| |
| |
+-----------------------------------------------------------------------+
| UPDATES |
+-----------------------------------------------------------------------+
| Date | Author | Description of updates |
+----------+-----------+------------------------------------------------+
| 27/01/99 | S. Weber | 40 MHz implementation |
+-----------------------------------------------------------------------+
| 28/04/00 | S. Weber | Simple,double and quadruple mode implementation|
| | | Extern clock implementation |
+-----------------------------------------------------------------------+
| 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
| | | available |
+-----------------------------------------------------------------------+
*/
/*
+----------------------------------------------------------------------------+
| Included files |
+----------------------------------------------------------------------------+
*/
#include "APCI1710_Tor.h"
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_InitTorCounter |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_TorCounter, |
| unsigned char_ b_PCIInputClock, |
| unsigned char_ b_TimingUnit, |
| ULONG_ ul_TimingInterval, |
| PULONG_ pul_RealTimingInterval) |
+----------------------------------------------------------------------------+
| Task : Configure the selected tor counter (b_TorCounter) |
| from selected module (b_ModulNbr). |
| The ul_TimingInterval and ul_TimingUnit determine the |
| timing base for the measurement. |
| The pul_RealTimingInterval return the real timing |
| value. You must calling this function be for you call |
| any other function witch access of the tor counter. |
| |
+----------------------------------------------------------------------------+
| Input Parameters : |
|
CR_AREF unsigned char_ b_ModulNbr : Module number to configure |
| (0 to 3) |
| data[0] unsigned char_ b_TorCounter : Tor counter selection |
| (0 or 1). |
| data[1] unsigned char_ b_PCIInputClock : Selection from PCI bus clock|
| - APCI1710_30MHZ : |
| The PC has a PCI bus |
| clock of 30 MHz |
| - APCI1710_33MHZ : |
| The PC has a PCI bus |
| clock of 33 MHz |
| - APCI1710_40MHZ |
| The APCI-1710 has an |
| integrated 40 MHz |
| quartz. |
| - APCI1710_GATE_INPUT |
| Used the gate input for |
| the base clock. If you |
| have selected this option,|
| then it is not possible to |
| use the gate input to |
| enable the acquisition |
| data[2] unsigned char_ b_TimingUnit : Base timing unit (0 to 4) |
| 0 : ns |
| 1 : µs |
| 2 : ms |
| 3 : s |
| 4 : mn |
| data[3] ULONG_ ul_TimingInterval : Base timing value. |
+----------------------------------------------------------------------------+
| Output Parameters : PULONG_ pul_RealTimingInterval : Real base timing |
| data[0] value. |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: Module selection wrong |
| -3: The module is not a tor counter module |
| -4: Tor counter selection is wrong |
| -5: The selected PCI input clock is wrong |
| -6: Timing unit selection is wrong |
| -7: Base timing selection is wrong |
| -8: You cannot use the 40MHz clock selection with |
| this board |
| -9: You cannot use the 40MHz clock selection with |
| this TOR version |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_InsnConfigInitTorCounter(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
{
int i_ReturnValue = 0;
unsigned int ul_TimerValue = 0;
unsigned int dw_Command;
double d_RealTimingInterval = 0;
unsigned char b_ModulNbr;
unsigned char b_TorCounter;
unsigned char b_PCIInputClock;
unsigned char b_TimingUnit;
unsigned int ul_TimingInterval;
unsigned int ul_RealTimingInterval = 0;
i_ReturnValue = insn->n;
b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
b_TorCounter = (unsigned char) data[0];
b_PCIInputClock = (unsigned char) data[1];
b_TimingUnit = (unsigned char) data[2];
ul_TimingInterval = (unsigned int) data[3];
printk("INPUT clock %d\n", b_PCIInputClock);
/**************************/
/* Test the module number */
/**************************/
if (b_ModulNbr < 4) {
/***********************/
/* Test if tor counter */
/***********************/
if ((devpriv->s_BoardInfos.
dw_MolduleConfiguration[b_ModulNbr] &
0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
/**********************************/
/* Test the tor counter selection */
/**********************************/
if (b_TorCounter <= 1) {
/**************************/
/* Test the PCI bus clock */
/**************************/
if ((b_PCIInputClock == APCI1710_30MHZ) ||
(b_PCIInputClock == APCI1710_33MHZ) ||
(b_PCIInputClock == APCI1710_40MHZ) ||
(b_PCIInputClock ==
APCI1710_GATE_INPUT)) {
/************************/
/* Test the timing unit */
/************************/
if ((b_TimingUnit <= 4)
|| (b_PCIInputClock ==
APCI1710_GATE_INPUT)) {
/**********************************/
/* Test the base timing selection */
/**********************************/
if (((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 133) && (ul_TimingInterval <= 0xFFFFFFFFUL)) ||
((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 571230650UL)) ||
((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 571230UL)) ||
((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 571UL)) ||
((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 9UL)) ||
((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 121) && (ul_TimingInterval <= 0xFFFFFFFFUL)) ||
((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 519691043UL)) ||
((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 519691UL)) ||
((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 520UL)) ||
((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 8UL)) ||
((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 100) && (ul_TimingInterval <= 0xFFFFFFFFUL)) ||
((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 429496729UL)) ||
((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 429496UL)) ||
((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 429UL)) ||
((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 7UL)) ||
((b_PCIInputClock == APCI1710_GATE_INPUT) && (ul_TimingInterval >= 2))) {
/**************************/
/* Test the board version */
/**************************/
if (((b_PCIInputClock == APCI1710_40MHZ) && (devpriv->s_BoardInfos.b_BoardVersion > 0)) || (b_PCIInputClock != APCI1710_40MHZ)) {
/************************/
/* Test the TOR version */
/************************/
if (((b_PCIInputClock == APCI1710_40MHZ) && ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3131)) || ((b_PCIInputClock == APCI1710_GATE_INPUT) && ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3132)) || (b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ)) {
/*********************************/
/* Test if not extern clock used */
/*********************************/
if (b_PCIInputClock != APCI1710_GATE_INPUT) {
fpu_begin();
/*****************************************/
/* Calculate the timer 0 division factor */
/*****************************************/
switch (b_TimingUnit) {
/******/
/* ns */
/******/
case 0:
/* Timer 0 factor */
ul_TimerValue = (unsigned int) (ul_TimingInterval * (0.00025 * b_PCIInputClock));
/* Round the value */
if ((double)((double)ul_TimingInterval * (0.00025 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
ul_TimerValue = ul_TimerValue + 1;
}
/* Calculate the real timing */
ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (0.00025 * (double)b_PCIInputClock));
d_RealTimingInterval = (double) ul_TimerValue / (0.00025 * (double)b_PCIInputClock);
if ((double)((double)ul_TimerValue / (0.00025 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
ul_RealTimingInterval = ul_RealTimingInterval + 1;
}
ul_TimingInterval = ul_TimingInterval - 1;
ul_TimerValue = ul_TimerValue - 2;
if (b_PCIInputClock != APCI1710_40MHZ) {
ul_TimerValue = (unsigned int) ((double) (ul_TimerValue) * 1.007752288);
}
break;
/******/
/* µs */
/******/
case 1:
/* Timer 0 factor */
ul_TimerValue = (unsigned int) (ul_TimingInterval * (0.25 * b_PCIInputClock));
/* Round the value */
if ((double)((double)ul_TimingInterval * (0.25 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
ul_TimerValue = ul_TimerValue + 1;
}
/* Calculate the real timing */
ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (0.25 * (double)b_PCIInputClock));
d_RealTimingInterval = (double) ul_TimerValue / ((double) 0.25 * (double) b_PCIInputClock);
if ((double)((double)ul_TimerValue / (0.25 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
ul_RealTimingInterval = ul_RealTimingInterval + 1;
}
ul_TimingInterval = ul_TimingInterval - 1;
ul_TimerValue = ul_TimerValue - 2;
if (b_PCIInputClock != APCI1710_40MHZ) {
ul_TimerValue = (unsigned int) ((double) (ul_TimerValue) * 1.007752288);
}
break;
/******/
/* ms */
/******/
case 2:
/* Timer 0 factor */
ul_TimerValue = ul_TimingInterval * (250.0 * b_PCIInputClock);
/* Round the value */
if ((double)((double)ul_TimingInterval * (250.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
ul_TimerValue = ul_TimerValue + 1;
}
/* Calculate the real timing */
ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (250.0 * (double)b_PCIInputClock));
d_RealTimingInterval = (double) ul_TimerValue / (250.0 * (double) b_PCIInputClock);
if ((double)((double)ul_TimerValue / (250.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
ul_RealTimingInterval = ul_RealTimingInterval + 1;
}
ul_TimingInterval = ul_TimingInterval - 1;
ul_TimerValue = ul_TimerValue - 2;
if (b_PCIInputClock != APCI1710_40MHZ) {
ul_TimerValue = (unsigned int) ((double) (ul_TimerValue) * 1.007752288);
}
break;
/*****/
/* s */
/*****/
case 3:
/* Timer 0 factor */
ul_TimerValue = (unsigned int) (ul_TimingInterval * (250000.0 * b_PCIInputClock));
/* Round the value */
if ((double)((double)ul_TimingInterval * (250000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
ul_TimerValue = ul_TimerValue + 1;
}
/* Calculate the real timing */
ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (250000.0 * (double)b_PCIInputClock));
d_RealTimingInterval = (double) ul_TimerValue / (250000.0 * (double) b_PCIInputClock);
if ((double)((double)ul_TimerValue / (250000.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
ul_RealTimingInterval = ul_RealTimingInterval + 1;
}
ul_TimingInterval = ul_TimingInterval - 1;
ul_TimerValue = ul_TimerValue - 2;
if (b_PCIInputClock != APCI1710_40MHZ) {
ul_TimerValue = (unsigned int) ((double) (ul_TimerValue) * 1.007752288);
}
break;
/******/
/* mn */
/******/
case 4:
/* Timer 0 factor */
ul_TimerValue = (unsigned int) ((ul_TimingInterval * 60) * (250000.0 * b_PCIInputClock));
/* Round the value */
if ((double)((double)(ul_TimingInterval * 60.0) * (250000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
ul_TimerValue = ul_TimerValue + 1;
}
/* Calculate the real timing */
ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (250000.0 * (double)b_PCIInputClock)) / 60;
d_RealTimingInterval = ((double) ul_TimerValue / (250000.0 * (double) b_PCIInputClock)) / 60.0;
if ((double)(((double)ul_TimerValue / (250000.0 * (double)b_PCIInputClock)) / 60.0) >= (double)((double)ul_RealTimingInterval + 0.5)) {
ul_RealTimingInterval = ul_RealTimingInterval + 1;
}
ul_TimingInterval = ul_TimingInterval - 1;
ul_TimerValue = ul_TimerValue - 2;
if (b_PCIInputClock != APCI1710_40MHZ) {
ul_TimerValue = (unsigned int) ((double) (ul_TimerValue) * 1.007752288);
}
break;
}
fpu_end();
} /* if (b_PCIInputClock != APCI1710_GATE_INPUT) */
else {
/*************************************************************/
/* 2 Clock used for the overflow and the reload from counter */
/*************************************************************/
ul_TimerValue = ul_TimingInterval - 2;
} /* if (b_PCIInputClock != APCI1710_GATE_INPUT) */
/****************************/
/* Save the PCI input clock */
/****************************/
devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.b_PCIInputClock = b_PCIInputClock;
/************************/
/* Save the timing unit */
/************************/
devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.s_TorCounterInfo[b_TorCounter].b_TimingUnit = b_TimingUnit;
/************************/
/* Save the base timing */
/************************/
devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.s_TorCounterInfo[b_TorCounter].d_TimingInterval = d_RealTimingInterval;
devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.s_TorCounterInfo[b_TorCounter].ul_RealTimingInterval = ul_RealTimingInterval;
/*******************/
/* Get the command */
/*******************/
dw_Command = inl(devpriv->s_BoardInfos.ui_Address + 4 + (16 * b_TorCounter) + (64 * b_ModulNbr));
dw_Command = (dw_Command >> 4) & 0xF;
/******************/
/* Test if 40 MHz */
/******************/
if (b_PCIInputClock == APCI1710_40MHZ) {
/****************************/
/* Set the 40 MHz selection */
/****************************/
dw_Command = dw_Command | 0x10;
}
/*****************************/
/* Test if extern clock used */
/*****************************/
if (b_PCIInputClock == APCI1710_GATE_INPUT) {
/**********************************/
/* Set the extern clock selection */
/**********************************/
dw_Command = dw_Command | 0x20;
}
/*************************/
/* Write the new command */
/*************************/
outl(dw_Command, devpriv->s_BoardInfos.ui_Address + 4 + (16 * b_TorCounter) + (64 * b_ModulNbr));
/*******************/
/* Disable the tor */
/*******************/
outl(0, devpriv->s_BoardInfos.ui_Address + 8 + (16 * b_TorCounter) + (64 * b_ModulNbr));
/*************************/
/* Set the timer 1 value */
/*************************/
outl(ul_TimerValue, devpriv->s_BoardInfos.ui_Address + 0 + (16 * b_TorCounter) + (64 * b_ModulNbr));
/*********************/
/* Tor counter init. */
/*********************/
devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.s_TorCounterInfo[b_TorCounter].b_TorCounterInit = 1;
} else {
/***********************************************/
/* TOR version error for 40MHz clock selection */
/***********************************************/
DPRINTK("TOR version error for 40MHz clock selection\n");
i_ReturnValue = -9;
}
} else {
/**************************************************************/
/* You cannot use the 40MHz clock selection with this board */
/**************************************************************/
DPRINTK("You cannot use the 40MHz clock selection with this board\n");
i_ReturnValue = -8;
}
} else {
/**********************************/
/* Base timing selection is wrong */
/**********************************/
DPRINTK("Base timing selection is wrong\n");
i_ReturnValue = -7;
}
} /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
else {
/**********************************/
/* Timing unit selection is wrong */
/**********************************/
DPRINTK("Timing unit selection is wrong\n");
i_ReturnValue = -6;
} /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
} /* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ)) */
else {
/*****************************************/
/* The selected PCI input clock is wrong */
/*****************************************/
DPRINTK("The selected PCI input clock is wrong\n");
i_ReturnValue = -5;
} /* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ)) */
} /* if (b_TorCounterMode >= 0 && b_TorCounterMode <= 7) */
else {
/**********************************/
/* Tor Counter selection is wrong */
/**********************************/
DPRINTK("Tor Counter selection is wrong\n");
i_ReturnValue = -4;
} /* if (b_TorCounterMode >= 0 && b_TorCounterMode <= 7) */
} else {
/******************************************/
/* The module is not a tor counter module */
/******************************************/
DPRINTK("The module is not a tor counter module\n");
i_ReturnValue = -3;
}
} else {
/***********************/
/* Module number error */
/***********************/
DPRINTK("Module number error\n");
i_ReturnValue = -2;
}
data[0] = (unsigned int) ul_RealTimingInterval;
return i_ReturnValue;
}
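/*
 * Illustrative sketch, not part of the driver: the divider programmed
 * above for the "µs" unit is interval * 0.25 * f_PCI(MHz), rounded to
 * nearest, minus 2 ticks for reload and overflow; the 30/33 MHz paths
 * additionally scale by 1.007752288. The hypothetical helper below
 * renders only the basic arithmetic, outside fpu_begin()/fpu_end().
 */
static unsigned int example_us_divider(unsigned int interval_us,
	unsigned char pci_mhz)
{
	double ticks = (double) interval_us * (0.25 * (double) pci_mhz);
	unsigned int val = (unsigned int) ticks;
	if (ticks >= (double) val + 0.5)	/* round to nearest */
		val = val + 1;
	return val - 2;		/* 2 ticks consumed by overflow + reload */
}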
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_EnableTorCounter |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_TorCounter, |
| unsigned char_ b_InputMode, |
| unsigned char_ b_ExternGate, |
| unsigned char_ b_CycleMode, |
| unsigned char_ b_InterruptEnable) |
+----------------------------------------------------------------------------+
| Task : Enable the tor counter (b_TorCounter) from selected |
| module (b_ModulNbr). You must call the |
| "i_APCI1710_InitTorCounter" function before you call |
| this function. |
| If you enable the tor counter interrupt, the |
| tor counter generates an interrupt after the timing |
| cycle. See function "i_APCI1710_SetBoardIntRoutineX" |
| and the interrupt mask description chapter of this |
| manual. |
| The b_CycleMode parameter determines whether you |
| measure a single cycle or several cycles. |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
| unsigned char_ b_TorCounter : Tor counter selection (0 or 1). |
| unsigned char_ b_InputMode : Input signal level selection |
| 0 : Tor count each low level |
| 1 : Tor count each high level|
| unsigned char_ b_ExternGate : Extern gate action selection |
| 0 : Extern gate signal not |
| used |
| 1 : Extern gate signal used. |
| If you selected the |
| single mode, each high |
| level signal start the |
| counter. |
| If you selected the |
| continuous mode, the |
| first high level signal |
| start the tor counter |
| |
| APCI1710_TOR_QUADRUPLE _MODE : |
| In the quadruple mode, the edge|
| analysis circuit generates a |
| counting pulse from each edge |
| of 2 signals which are phase |
| shifted in relation to each |
| other. |
| The gate input is used for the |
| signal B |
| |
| APCI1710_TOR_DOUBLE_MODE: |
| Functions in the same way as |
| the quadruple mode, except that|
| only two of the four edges are |
| analysed per period. |
| The gate input is used for the |
| signal B |
| |
| APCI1710_TOR_SIMPLE_MODE: |
| Functions in the same way as |
| the quadruple mode, except that|
| only one of the four edges is |
| analysed per period. |
| The gate input is used for the |
| signal B |
| |
| unsigned char_ b_CycleMode : Selected the tor counter |
| acquisition mode |
| unsigned char_ b_InterruptEnable : Enable or disable the |
| tor counter interrupt. |
| APCI1710_ENABLE: |
| Enable the tor counter |
| interrupt |
| APCI1710_DISABLE: |
| Disable the tor counter |
| interrupt |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: Module selection wrong |
| -3: The module is not a tor counter module |
| -4: Tor counter selection is wrong |
| -5: Tor counter not initialised see function |
| "i_APCI1710_InitTorCounter" |
| -6: Tor input signal selection is wrong |
| -7: Extern gate signal mode is wrong |
| -8: Tor counter acquisition mode cycle is wrong |
| -9: Interrupt parameter is wrong |
| -10:Interrupt function not initialised. |
| See function "i_APCI1710_SetBoardIntRoutineX" |
+----------------------------------------------------------------------------+
*/
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_DisableTorCounter |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_TorCounter) |
+----------------------------------------------------------------------------+
| Task : Disable the tor counter (b_TorCounter) from selected |
| module (b_ModulNbr). If you disable the tor counter |
| after a start cycle occurs and you restart the tor |
| counter with the "i_APCI1710_EnableTorCounter" |
| function, the status register is cleared |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
| unsigned char_ b_TorCounter : Tor counter selection (0 or 1). |
+----------------------------------------------------------------------------+
| Output Parameters : - |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: Module selection wrong |
| -3: The module is not a tor counter module |
| -4: Tor counter selection is wrong |
| -5: Tor counter not initialised see function |
| "i_APCI1710_InitTorCounter" |
| -6: Tor counter not enabled see function |
| "i_APCI1710_EnableTorCounter" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_InsnWriteEnableDisableTorCounter(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
{
int i_ReturnValue = 0;
unsigned int dw_Status;
unsigned int dw_DummyRead;
unsigned int dw_ConfigReg;
unsigned char b_ModulNbr, b_Action;
unsigned char b_TorCounter;
unsigned char b_InputMode;
unsigned char b_ExternGate;
unsigned char b_CycleMode;
unsigned char b_InterruptEnable;
b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
b_Action = (unsigned char) data[0]; /* enable or disable */
b_TorCounter = (unsigned char) data[1];
b_InputMode = (unsigned char) data[2];
b_ExternGate = (unsigned char) data[3];
b_CycleMode = (unsigned char) data[4];
b_InterruptEnable = (unsigned char) data[5];
i_ReturnValue = insn->n;
devpriv->tsk_Current = current; /* Save the current process task structure */
/**************************/
/* Test the module number */
/**************************/
if (b_ModulNbr < 4) {
/***********************/
/* Test if tor counter */
/***********************/
if ((devpriv->s_BoardInfos.
dw_MolduleConfiguration[b_ModulNbr] &
0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
/**********************************/
/* Test the tor counter selection */
/**********************************/
if (b_TorCounter <= 1) {
switch (b_Action) /* Enable or Disable */
{
case APCI1710_ENABLE:
/***********************************/
/* Test if tor counter initialised */
/***********************************/
dw_Status =
inl(devpriv->s_BoardInfos.
ui_Address + 8 +
(16 * b_TorCounter) +
(64 * b_ModulNbr));
if (dw_Status & 0x10) {
/******************************/
/* Test the input signal mode */
/******************************/
if (b_InputMode == 0 ||
b_InputMode == 1 ||
b_InputMode ==
APCI1710_TOR_SIMPLE_MODE
|| b_InputMode ==
APCI1710_TOR_DOUBLE_MODE
|| b_InputMode ==
APCI1710_TOR_QUADRUPLE_MODE)
{
/************************************/
/* Test the extern gate signal mode */
/************************************/
if (b_ExternGate == 0
|| b_ExternGate
== 1
|| b_InputMode >
1) {
/*********************************/
/* Test the cycle mode parameter */
/*********************************/
if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) {
/***************************/
/* Test the interrupt flag */
/***************************/
if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) {
/***************************/
/* Save the interrupt mode */
/***************************/
devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.s_TorCounterInfo[b_TorCounter].b_InterruptEnable = b_InterruptEnable;
/*******************/
/* Get the command */
/*******************/
dw_ConfigReg = inl(devpriv->s_BoardInfos.ui_Address + 4 + (16 * b_TorCounter) + (64 * b_ModulNbr));
dw_ConfigReg = (dw_ConfigReg >> 4) & 0x30;
/********************************/
/* Test if not direct mode used */
/********************************/
if (b_InputMode > 1) {
/*******************************/
/* Extern gate can not be used */
/*******************************/
b_ExternGate = 0;
/*******************************************/
/* Enable the extern gate for the Signal B */
/*******************************************/
dw_ConfigReg = dw_ConfigReg | 0x40;
/***********************/
/* Test if simple mode */
/***********************/
if (b_InputMode == APCI1710_TOR_SIMPLE_MODE) {
/**************************/
/* Enable the simple mode */
/**************************/
dw_ConfigReg = dw_ConfigReg | 0x780;
} /* if (b_InputMode == APCI1710_TOR_SIMPLE_MODE) */
/***********************/
/* Test if double mode */
/***********************/
if (b_InputMode == APCI1710_TOR_DOUBLE_MODE) {
/**************************/
/* Enable the double mode */
/**************************/
dw_ConfigReg = dw_ConfigReg | 0x180;
} /* if (b_InputMode == APCI1710_TOR_DOUBLE_MODE) */
b_InputMode = 0;
} /* if (b_InputMode > 1) */
/*******************/
/* Set the command */
/*******************/
dw_ConfigReg = dw_ConfigReg | b_CycleMode | (b_InterruptEnable * 2) | (b_InputMode * 4) | (b_ExternGate * 8);
/*****************************/
/* Clear the status register */
/*****************************/
dw_DummyRead = inl(devpriv->s_BoardInfos.ui_Address + 0 + (16 * b_TorCounter) + (64 * b_ModulNbr));
/***************************************/
/* Clear the interrupt status register */
/***************************************/
dw_DummyRead = inl(devpriv->s_BoardInfos.ui_Address + 12 + (16 * b_TorCounter) + (64 * b_ModulNbr));
/*******************/
/* Set the command */
/*******************/
outl(dw_ConfigReg, devpriv->s_BoardInfos.ui_Address + 4 + (16 * b_TorCounter) + (64 * b_ModulNbr));
/****************/
/* Set the gate */
/****************/
outl(1, devpriv->s_BoardInfos.ui_Address + 8 + (16 * b_TorCounter) + (64 * b_ModulNbr));
} /* if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) */
else {
/********************************/
/* Interrupt parameter is wrong */
/********************************/
DPRINTK("Interrupt parameter is wrong\n");
i_ReturnValue = -9;
} /* if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) */
} /* if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) */
else {
/***********************************************/
/* Tor counter acquisition mode cycle is wrong */
/***********************************************/
DPRINTK("Tor counter acquisition mode cycle is wrong\n");
i_ReturnValue = -8;
} /* if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) */
} /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */
else {
/***********************************/
/* Extern gate input mode is wrong */
/***********************************/
DPRINTK("Extern gate input mode is wrong\n");
i_ReturnValue = -7;
} /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */
} /* if (b_InputMode >= 0 && b_InputMode <= 1) */
else {
/***************************************/
/* Tor input signal selection is wrong */
/***************************************/
DPRINTK("Tor input signal selection is wrong\n");
i_ReturnValue = -6;
}
} else {
/*******************************/
/* Tor counter not initialised */
/*******************************/
DPRINTK("Tor counter not initialised\n");
i_ReturnValue = -5;
}
break;
case APCI1710_DISABLE:
/***********************************/
/* Test if tor counter initialised */
/***********************************/
dw_Status = inl(devpriv->s_BoardInfos.
ui_Address + 8 +
(16 * b_TorCounter) +
(64 * b_ModulNbr));
/*******************************/
/* Test if counter initialised */
/*******************************/
if (dw_Status & 0x10) {
/***************************/
/* Test if counter enabled */
/***************************/
if (dw_Status & 0x1) {
/****************************/
/* Clear the interrupt mode */
/****************************/
devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.s_TorCounterInfo[b_TorCounter].b_InterruptEnable = APCI1710_DISABLE;
/******************/
/* Clear the gate */
/******************/
outl(0, devpriv->s_BoardInfos.ui_Address + 8 + (16 * b_TorCounter) + (64 * b_ModulNbr));
} /* if (dw_Status & 0x1) */
else {
/***************************/
/* Tor counter not enabled */
/***************************/
DPRINTK("Tor counter not enabled \n");
i_ReturnValue = -6;
} /* if (dw_Status & 0x1) */
} /* if (dw_Status & 0x10) */
else {
/*******************************/
/* Tor counter not initialised */
/*******************************/
DPRINTK("Tor counter not initialised\n");
i_ReturnValue = -5;
} /* if (dw_Status & 0x10) */
} /* switch */
} /* if (b_TorCounter <= 1) */
else {
/**********************************/
/* Tor counter selection is wrong */
/**********************************/
DPRINTK("Tor counter selection is wrong\n");
i_ReturnValue = -4;
} /* if (b_TorCounter <= 1) */
} else {
/******************************************/
/* The module is not a tor counter module */
/******************************************/
DPRINTK("The module is not a tor counter module \n");
i_ReturnValue = -3;
}
} else {
/***********************/
/* Module number error */
/***********************/
DPRINTK("Module number error \n");
i_ReturnValue = -2;
}
return i_ReturnValue;
}
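/*
 * Illustrative sketch, not part of the driver: how a caller would lay
 * out the data[] words consumed by the enable path above; the indices
 * mirror the unpacking at the top of the function. The values shown
 * are hypothetical.
 */
static void example_fill_enable_data(unsigned int *data)
{
	data[0] = APCI1710_ENABLE;	/* b_Action */
	data[1] = 0;			/* b_TorCounter: counter 0 */
	data[2] = 1;			/* b_InputMode: count each high level */
	data[3] = 0;			/* b_ExternGate: gate not used */
	data[4] = APCI1710_CONTINUOUS;	/* b_CycleMode */
	data[5] = APCI1710_DISABLE;	/* b_InterruptEnable */
}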
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_GetTorCounterInitialisation |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_TorCounter, |
| unsigned char *_ pb_TimingUnit, |
| PULONG_ pul_TimingInterval, |
| unsigned char *_ pb_InputMode, |
| unsigned char *_ pb_ExternGate, |
| unsigned char *_ pb_CycleMode, |
| unsigned char *_ pb_Enable, |
| unsigned char *_ pb_InterruptEnable)|
+----------------------------------------------------------------------------+
| Task : Return the initialisation parameters of the tor |
| counter (b_TorCounter) from the selected module |
| (b_ModulNbr): timing unit and interval, input mode, |
| extern gate use, cycle mode, enable state and |
| interrupt mode. You must call the |
| "i_APCI1710_InitTorCounter" function before you call |
| this function. |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
| unsigned char_ b_TorCounter : Tor counter selection (0 or 1)
b_ModulNbr = CR_AREF(insn->chanspec);
b_TorCounter = CR_CHAN(insn->chanspec);
. |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_TimingUnit : Base timing unit (0 to 4) |
| 0 : ns |
| 1 : µs |
| 2 : ms |
| 3 : s |
| 4 : mn |
| PULONG_ pul_TimingInterval : Base timing value. |
| unsigned char *_ pb_InputMode : Input signal level |
| selection |
| 0 : Tor count each low level |
| 1 : Tor count each high level|
| unsigned char *_ pb_ExternGate : Extern gate action |
| selection |
| 0 : Extern gate signal not |
| used |
| 1 : Extern gate signal used|
| unsigned char *_ pb_CycleMode : Tor counter acquisition |
| mode |
| unsigned char *_ pb_Enable : Indicate if the tor counter|
| is enabled or no |
| 0 : Tor counter disabled |
| 1 : Tor counter enabled |
| unsigned char *_ pb_InterruptEnable : Enable or disable the |
| tor counter interrupt. |
| APCI1710_ENABLE: |
| Enable the tor counter |
| interrupt |
| APCI1710_DISABLE: |
| Disable the tor counter |
| interrupt
pb_TimingUnit = (unsigned char *) &data[0];
pul_TimingInterval = (unsigned int *) &data[1];
pb_InputMode = (unsigned char *) &data[2];
pb_ExternGate = (unsigned char *) &data[3];
pb_CycleMode = (unsigned char *) &data[4];
pb_Enable = (unsigned char *) &data[5];
pb_InterruptEnable = (unsigned char *) &data[6];
|
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: Module selection wrong |
| -3: The module is not a tor counter module |
| -4: Tor counter selection is wrong |
| -5: Tor counter not initialised see function |
| "i_APCI1710_InitTorCounter" |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_InsnReadGetTorCounterInitialisation(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
{
int i_ReturnValue = 0;
unsigned int dw_Status;
unsigned char b_ModulNbr;
unsigned char b_TorCounter;
unsigned char *pb_TimingUnit;
unsigned int *pul_TimingInterval;
unsigned char *pb_InputMode;
unsigned char *pb_ExternGate;
unsigned char *pb_CycleMode;
unsigned char *pb_Enable;
unsigned char *pb_InterruptEnable;
i_ReturnValue = insn->n;
b_ModulNbr = CR_AREF(insn->chanspec);
b_TorCounter = CR_CHAN(insn->chanspec);
pb_TimingUnit = (unsigned char *) &data[0];
pul_TimingInterval = (unsigned int *) &data[1];
pb_InputMode = (unsigned char *) &data[2];
pb_ExternGate = (unsigned char *) &data[3];
pb_CycleMode = (unsigned char *) &data[4];
pb_Enable = (unsigned char *) &data[5];
pb_InterruptEnable = (unsigned char *) &data[6];
/**************************/
/* Test the module number */
/**************************/
if (b_ModulNbr < 4) {
/***********************/
/* Test if tor counter */
/***********************/
if ((devpriv->s_BoardInfos.
dw_MolduleConfiguration[b_ModulNbr] &
0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
/**********************************/
/* Test the tor counter selection */
/**********************************/
if (b_TorCounter <= 1) {
/***********************************/
/* Test if tor counter initialised */
/***********************************/
dw_Status = inl(devpriv->s_BoardInfos.
ui_Address + 8 + (16 * b_TorCounter) +
(64 * b_ModulNbr));
if (dw_Status & 0x10) {
*pb_Enable = dw_Status & 1;
/********************/
/* Get the commando */
/********************/
dw_Status = inl(devpriv->s_BoardInfos.
ui_Address + 4 +
(16 * b_TorCounter) +
(64 * b_ModulNbr));
*pb_CycleMode =
(unsigned char) ((dw_Status >> 4) & 1);
*pb_InterruptEnable =
(unsigned char) ((dw_Status >> 5) & 1);
/******************************************************/
/* Test if extern gate used for clock or for signal B */
/******************************************************/
if (dw_Status & 0x600) {
/*****************************************/
/* Test if extern gate used for signal B */
/*****************************************/
if (dw_Status & 0x400) {
/***********************/
/* Test if simple mode */
/***********************/
if ((dw_Status & 0x7800)
== 0x7800) {
*pb_InputMode =
APCI1710_TOR_SIMPLE_MODE;
}
/***********************/
/* Test if double mode */
/***********************/
if ((dw_Status & 0x7800)
== 0x1800) {
*pb_InputMode =
APCI1710_TOR_DOUBLE_MODE;
}
/**************************/
/* Test if quadruple mode */
/**************************/
if ((dw_Status & 0x7800)
== 0x0000) {
*pb_InputMode =
APCI1710_TOR_QUADRUPLE_MODE;
}
} /* if (dw_Status & 0x400) */
else {
*pb_InputMode = 1;
} /* // if (dw_Status & 0x400) */
/************************/
/* Extern gate not used */
/************************/
*pb_ExternGate = 0;
} /* if (dw_Status & 0x600) */
else {
*pb_InputMode =
(unsigned char) ((dw_Status >> 6)
& 1);
*pb_ExternGate =
(unsigned char) ((dw_Status >> 7)
& 1);
} /* if (dw_Status & 0x600) */
*pb_TimingUnit =
devpriv->
s_ModuleInfo[b_ModulNbr].
s_TorCounterModuleInfo.
s_TorCounterInfo[b_TorCounter].
b_TimingUnit;
*pul_TimingInterval =
devpriv->
s_ModuleInfo[b_ModulNbr].
s_TorCounterModuleInfo.
s_TorCounterInfo[b_TorCounter].
ul_RealTimingInterval;
} else {
/*******************************/
/* Tor counter not initialised */
/*******************************/
DPRINTK("Tor counter not initialised\n");
i_ReturnValue = -5;
}
} /* if (b_TorCounter <= 1) */
else {
/**********************************/
/* Tor counter selection is wrong */
/**********************************/
DPRINTK("Tor counter selection is wrong \n");
i_ReturnValue = -4;
} /* if (b_TorCounter <= 1) */
} else {
/******************************************/
/* The module is not a tor counter module */
/******************************************/
DPRINTK("The module is not a tor counter module\n");
i_ReturnValue = -3;
}
} else {
/***********************/
/* Module number error */
/***********************/
DPRINTK("Module number error\n");
i_ReturnValue = -2;
}
return i_ReturnValue;
}
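/*
 * Hedged usage sketch (not part of the original driver): after a
 * successful read instruction handled by the function above, the seven
 * data words decode as below. The helper name and the printk format
 * are illustrative assumptions, not driver API.
 */
#if 0
static void example_decode_tor_init(const unsigned int data[7])
{
	unsigned char timing_unit = (unsigned char) data[0];	/* 0=ns .. 4=mn */
	unsigned int timing_interval = data[1];
	unsigned char input_mode = (unsigned char) data[2];
	unsigned char extern_gate = (unsigned char) data[3];
	unsigned char cycle_mode = (unsigned char) data[4];
	unsigned char enabled = (unsigned char) data[5];
	unsigned char irq_enabled = (unsigned char) data[6];

	printk(KERN_DEBUG "tor: unit=%u interval=%u input=%u gate=%u "
		"cycle=%u enable=%u irq=%u\n",
		timing_unit, timing_interval, input_mode, extern_gate,
		cycle_mode, enabled, irq_enabled);
}
#endif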
/*
+----------------------------------------------------------------------------+
| Function Name : _INT_ i_APCI1710_ReadTorCounterValue |
| (unsigned char_ b_BoardHandle, |
| unsigned char_ b_ModulNbr, |
| unsigned char_ b_TorCounter, |
| unsigned int_ ui_TimeOut, |
| unsigned char *_ pb_TorCounterStatus, |
| PULONG_ pul_TorCounterValue) |
+----------------------------------------------------------------------------+
| Task : |
| case APCI1710_TOR_GETPROGRESSSTATUS: |
| Returns the tor counter (b_TorCounter) status |
| (pb_TorCounterStatus) from the selected tor counter |
| module (b_ModulNbr). |
| case APCI1710_TOR_GETCOUNTERVALUE: |
| Returns the tor counter (b_TorCounter) status |
| (pb_TorCounterStatus) and the timing value |
| (pul_TorCounterValue) after a counting cycle stop |
| from the selected tor counter module (b_ModulNbr). |
+----------------------------------------------------------------------------+
| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
| unsigned char_ b_TorCounter : Tor counter selection (0 or 1) |
| unsigned int_ ui_TimeOut : Timeout (0 to 65535) |
| In this comedi wrapper the inputs are taken from the |
| instruction: |
| b_ModulNbr = CR_AREF(insn->chanspec); |
| b_ReadType = (unsigned char) data[0]; |
| b_TorCounter = (unsigned char) data[1]; |
| ui_TimeOut = (unsigned int) data[2]; |
+----------------------------------------------------------------------------+
| Output Parameters : unsigned char *_ pb_TorCounterStatus : Tor counter status |
| 0 : Counting cycle not started. |
| Software gate not set. |
| 1 : Counting cycle started. |
| Software gate set. |
| 2 : Counting cycle stopped. |
| The counting cycle is |
| terminated. |
| 3 : An overflow occurred. You |
| must change the base timing |
| with the function |
| "i_APCI1710_InitTorCounter".|
| 4 : Timeout occurred. |
| unsigned int * pul_TorCounterValue : Tor counter value. |
| In this comedi wrapper the outputs are returned through |
| the data array: |
| pb_TorCounterStatus = (unsigned char *) &data[0]; |
| pul_TorCounterValue = (unsigned int *) &data[1]; |
+----------------------------------------------------------------------------+
| Return Value : 0: No error |
| -1: The handle parameter of the board is wrong |
| -2: Module selection wrong |
| -3: The module is not a tor counter module |
| -4: Tor counter selection is wrong |
| -5: Tor counter not initialised, see function |
| "i_APCI1710_InitTorCounter" |
| -6: Tor counter not enabled, see function |
| "i_APCI1710_EnableTorCounter" |
| -7: Timeout parameter is wrong (0 to 65535) |
+----------------------------------------------------------------------------+
*/
int i_APCI1710_InsnBitsGetTorCounterProgressStatusAndValue(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data)
{
int i_ReturnValue = 0;
unsigned int dw_Status;
unsigned int dw_TimeOut = 0;
unsigned char b_ModulNbr;
unsigned char b_TorCounter;
unsigned char b_ReadType;
unsigned int ui_TimeOut;
unsigned char *pb_TorCounterStatus;
unsigned int *pul_TorCounterValue;
i_ReturnValue = insn->n;
b_ModulNbr = CR_AREF(insn->chanspec);
b_ReadType = (unsigned char) data[0];
b_TorCounter = (unsigned char) data[1];
ui_TimeOut = (unsigned int) data[2];
pb_TorCounterStatus = (unsigned char *) &data[0];
pul_TorCounterValue = (unsigned int *) &data[1];
/******************************************/
/* Test if an interrupt read is requested */
/******************************************/
if (b_ReadType == APCI1710_TOR_READINTERRUPT) {
data[0] = devpriv->s_InterruptParameters.
s_FIFOInterruptParameters[devpriv->
s_InterruptParameters.ui_Read].b_OldModuleMask;
data[1] = devpriv->s_InterruptParameters.
s_FIFOInterruptParameters[devpriv->
s_InterruptParameters.ui_Read].ul_OldInterruptMask;
data[2] = devpriv->s_InterruptParameters.
s_FIFOInterruptParameters[devpriv->
s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
/***************************/
/* Increment the read FIFO */
/***************************/
devpriv->s_InterruptParameters.ui_Read =
(devpriv->s_InterruptParameters.ui_Read + 1) %
APCI1710_SAVE_INTERRUPT;
return insn->n;
}
/**************************/
/* Test the module number */
/**************************/
if (b_ModulNbr < 4) {
/***********************/
/* Test if tor counter */
/***********************/
if ((devpriv->s_BoardInfos.
dw_MolduleConfiguration[b_ModulNbr] &
0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
/**********************************/
/* Test the tor counter selection */
/**********************************/
if (b_TorCounter <= 1) {
/***********************************/
/* Test if tor counter initialised */
/***********************************/
dw_Status = inl(devpriv->s_BoardInfos.
ui_Address + 8 + (16 * b_TorCounter) +
(64 * b_ModulNbr));
/*******************************/
/* Test if counter initialised */
/*******************************/
if (dw_Status & 0x10) {
/***************************/
/* Test if counter enabled */
/***************************/
if (dw_Status & 0x1) {
switch (b_ReadType) {
case APCI1710_TOR_GETPROGRESSSTATUS:
/*******************/
/* Read the status */
/*******************/
dw_Status = inl(devpriv->s_BoardInfos.
ui_Address + 4 +
(16 * b_TorCounter) + (64 * b_ModulNbr));
dw_Status &= 0xF;
/*****************/
/* Test if start */
/*****************/
if (dw_Status & 1) {
if (dw_Status & 2) {
if (dw_Status & 4) {
/************************/
/* Tor counter overflow */
/************************/
*pb_TorCounterStatus = 3;
} else {
/***********************/
/* Tor counter stopped */
/***********************/
*pb_TorCounterStatus = 2;
}
} else {
/***********************/
/* Tor counter started */
/***********************/
*pb_TorCounterStatus = 1;
}
} else {
/***************************/
/* Tor counter not started */
/***************************/
*pb_TorCounterStatus = 0;
}
break;
case APCI1710_TOR_GETCOUNTERVALUE:
/******************************/
/* Test the timeout parameter */
/******************************/
/* ui_TimeOut is unsigned, so only the upper bound needs checking */
if (ui_TimeOut <= 65535UL) {
for (;;) {
/*******************/
/* Read the status */
/*******************/
dw_Status = inl(devpriv->s_BoardInfos.
ui_Address + 4 +
(16 * b_TorCounter) + (64 * b_ModulNbr));
/********************/
/* Test if overflow */
/********************/
if ((dw_Status & 4) == 4) {
/******************/
/* Overflow occur */
/******************/
*pb_TorCounterStatus = 3;
/******************/
/* Read the value */
/******************/
*pul_TorCounterValue = inl(devpriv->s_BoardInfos.
ui_Address +
(16 * b_TorCounter) + (64 * b_ModulNbr));
break;
} /* if ((dw_Status & 4) == 4) */
else {
/*******************************/
/* Test if measurement stopped */
/*******************************/
if ((dw_Status & 2) == 2) {
/***********************/
/* A stop signal occur */
/***********************/
*pb_TorCounterStatus = 2;
/******************/
/* Read the value */
/******************/
*pul_TorCounterValue = inl(devpriv->s_BoardInfos.
ui_Address +
(16 * b_TorCounter) + (64 * b_ModulNbr));
break;
} /* if ((dw_Status & 2) == 2) */
else {
/*******************************/
/* Test if measurement started */
/*******************************/
if ((dw_Status & 1) == 1) {
/************************/
/* A start signal occur */
/************************/
*pb_TorCounterStatus = 1;
} /* if ((dw_Status & 1) == 1) */
else {
/***************************/
/* Measurement not started */
/***************************/
*pb_TorCounterStatus = 0;
} /* if ((dw_Status & 1) == 1) */
} /* if ((dw_Status & 2) == 2) */
} /* if ((dw_Status & 4) == 4) */
if (dw_TimeOut == ui_TimeOut) {
/*****************/
/* Timeout occur */
/*****************/
break;
} else {
/*************************/
/* Increment the timeout */
/*************************/
dw_TimeOut++;
mdelay(1000);
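/* Each pass through this branch busy-waits for 1000 ms via mdelay(),
   so ui_TimeOut is effectively expressed in seconds here. */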
}
} /* for (;;) */
/*************************/
/* Test if timeout occur */
/*************************/
if ((*pb_TorCounterStatus != 3) && (dw_TimeOut == ui_TimeOut) && (ui_TimeOut != 0)) {
/*****************/
/* Timeout occur */
/*****************/
*pb_TorCounterStatus = 4;
}
} else {
/******************************/
/* Timeout parameter is wrong */
/******************************/
DPRINTK("Timeout parameter is wrong\n");
i_ReturnValue = -7;
}
break;
default:
printk("Inputs wrong\n");
} /* switch end */
} /* if (dw_Status & 0x1) */
else {
/***************************/
/* Tor counter not enabled */
/***************************/
DPRINTK("Tor counter not enabled\n");
i_ReturnValue = -6;
} /* if (dw_Status & 0x1) */
} else {
/*******************************/
/* Tor counter not initialised */
/*******************************/
DPRINTK("Tor counter not initialised\n");
i_ReturnValue = -5;
}
} /* if (b_TorCounter <= 1) */
else {
/**********************************/
/* Tor counter selection is wrong */
/**********************************/
DPRINTK("Tor counter selection is wrong\n");
i_ReturnValue = -4;
} /* if (b_TorCounter <= 1) */
} else {
/******************************************/
/* The module is not a tor counter module */
/******************************************/
DPRINTK("The module is not a tor counter module\n");
i_ReturnValue = -3;
}
} else {
/***********************/
/* Module number error */
/***********************/
DPRINTK("Module number error\n");
i_ReturnValue = -2;
}
return i_ReturnValue;
}
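/*
 * Hedged sketch (not part of the original driver): the
 * APCI1710_TOR_GETPROGRESSSTATUS decode above reduces to a priority
 * test over the low status bits (bit 0 start, bit 1 stop, bit 2
 * overflow - an assumption read off the code, not from a datasheet).
 * A compact equivalent could look like this:
 */
#if 0
static unsigned char tor_decode_progress_status(unsigned int dw_status)
{
	if (!(dw_status & 1))
		return 0;	/* counting cycle not started */
	if (!(dw_status & 2))
		return 1;	/* counting cycle started */
	return (dw_status & 4) ? 3 : 2;	/* overflow : stopped */
}
#endif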
| gpl-2.0 |
ezterry/kernel-biff-testing | arch/arm/mach-bcmring/csp/chipc/chipcHw_reset.c | 9553 | 5948 | /*****************************************************************************
* Copyright 2003 - 2008 Broadcom Corporation. All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/
/* ---- Include Files ---------------------------------------------------- */
#include <csp/stdint.h>
#include <mach/csp/chipcHw_def.h>
#include <mach/csp/chipcHw_inline.h>
#include <csp/intcHw.h>
#include <csp/cache.h>
/* ---- Private Constants and Types --------------------------------------- */
/* ---- Private Variables ------------------------------------------------- */
void chipcHw_reset_run_from_aram(void);
typedef void (*RUNFUNC) (void);
/****************************************************************************/
/**
* @brief chipcHw_reset
*
* @note chipcHw_reset configures the clocks which are not reset back to the
* state required to execute on reset. To do so we need to copy the code into
* internal memory to change the ARM clock while we are not executing from DDR.
*/
/****************************************************************************/
void chipcHw_reset(uint32_t mask)
{
int i = 0;
RUNFUNC runFunc = (RUNFUNC) (unsigned long)MM_ADDR_IO_ARAM;
/* Disable all interrupts */
intcHw_irq_disable(INTCHW_INTC0, 0xffffffff);
intcHw_irq_disable(INTCHW_INTC1, 0xffffffff);
intcHw_irq_disable(INTCHW_SINTC, 0xffffffff);
{
REG_LOCAL_IRQ_SAVE;
if (mask & chipcHw_REG_SOFT_RESET_CHIP_SOFT) {
chipcHw_softReset(chipcHw_REG_SOFT_RESET_CHIP_SOFT);
}
/* Bypass the PLL clocks before reboot */
pChipcHw->UARTClock |= chipcHw_REG_PLL_CLOCK_BYPASS_SELECT;
pChipcHw->SPIClock |= chipcHw_REG_PLL_CLOCK_BYPASS_SELECT;
/* Copy the chipcHw_reset_run_from_aram function into ARAM */
do {
((uint32_t *) MM_IO_BASE_ARAM)[i] =
((uint32_t *) &chipcHw_reset_run_from_aram)[i];
i++;
} while (((uint32_t *) MM_IO_BASE_ARAM)[i - 1] != 0xe1a0f00f); /* 0xe1a0f00f == asm ("mov r15, r15"); */
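/*
 * Note: the copy loop above assumes chipcHw_reset_run_from_aram() is
 * position-independent and ends with the 0xe1a0f00f ("mov pc, pc")
 * sentinel emitted at the end of that function; the sentinel is what
 * terminates the word-by-word copy.
 */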
CSP_CACHE_FLUSH_ALL;
/* run the function from ARAM */
runFunc();
/* Code will never get here, but include it to balance REG_LOCAL_IRQ_SAVE above */
REG_LOCAL_IRQ_RESTORE;
}
}
/* This function must run from internal memory */
void chipcHw_reset_run_from_aram(void)
{
/* Make sure, pipeline is filled with instructions coming from ARAM */
__asm (" nop \n\t"
" nop \n\t"
#if defined(__KERNEL__) && !defined(STANDALONE)
" MRC p15,#0x0,r0,c1,c0,#0 \n\t"
" BIC r0,r0,#0xd \n\t"
" MCR p15,#0x0,r0,c1,c0,#0 \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
#endif
" nop \n\t"
" nop \n\t"
/* Bypass the ARM clock and switch to XTAL clock */
" MOV r2,#0x80000000 \n\t"
" LDR r3,[r2,#8] \n\t"
" ORR r3,r3,#0x20000 \n\t"
" STR r3,[r2,#8] \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
" nop \n\t"
/* Issue reset */
" MOV r3,#0x2 \n\t"
" STR r3,[r2,#0x80] \n\t"
/* End here */
" MOV pc,pc \n\t");
/* 0xe1a0f00f == asm ("mov r15, r15"); */
}
| gpl-2.0 |
mostafa-z/GABRIEL_LP | drivers/char/ipmi/ipmi_poweroff.c | 10321 | 19855 | /*
* ipmi_poweroff.c
*
* MontaVista IPMI Poweroff extension to sys_reboot
*
* Author: MontaVista Software, Inc.
* Steven Dake <sdake@mvista.com>
* Corey Minyard <cminyard@mvista.com>
* source@mvista.com
*
* Copyright 2002,2004 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/proc_fs.h>
#include <linux/string.h>
#include <linux/completion.h>
#include <linux/pm.h>
#include <linux/kdev_t.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#define PFX "IPMI poweroff: "
static void ipmi_po_smi_gone(int if_num);
static void ipmi_po_new_smi(int if_num, struct device *device);
/* Definitions for controlling power off (if the system supports it). It
* conveniently matches the IPMI chassis control values. */
#define IPMI_CHASSIS_POWER_DOWN 0 /* power down, the default. */
#define IPMI_CHASSIS_POWER_CYCLE 0x02 /* power cycle */
/* the IPMI data command */
static int poweroff_powercycle;
/* Which interface to use, -1 means the first we see. */
static int ifnum_to_use = -1;
/* Our local state. */
static int ready;
static ipmi_user_t ipmi_user;
static int ipmi_ifnum;
static void (*specific_poweroff_func)(ipmi_user_t user);
/* Holds the old poweroff function so we can restore it on removal. */
static void (*old_poweroff_func)(void);
static int set_param_ifnum(const char *val, struct kernel_param *kp)
{
int rv = param_set_int(val, kp);
if (rv)
return rv;
if ((ifnum_to_use < 0) || (ifnum_to_use == ipmi_ifnum))
return 0;
ipmi_po_smi_gone(ipmi_ifnum);
ipmi_po_new_smi(ifnum_to_use, NULL);
return 0;
}
module_param_call(ifnum_to_use, set_param_ifnum, param_get_int,
&ifnum_to_use, 0644);
MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the poweroff "
"operation. Setting to -1 defaults to the first registered "
"interface");
/* parameter definition to allow user to flag power cycle */
module_param(poweroff_powercycle, int, 0644);
MODULE_PARM_DESC(poweroff_powercycle,
" Set to non-zero to enable power cycle instead of power"
" down. Power cycle is contingent on hardware support,"
" otherwise it defaults back to power down.");
/* Stuff from the get device id command. */
static unsigned int mfg_id;
static unsigned int prod_id;
static unsigned char capabilities;
static unsigned char ipmi_version;
/*
* We use our own messages for this operation, we don't let the system
* allocate them, since we may be in a panic situation. The whole
* thing is single-threaded, anyway, so multiple messages are not
* required.
*/
static atomic_t dummy_count = ATOMIC_INIT(0);
static void dummy_smi_free(struct ipmi_smi_msg *msg)
{
atomic_dec(&dummy_count);
}
static void dummy_recv_free(struct ipmi_recv_msg *msg)
{
atomic_dec(&dummy_count);
}
static struct ipmi_smi_msg halt_smi_msg = {
.done = dummy_smi_free
};
static struct ipmi_recv_msg halt_recv_msg = {
.done = dummy_recv_free
};
/*
* Code to send a message and wait for the response.
*/
static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
{
struct completion *comp = recv_msg->user_msg_data;
if (comp)
complete(comp);
}
static struct ipmi_user_hndl ipmi_poweroff_handler = {
.ipmi_recv_hndl = receive_handler
};
static int ipmi_request_wait_for_response(ipmi_user_t user,
struct ipmi_addr *addr,
struct kernel_ipmi_msg *send_msg)
{
int rv;
struct completion comp;
init_completion(&comp);
rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, &comp,
&halt_smi_msg, &halt_recv_msg, 0);
if (rv)
return rv;
wait_for_completion(&comp);
return halt_recv_msg.msg.data[0];
}
/* Wait for message to complete, spinning. */
static int ipmi_request_in_rc_mode(ipmi_user_t user,
struct ipmi_addr *addr,
struct kernel_ipmi_msg *send_msg)
{
int rv;
atomic_set(&dummy_count, 2);
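/* One reference for the SMI message and one for the receive message;
   each dummy free handler decrements the count when its message is done. */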
rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
&halt_smi_msg, &halt_recv_msg, 0);
if (rv) {
atomic_set(&dummy_count, 0);
return rv;
}
/*
* Spin until our message is done.
*/
while (atomic_read(&dummy_count) > 0) {
ipmi_poll_interface(user);
cpu_relax();
}
return halt_recv_msg.msg.data[0];
}
/*
* ATCA Support
*/
#define IPMI_NETFN_ATCA 0x2c
#define IPMI_ATCA_SET_POWER_CMD 0x11
#define IPMI_ATCA_GET_ADDR_INFO_CMD 0x01
#define IPMI_PICMG_ID 0
#define IPMI_NETFN_OEM 0x2e
#define IPMI_ATCA_PPS_GRACEFUL_RESTART 0x11
#define IPMI_ATCA_PPS_IANA "\x00\x40\x0A"
#define IPMI_MOTOROLA_MANUFACTURER_ID 0x0000A1
#define IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID 0x0051
static void (*atca_oem_poweroff_hook)(ipmi_user_t user);
static void pps_poweroff_atca(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
int rv;
/*
* Configure IPMI address for local access
*/
smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
printk(KERN_INFO PFX "PPS powerdown hook used");
send_msg.netfn = IPMI_NETFN_OEM;
send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
send_msg.data = IPMI_ATCA_PPS_IANA;
send_msg.data_len = 3;
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
printk(KERN_ERR PFX "Unable to send ATCA ,"
" IPMI error 0x%x\n", rv);
}
return;
}
static int ipmi_atca_detect(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
int rv;
unsigned char data[1];
/*
* Configure IPMI address for local access
*/
smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
/*
* Use get address info to check and see if we are ATCA
*/
send_msg.netfn = IPMI_NETFN_ATCA;
send_msg.cmd = IPMI_ATCA_GET_ADDR_INFO_CMD;
data[0] = IPMI_PICMG_ID;
send_msg.data = data;
send_msg.data_len = sizeof(data);
rv = ipmi_request_wait_for_response(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n",
mfg_id, prod_id);
if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
&& (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
printk(KERN_INFO PFX
"Installing Pigeon Point Systems Poweroff Hook\n");
atca_oem_poweroff_hook = pps_poweroff_atca;
}
return !rv;
}
static void ipmi_poweroff_atca(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
int rv;
unsigned char data[4];
/*
* Configure IPMI address for local access
*/
smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
printk(KERN_INFO PFX "Powering down via ATCA power command\n");
/*
* Power down
*/
send_msg.netfn = IPMI_NETFN_ATCA;
send_msg.cmd = IPMI_ATCA_SET_POWER_CMD;
data[0] = IPMI_PICMG_ID;
data[1] = 0; /* FRU id */
data[2] = 0; /* Power Level */
data[3] = 0; /* Don't change saved presets */
send_msg.data = data;
send_msg.data_len = sizeof(data);
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
/*
* At this point, the system may be shutting down, and most
* serial drivers (if used) will have interrupts turned off;
* it may be better to ignore an IPMI_UNKNOWN_ERR_COMPLETION_CODE
* return code.
*/
if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
" IPMI error 0x%x\n", rv);
goto out;
}
if (atca_oem_poweroff_hook)
atca_oem_poweroff_hook(user);
out:
return;
}
/*
* CPI1 Support
*/
#define IPMI_NETFN_OEM_1 0xf8
#define OEM_GRP_CMD_SET_RESET_STATE 0x84
#define OEM_GRP_CMD_SET_POWER_STATE 0x82
#define IPMI_NETFN_OEM_8 0xf8
#define OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL 0x80
#define OEM_GRP_CMD_GET_SLOT_GA 0xa3
#define IPMI_NETFN_SENSOR_EVT 0x10
#define IPMI_CMD_GET_EVENT_RECEIVER 0x01
#define IPMI_CPI1_PRODUCT_ID 0x000157
#define IPMI_CPI1_MANUFACTURER_ID 0x0108
static int ipmi_cpi1_detect(ipmi_user_t user)
{
return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID)
&& (prod_id == IPMI_CPI1_PRODUCT_ID));
}
static void ipmi_poweroff_cpi1(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct ipmi_ipmb_addr ipmb_addr;
struct kernel_ipmi_msg send_msg;
int rv;
unsigned char data[1];
int slot;
unsigned char hotswap_ipmb;
unsigned char aer_addr;
unsigned char aer_lun;
/*
* Configure IPMI address for local access
*/
smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
printk(KERN_INFO PFX "Powering down via CPI1 power command\n");
/*
* Get IPMI ipmb address
*/
send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
send_msg.cmd = OEM_GRP_CMD_GET_SLOT_GA;
send_msg.data = NULL;
send_msg.data_len = 0;
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv)
goto out;
slot = halt_recv_msg.msg.data[1];
hotswap_ipmb = (slot > 9) ? (0xb0 + 2 * slot) : (0xae + 2 * slot);
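/*
 * Illustrative arithmetic: slot 5 yields 0xae + 10 = 0xb8 and slot 10
 * yields 0xb0 + 20 = 0xc4, so both formulas produce even IPMB
 * addresses across the two slot ranges.
 */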
/*
* Get active event receiver
*/
send_msg.netfn = IPMI_NETFN_SENSOR_EVT >> 2;
send_msg.cmd = IPMI_CMD_GET_EVENT_RECEIVER;
send_msg.data = NULL;
send_msg.data_len = 0;
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv)
goto out;
aer_addr = halt_recv_msg.msg.data[1];
aer_lun = halt_recv_msg.msg.data[2];
/*
* Setup IPMB address target instead of local target
*/
ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
ipmb_addr.channel = 0;
ipmb_addr.slave_addr = aer_addr;
ipmb_addr.lun = aer_lun;
/*
* Send request hotswap control to remove blade from dpv
*/
send_msg.netfn = IPMI_NETFN_OEM_8 >> 2;
send_msg.cmd = OEM_GRP_CMD_REQUEST_HOTSWAP_CTRL;
send_msg.data = &hotswap_ipmb;
send_msg.data_len = 1;
ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &ipmb_addr,
&send_msg);
/*
* Set reset asserted
*/
send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
send_msg.cmd = OEM_GRP_CMD_SET_RESET_STATE;
send_msg.data = data;
data[0] = 1; /* Reset asserted state */
send_msg.data_len = 1;
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv)
goto out;
/*
* Power down
*/
send_msg.netfn = IPMI_NETFN_OEM_1 >> 2;
send_msg.cmd = OEM_GRP_CMD_SET_POWER_STATE;
send_msg.data = data;
data[0] = 1; /* Power down state */
send_msg.data_len = 1;
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv)
goto out;
out:
return;
}
/*
* ipmi_dell_chassis_detect()
* Dell systems with IPMI < 1.5 don't set the chassis capability bit
* but they can handle a chassis poweroff or powercycle command.
*/
#define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
static int ipmi_dell_chassis_detect(ipmi_user_t user)
{
const char ipmi_version_major = ipmi_version & 0xF;
const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
const char mfr[3] = DELL_IANA_MFR_ID;
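/*
 * Note: comparing DELL_IANA_MFR_ID byte-wise against the storage of
 * mfg_id assumes a little-endian unsigned int, since mfg_id was
 * assembled LSB-first from the get-device-id response.
 */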
if (!memcmp(mfr, &mfg_id, sizeof(mfr)) &&
ipmi_version_major <= 1 &&
ipmi_version_minor < 5)
return 1;
return 0;
}
/*
* Standard chassis support
*/
#define IPMI_NETFN_CHASSIS_REQUEST 0
#define IPMI_CHASSIS_CONTROL_CMD 0x02
static int ipmi_chassis_detect(ipmi_user_t user)
{
/* Chassis support, use it. */
return (capabilities & 0x80);
}
static void ipmi_poweroff_chassis(ipmi_user_t user)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
int rv;
unsigned char data[1];
/*
* Configure IPMI address for local access
*/
smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
powercyclefailed:
printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n",
(poweroff_powercycle ? "cycle" : "down"));
/*
* Power down
*/
send_msg.netfn = IPMI_NETFN_CHASSIS_REQUEST;
send_msg.cmd = IPMI_CHASSIS_CONTROL_CMD;
if (poweroff_powercycle)
data[0] = IPMI_CHASSIS_POWER_CYCLE;
else
data[0] = IPMI_CHASSIS_POWER_DOWN;
send_msg.data = data;
send_msg.data_len = sizeof(data);
rv = ipmi_request_in_rc_mode(user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv) {
if (poweroff_powercycle) {
/* power cycle failed, default to power down */
printk(KERN_ERR PFX "Unable to send chassis power " \
"cycle message, IPMI error 0x%x\n", rv);
poweroff_powercycle = 0;
goto powercyclefailed;
}
printk(KERN_ERR PFX "Unable to send chassis power " \
"down message, IPMI error 0x%x\n", rv);
}
}
/* Table of possible power off functions. */
struct poweroff_function {
char *platform_type;
int (*detect)(ipmi_user_t user);
void (*poweroff_func)(ipmi_user_t user);
};
static struct poweroff_function poweroff_functions[] = {
{ .platform_type = "ATCA",
.detect = ipmi_atca_detect,
.poweroff_func = ipmi_poweroff_atca },
{ .platform_type = "CPI1",
.detect = ipmi_cpi1_detect,
.poweroff_func = ipmi_poweroff_cpi1 },
{ .platform_type = "chassis",
.detect = ipmi_dell_chassis_detect,
.poweroff_func = ipmi_poweroff_chassis },
/* Chassis should generally be last, other things should override
it. */
{ .platform_type = "chassis",
.detect = ipmi_chassis_detect,
.poweroff_func = ipmi_poweroff_chassis },
};
#define NUM_PO_FUNCS (sizeof(poweroff_functions) \
/ sizeof(struct poweroff_function))
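/* This is the usual array-size idiom; it is equivalent to the kernel's
   ARRAY_SIZE(poweroff_functions). */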
/* Called on a powerdown request. */
static void ipmi_poweroff_function(void)
{
if (!ready)
return;
/* Use run-to-completion mode, since interrupts may be off. */
specific_poweroff_func(ipmi_user);
}
/* Wait for an IPMI interface to be installed; the first one installed
will be grabbed by this code and used to perform the powerdown. */
static void ipmi_po_new_smi(int if_num, struct device *device)
{
struct ipmi_system_interface_addr smi_addr;
struct kernel_ipmi_msg send_msg;
int rv;
int i;
if (ready)
return;
if ((ifnum_to_use >= 0) && (ifnum_to_use != if_num))
return;
rv = ipmi_create_user(if_num, &ipmi_poweroff_handler, NULL,
&ipmi_user);
if (rv) {
printk(KERN_ERR PFX "could not create IPMI user, error %d\n",
rv);
return;
}
ipmi_ifnum = if_num;
/*
* Do a get device id and store some results, since this is
* used by several functions.
*/
smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
smi_addr.channel = IPMI_BMC_CHANNEL;
smi_addr.lun = 0;
send_msg.netfn = IPMI_NETFN_APP_REQUEST;
send_msg.cmd = IPMI_GET_DEVICE_ID_CMD;
send_msg.data = NULL;
send_msg.data_len = 0;
rv = ipmi_request_wait_for_response(ipmi_user,
(struct ipmi_addr *) &smi_addr,
&send_msg);
if (rv) {
printk(KERN_ERR PFX "Unable to send IPMI get device id info,"
" IPMI error 0x%x\n", rv);
goto out_err;
}
if (halt_recv_msg.msg.data_len < 12) {
printk(KERN_ERR PFX "(chassis) IPMI get device id info too,"
" short, was %d bytes, needed %d bytes\n",
halt_recv_msg.msg.data_len, 12);
goto out_err;
}
mfg_id = (halt_recv_msg.msg.data[7]
| (halt_recv_msg.msg.data[8] << 8)
| (halt_recv_msg.msg.data[9] << 16));
prod_id = (halt_recv_msg.msg.data[10]
| (halt_recv_msg.msg.data[11] << 8));
capabilities = halt_recv_msg.msg.data[6];
ipmi_version = halt_recv_msg.msg.data[5];
/* Scan for a poweroff method */
for (i = 0; i < NUM_PO_FUNCS; i++) {
if (poweroff_functions[i].detect(ipmi_user))
goto found;
}
out_err:
printk(KERN_ERR PFX "Unable to find a poweroff function that"
" will work, giving up\n");
ipmi_destroy_user(ipmi_user);
return;
found:
printk(KERN_INFO PFX "Found a %s style poweroff function\n",
poweroff_functions[i].platform_type);
specific_poweroff_func = poweroff_functions[i].poweroff_func;
old_poweroff_func = pm_power_off;
pm_power_off = ipmi_poweroff_function;
ready = 1;
}
static void ipmi_po_smi_gone(int if_num)
{
if (!ready)
return;
if (ipmi_ifnum != if_num)
return;
ready = 0;
ipmi_destroy_user(ipmi_user);
pm_power_off = old_poweroff_func;
}
static struct ipmi_smi_watcher smi_watcher = {
.owner = THIS_MODULE,
.new_smi = ipmi_po_new_smi,
.smi_gone = ipmi_po_smi_gone
};
#ifdef CONFIG_PROC_FS
#include <linux/sysctl.h>
static ctl_table ipmi_table[] = {
{ .procname = "poweroff_powercycle",
.data = &poweroff_powercycle,
.maxlen = sizeof(poweroff_powercycle),
.mode = 0644,
.proc_handler = proc_dointvec },
{ }
};
static ctl_table ipmi_dir_table[] = {
{ .procname = "ipmi",
.mode = 0555,
.child = ipmi_table },
{ }
};
static ctl_table ipmi_root_table[] = {
{ .procname = "dev",
.mode = 0555,
.child = ipmi_dir_table },
{ }
};
static struct ctl_table_header *ipmi_table_header;
#endif /* CONFIG_PROC_FS */
/*
* Startup and shutdown functions.
*/
static int __init ipmi_poweroff_init(void)
{
int rv;
printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -"
" IPMI Powerdown via sys_reboot.\n");
if (poweroff_powercycle)
printk(KERN_INFO PFX "Power cycle is enabled.\n");
#ifdef CONFIG_PROC_FS
ipmi_table_header = register_sysctl_table(ipmi_root_table);
if (!ipmi_table_header) {
printk(KERN_ERR PFX "Unable to register powercycle sysctl\n");
rv = -ENOMEM;
goto out_err;
}
#endif
rv = ipmi_smi_watcher_register(&smi_watcher);
#ifdef CONFIG_PROC_FS
if (rv) {
unregister_sysctl_table(ipmi_table_header);
printk(KERN_ERR PFX "Unable to register SMI watcher: %d\n", rv);
goto out_err;
}
out_err:
#endif
return rv;
}
#ifdef MODULE
static void __exit ipmi_poweroff_cleanup(void)
{
int rv;
#ifdef CONFIG_PROC_FS
unregister_sysctl_table(ipmi_table_header);
#endif
ipmi_smi_watcher_unregister(&smi_watcher);
if (ready) {
rv = ipmi_destroy_user(ipmi_user);
if (rv)
printk(KERN_ERR PFX "could not cleanup the IPMI"
" user: 0x%x\n", rv);
pm_power_off = old_poweroff_func;
}
}
module_exit(ipmi_poweroff_cleanup);
#endif
module_init(ipmi_poweroff_init);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("IPMI Poweroff extension to sys_reboot");
| gpl-2.0 |
kamiyo/dolphin | Externals/wxWidgets3/src/gtk/clrpicker.cpp | 82 | 3145 | /////////////////////////////////////////////////////////////////////////////
// Name: src/gtk/clrpicker.cpp
// Purpose: implementation of wxColourButton
// Author: Francesco Montorsi
// Modified By:
// Created: 15/04/2006
// Copyright: (c) Francesco Montorsi
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////////
// ----------------------------------------------------------------------------
// headers
// ----------------------------------------------------------------------------
// For compilers that support precompilation, includes "wx.h".
#include "wx/wxprec.h"
#if wxUSE_COLOURPICKERCTRL
#include "wx/clrpicker.h"
#include <gtk/gtk.h>
// ============================================================================
// implementation
// ============================================================================
//-----------------------------------------------------------------------------
// "color-set"
//-----------------------------------------------------------------------------
extern "C" {
static void gtk_clrbutton_setcolor_callback(GtkColorButton *widget,
wxColourButton *p)
{
// update the m_colour member of the wxColourButton
wxASSERT(p);
#ifdef __WXGTK3__
GdkRGBA gdkColor;
gtk_color_button_get_rgba(widget, &gdkColor);
#else
GdkColor gdkColor;
gtk_color_button_get_color(widget, &gdkColor);
#endif
p->GTKSetColour(gdkColor);
// fire the colour-changed event
wxColourPickerEvent event(p, p->GetId(), p->GetColour());
p->HandleWindowEvent(event);
}
}
//-----------------------------------------------------------------------------
// wxColourButton
//-----------------------------------------------------------------------------
IMPLEMENT_DYNAMIC_CLASS(wxColourButton, wxButton)
bool wxColourButton::Create( wxWindow *parent, wxWindowID id,
const wxColour &col,
const wxPoint &pos, const wxSize &size,
long style, const wxValidator& validator,
const wxString &name )
{
if (!PreCreation( parent, pos, size ) ||
!wxControl::CreateBase(parent, id, pos, size, style, validator, name))
{
wxFAIL_MSG( wxT("wxColourButton creation failed") );
return false;
}
m_colour = col;
#ifdef __WXGTK3__
m_widget = gtk_color_button_new_with_rgba(m_colour);
#else
m_widget = gtk_color_button_new_with_color( m_colour.GetColor() );
#endif
g_object_ref(m_widget);
// GtkColourButton signals
g_signal_connect(m_widget, "color-set",
G_CALLBACK(gtk_clrbutton_setcolor_callback), this);
m_parent->DoAddChild( this );
PostCreation(size);
SetInitialSize(size);
return true;
}
wxColourButton::~wxColourButton()
{
}
void wxColourButton::UpdateColour()
{
#ifdef __WXGTK3__
gtk_color_button_set_rgba(GTK_COLOR_BUTTON(m_widget), m_colour);
#else
gtk_color_button_set_color(GTK_COLOR_BUTTON(m_widget), m_colour.GetColor());
#endif
}
#endif // wxUSE_COLOURPICKERCTRL
| gpl-2.0 |
cocasse/linux-h3 | arch/m68k/coldfire/m53xx.c | 82 | 15453 | /***************************************************************************/
/*
* m53xx.c -- platform support for ColdFire 53xx based boards
*
* Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
* Copyright (C) 2000, Lineo (www.lineo.com)
* Yaroslav Vinogradov yaroslav.vinogradov@freescale.com
* Copyright Freescale Semiconductor, Inc 2006
* Copyright (c) 2006, emlix, Sebastian Hess <shess@hessware.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
/***************************************************************************/
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
#include <asm/mcfdma.h>
#include <asm/mcfwdebug.h>
#include <asm/mcfclk.h>
/***************************************************************************/
DEFINE_CLK(0, "flexbus", 2, MCF_CLK);
DEFINE_CLK(0, "mcfcan.0", 8, MCF_CLK);
DEFINE_CLK(0, "fec.0", 12, MCF_CLK);
DEFINE_CLK(0, "edma", 17, MCF_CLK);
DEFINE_CLK(0, "intc.0", 18, MCF_CLK);
DEFINE_CLK(0, "intc.1", 19, MCF_CLK);
DEFINE_CLK(0, "iack.0", 21, MCF_CLK);
DEFINE_CLK(0, "mcfi2c.0", 22, MCF_CLK);
DEFINE_CLK(0, "mcfqspi.0", 23, MCF_CLK);
DEFINE_CLK(0, "mcfuart.0", 24, MCF_BUSCLK);
DEFINE_CLK(0, "mcfuart.1", 25, MCF_BUSCLK);
DEFINE_CLK(0, "mcfuart.2", 26, MCF_BUSCLK);
DEFINE_CLK(0, "mcftmr.0", 28, MCF_CLK);
DEFINE_CLK(0, "mcftmr.1", 29, MCF_CLK);
DEFINE_CLK(0, "mcftmr.2", 30, MCF_CLK);
DEFINE_CLK(0, "mcftmr.3", 31, MCF_CLK);
DEFINE_CLK(0, "mcfpit.0", 32, MCF_CLK);
DEFINE_CLK(0, "mcfpit.1", 33, MCF_CLK);
DEFINE_CLK(0, "mcfpit.2", 34, MCF_CLK);
DEFINE_CLK(0, "mcfpit.3", 35, MCF_CLK);
DEFINE_CLK(0, "mcfpwm.0", 36, MCF_CLK);
DEFINE_CLK(0, "mcfeport.0", 37, MCF_CLK);
DEFINE_CLK(0, "mcfwdt.0", 38, MCF_CLK);
DEFINE_CLK(0, "sys.0", 40, MCF_BUSCLK);
DEFINE_CLK(0, "gpio.0", 41, MCF_BUSCLK);
DEFINE_CLK(0, "mcfrtc.0", 42, MCF_CLK);
DEFINE_CLK(0, "mcflcd.0", 43, MCF_CLK);
DEFINE_CLK(0, "mcfusb-otg.0", 44, MCF_CLK);
DEFINE_CLK(0, "mcfusb-host.0", 45, MCF_CLK);
DEFINE_CLK(0, "sdram.0", 46, MCF_CLK);
DEFINE_CLK(0, "ssi.0", 47, MCF_CLK);
DEFINE_CLK(0, "pll.0", 48, MCF_CLK);
DEFINE_CLK(1, "mdha.0", 32, MCF_CLK);
DEFINE_CLK(1, "skha.0", 33, MCF_CLK);
DEFINE_CLK(1, "rng.0", 34, MCF_CLK);
struct clk *mcf_clks[] = {
&__clk_0_2, /* flexbus */
&__clk_0_8, /* mcfcan.0 */
&__clk_0_12, /* fec.0 */
&__clk_0_17, /* edma */
&__clk_0_18, /* intc.0 */
&__clk_0_19, /* intc.1 */
&__clk_0_21, /* iack.0 */
&__clk_0_22, /* mcfi2c.0 */
&__clk_0_23, /* mcfqspi.0 */
&__clk_0_24, /* mcfuart.0 */
&__clk_0_25, /* mcfuart.1 */
&__clk_0_26, /* mcfuart.2 */
&__clk_0_28, /* mcftmr.0 */
&__clk_0_29, /* mcftmr.1 */
&__clk_0_30, /* mcftmr.2 */
&__clk_0_31, /* mcftmr.3 */
&__clk_0_32, /* mcfpit.0 */
&__clk_0_33, /* mcfpit.1 */
&__clk_0_34, /* mcfpit.2 */
&__clk_0_35, /* mcfpit.3 */
&__clk_0_36, /* mcfpwm.0 */
&__clk_0_37, /* mcfeport.0 */
&__clk_0_38, /* mcfwdt.0 */
&__clk_0_40, /* sys.0 */
&__clk_0_41, /* gpio.0 */
&__clk_0_42, /* mcfrtc.0 */
&__clk_0_43, /* mcflcd.0 */
&__clk_0_44, /* mcfusb-otg.0 */
&__clk_0_45, /* mcfusb-host.0 */
&__clk_0_46, /* sdram.0 */
&__clk_0_47, /* ssi.0 */
&__clk_0_48, /* pll.0 */
&__clk_1_32, /* mdha.0 */
&__clk_1_33, /* skha.0 */
&__clk_1_34, /* rng.0 */
NULL,
};
static struct clk * const enable_clks[] __initconst = {
&__clk_0_2, /* flexbus */
&__clk_0_18, /* intc.0 */
&__clk_0_19, /* intc.1 */
&__clk_0_21, /* iack.0 */
&__clk_0_24, /* mcfuart.0 */
&__clk_0_25, /* mcfuart.1 */
&__clk_0_26, /* mcfuart.2 */
&__clk_0_28, /* mcftmr.0 */
&__clk_0_29, /* mcftmr.1 */
&__clk_0_32, /* mcfpit.0 */
&__clk_0_33, /* mcfpit.1 */
&__clk_0_37, /* mcfeport.0 */
&__clk_0_40, /* sys.0 */
&__clk_0_41, /* gpio.0 */
&__clk_0_46, /* sdram.0 */
&__clk_0_48, /* pll.0 */
};
static struct clk * const disable_clks[] __initconst = {
&__clk_0_8, /* mcfcan.0 */
&__clk_0_12, /* fec.0 */
&__clk_0_17, /* edma */
&__clk_0_22, /* mcfi2c.0 */
&__clk_0_23, /* mcfqspi.0 */
&__clk_0_30, /* mcftmr.2 */
&__clk_0_31, /* mcftmr.3 */
&__clk_0_34, /* mcfpit.2 */
&__clk_0_35, /* mcfpit.3 */
&__clk_0_36, /* mcfpwm.0 */
&__clk_0_38, /* mcfwdt.0 */
&__clk_0_42, /* mcfrtc.0 */
&__clk_0_43, /* mcflcd.0 */
&__clk_0_44, /* mcfusb-otg.0 */
&__clk_0_45, /* mcfusb-host.0 */
&__clk_0_47, /* ssi.0 */
&__clk_1_32, /* mdha.0 */
&__clk_1_33, /* skha.0 */
&__clk_1_34, /* rng.0 */
};
static void __init m53xx_clk_init(void)
{
unsigned i;
/* make sure these clocks are enabled */
for (i = 0; i < ARRAY_SIZE(enable_clks); ++i)
__clk_init_enabled(enable_clks[i]);
/* make sure these clocks are disabled */
for (i = 0; i < ARRAY_SIZE(disable_clks); ++i)
__clk_init_disabled(disable_clks[i]);
}
/***************************************************************************/
static void __init m53xx_qspi_init(void)
{
#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
/* setup QSPS pins for QSPI with gpio CS control */
writew(0x01f0, MCFGPIO_PAR_QSPI);
#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
}
/***************************************************************************/
static void __init m53xx_uarts_init(void)
{
/* UART GPIO initialization */
writew(readw(MCFGPIO_PAR_UART) | 0x0FFF, MCFGPIO_PAR_UART);
}
/***************************************************************************/
static void __init m53xx_fec_init(void)
{
u8 v;
/* Set multi-function pins to ethernet mode for fec0 */
v = readb(MCFGPIO_PAR_FECI2C);
v |= MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC |
MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO;
writeb(v, MCFGPIO_PAR_FECI2C);
v = readb(MCFGPIO_PAR_FEC);
v = MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC | MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC;
writeb(v, MCFGPIO_PAR_FEC);
}
/***************************************************************************/
void __init config_BSP(char *commandp, int size)
{
#if !defined(CONFIG_BOOTPARAM)
/* Copy command line from FLASH to local buffer... */
memcpy(commandp, (char *) 0x4000, 4);
if (strncmp(commandp, "kcl ", 4) == 0) {
memcpy(commandp, (char *) 0x4004, size);
commandp[size-1] = 0;
} else {
memset(commandp, 0, size);
}
#endif
mach_sched_init = hw_timer_init;
m53xx_clk_init();
m53xx_uarts_init();
m53xx_fec_init();
m53xx_qspi_init();
#ifdef CONFIG_BDM_DISABLE
/*
* Disable the BDM clocking. This also turns off most of the rest of
* the BDM device. This is good for EMC reasons. This option is not
* incompatible with the memory protection option.
*/
wdebug(MCFDEBUG_CSR, MCFDEBUG_CSR_PSTCLK);
#endif
}
/***************************************************************************/
/* Board initialization */
/***************************************************************************/
/*
* PLL min/max specifications
*/
#define MAX_FVCO 500000 /* KHz */
#define MAX_FSYS 80000 /* KHz */
#define MIN_FSYS 58333 /* KHz */
#define FREF 16000 /* KHz */
#define MAX_MFD 135 /* Multiplier */
#define MIN_MFD 88 /* Multiplier */
#define BUSDIV 6 /* Divider */
/*
* Low Power Divider specifications
*/
#define MIN_LPD (1 << 0) /* Divider (not encoded) */
#define MAX_LPD (1 << 15) /* Divider (not encoded) */
#define DEFAULT_LPD (1 << 1) /* Divider (not encoded) */
#define SYS_CLK_KHZ 80000
#define SYSTEM_PERIOD 12.5
/*
* SDRAM Timing Parameters
*/
#define SDRAM_BL 8 /* # of beats in a burst */
#define SDRAM_TWR 2 /* in clocks */
#define SDRAM_CASL 2.5 /* CASL in clocks */
#define SDRAM_TRCD 2 /* in clocks */
#define SDRAM_TRP 2 /* in clocks */
#define SDRAM_TRFC 7 /* in clocks */
#define SDRAM_TREFI 7800 /* in ns */
#define EXT_SRAM_ADDRESS (0xC0000000)
#define FLASH_ADDRESS (0x00000000)
#define SDRAM_ADDRESS (0x40000000)
#define NAND_FLASH_ADDRESS (0xD0000000)
void wtm_init(void);
void scm_init(void);
void gpio_init(void);
void fbcs_init(void);
void sdramc_init(void);
int clock_pll (int fsys, int flags);
int clock_limp (int);
int clock_exit_limp (void);
int get_sys_clock (void);
asmlinkage void __init sysinit(void)
{
clock_pll(0, 0);
wtm_init();
scm_init();
gpio_init();
fbcs_init();
sdramc_init();
}
void wtm_init(void)
{
/* Disable watchdog timer */
writew(0, MCF_WTM_WCR);
}
#define MCF_SCM_BCR_GBW (0x00000100)
#define MCF_SCM_BCR_GBR (0x00000200)
void scm_init(void)
{
/* All masters are trusted */
writel(0x77777777, MCF_SCM_MPR);
/* Allow supervisor/user, read/write, and trusted/untrusted
access to all slaves */
writel(0, MCF_SCM_PACRA);
writel(0, MCF_SCM_PACRB);
writel(0, MCF_SCM_PACRC);
writel(0, MCF_SCM_PACRD);
writel(0, MCF_SCM_PACRE);
writel(0, MCF_SCM_PACRF);
/* Enable bursts */
writel(MCF_SCM_BCR_GBR | MCF_SCM_BCR_GBW, MCF_SCM_BCR);
}
void fbcs_init(void)
{
writeb(0x3E, MCFGPIO_PAR_CS);
/* Latch chip select */
writel(0x10080000, MCF_FBCS1_CSAR);
writel(0x002A3780, MCF_FBCS1_CSCR);
writel(MCF_FBCS_CSMR_BAM_2M | MCF_FBCS_CSMR_V, MCF_FBCS1_CSMR);
/* Initialize latch to drive signals to inactive states */
writew(0xffff, 0x10080000);
/* External SRAM */
writel(EXT_SRAM_ADDRESS, MCF_FBCS1_CSAR);
writel(MCF_FBCS_CSCR_PS_16 |
MCF_FBCS_CSCR_AA |
MCF_FBCS_CSCR_SBM |
MCF_FBCS_CSCR_WS(1),
MCF_FBCS1_CSCR);
writel(MCF_FBCS_CSMR_BAM_512K | MCF_FBCS_CSMR_V, MCF_FBCS1_CSMR);
/* Boot Flash connected to FBCS0 */
writel(FLASH_ADDRESS, MCF_FBCS0_CSAR);
writel(MCF_FBCS_CSCR_PS_16 |
MCF_FBCS_CSCR_BEM |
MCF_FBCS_CSCR_AA |
MCF_FBCS_CSCR_SBM |
MCF_FBCS_CSCR_WS(7),
MCF_FBCS0_CSCR);
writel(MCF_FBCS_CSMR_BAM_32M | MCF_FBCS_CSMR_V, MCF_FBCS0_CSMR);
}
void sdramc_init(void)
{
/*
* Check to see if the SDRAM has already been initialized
* by a run control tool
*/
if (!(readl(MCF_SDRAMC_SDCR) & MCF_SDRAMC_SDCR_REF)) {
/* SDRAM chip select initialization */
/* Initialize SDRAM chip select */
writel(MCF_SDRAMC_SDCS_BA(SDRAM_ADDRESS) |
MCF_SDRAMC_SDCS_CSSZ(MCF_SDRAMC_SDCS_CSSZ_32MBYTE),
MCF_SDRAMC_SDCS0);
/*
* Basic configuration and initialization
*/
writel(MCF_SDRAMC_SDCFG1_SRD2RW((int)((SDRAM_CASL + 2) + 0.5)) |
MCF_SDRAMC_SDCFG1_SWT2RD(SDRAM_TWR + 1) |
MCF_SDRAMC_SDCFG1_RDLAT((int)((SDRAM_CASL * 2) + 2)) |
MCF_SDRAMC_SDCFG1_ACT2RW((int)(SDRAM_TRCD + 0.5)) |
MCF_SDRAMC_SDCFG1_PRE2ACT((int)(SDRAM_TRP + 0.5)) |
MCF_SDRAMC_SDCFG1_REF2ACT((int)(SDRAM_TRFC + 0.5)) |
MCF_SDRAMC_SDCFG1_WTLAT(3),
MCF_SDRAMC_SDCFG1);
writel(MCF_SDRAMC_SDCFG2_BRD2PRE(SDRAM_BL / 2 + 1) |
MCF_SDRAMC_SDCFG2_BWT2RW(SDRAM_BL / 2 + SDRAM_TWR) |
MCF_SDRAMC_SDCFG2_BRD2WT((int)((SDRAM_CASL + SDRAM_BL / 2 - 1.0) + 0.5)) |
MCF_SDRAMC_SDCFG2_BL(SDRAM_BL - 1),
MCF_SDRAMC_SDCFG2);
/*
* Precharge and enable write to SDMR
*/
writel(MCF_SDRAMC_SDCR_MODE_EN |
MCF_SDRAMC_SDCR_CKE |
MCF_SDRAMC_SDCR_DDR |
MCF_SDRAMC_SDCR_MUX(1) |
MCF_SDRAMC_SDCR_RCNT((int)(((SDRAM_TREFI / (SYSTEM_PERIOD * 64)) - 1) + 0.5)) |
MCF_SDRAMC_SDCR_PS_16 |
MCF_SDRAMC_SDCR_IPALL,
MCF_SDRAMC_SDCR);
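/*
 * Illustrative arithmetic for the RCNT field above: with
 * SDRAM_TREFI = 7800 ns and SYSTEM_PERIOD = 12.5 ns,
 * (7800 / (12.5 * 64)) - 1 + 0.5 = 9.25, which truncates to 9
 * refresh-counter units of 64 bus clocks each.
 */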
/*
* Write extended mode register
*/
writel(MCF_SDRAMC_SDMR_BNKAD_LEMR |
MCF_SDRAMC_SDMR_AD(0x0) |
MCF_SDRAMC_SDMR_CMD,
MCF_SDRAMC_SDMR);
/*
* Write mode register and reset DLL
*/
writel(MCF_SDRAMC_SDMR_BNKAD_LMR |
MCF_SDRAMC_SDMR_AD(0x163) |
MCF_SDRAMC_SDMR_CMD,
MCF_SDRAMC_SDMR);
/*
* Execute a PALL command
*/
writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_IPALL, MCF_SDRAMC_SDCR);
/*
* Perform two REF cycles
*/
writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_IREF, MCF_SDRAMC_SDCR);
writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_IREF, MCF_SDRAMC_SDCR);
/*
* Write mode register and clear reset DLL
*/
writel(MCF_SDRAMC_SDMR_BNKAD_LMR |
MCF_SDRAMC_SDMR_AD(0x063) |
MCF_SDRAMC_SDMR_CMD,
MCF_SDRAMC_SDMR);
/*
* Enable auto refresh and lock SDMR
*/
writel(readl(MCF_SDRAMC_SDCR) & ~MCF_SDRAMC_SDCR_MODE_EN,
MCF_SDRAMC_SDCR);
writel(MCF_SDRAMC_SDCR_REF | MCF_SDRAMC_SDCR_DQS_OE(0xC),
MCF_SDRAMC_SDCR);
}
}
void gpio_init(void)
{
/* Enable UART0 pins */
writew(MCF_GPIO_PAR_UART_PAR_URXD0 | MCF_GPIO_PAR_UART_PAR_UTXD0,
MCFGPIO_PAR_UART);
/*
* Initialize TIN3 as a GPIO output to enable the write
* half of the latch.
*/
writeb(0x00, MCFGPIO_PAR_TIMER);
writeb(0x08, MCFGPIO_PDDR_TIMER);
writeb(0x00, MCFGPIO_PCLRR_TIMER);
}
int clock_pll(int fsys, int flags)
{
int fref, temp, fout, mfd;
u32 i;
fref = FREF;
if (fsys == 0) {
/* Return current PLL output */
mfd = readb(MCF_PLL_PFDR);
return (fref * mfd / (BUSDIV * 4));
}
/* Check bounds of requested system clock */
if (fsys > MAX_FSYS)
fsys = MAX_FSYS;
if (fsys < MIN_FSYS)
fsys = MIN_FSYS;
/* Multiplying by 100 when calculating the temp value,
and then dividing by 100 to calculate the mfd allows
for exact values without needing to include floating
point libraries. */
temp = 100 * fsys / fref;
mfd = 4 * BUSDIV * temp / 100;
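/*
 * Illustrative arithmetic: requesting fsys = 80000 kHz with
 * fref = 16000 kHz gives temp = 100 * 80000 / 16000 = 500 and
 * mfd = 4 * 6 * 500 / 100 = 120, which lies inside the
 * MIN_MFD..MAX_MFD window of 88..135, and
 * fout = 16000 * 120 / (6 * 4) = 80000 kHz.
 */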
/* Determine the output frequency for selected values */
fout = (fref * mfd / (BUSDIV * 4));
/*
* Check to see if the SDRAM has already been initialized.
* If it has then the SDRAM needs to be put into self refresh
* mode before reprogramming the PLL.
*/
if (readl(MCF_SDRAMC_SDCR) & MCF_SDRAMC_SDCR_REF)
/* Put SDRAM into self refresh mode */
writel(readl(MCF_SDRAMC_SDCR) & ~MCF_SDRAMC_SDCR_CKE,
MCF_SDRAMC_SDCR);
/*
* Initialize the PLL to generate the new system clock frequency.
* The device must be put into LIMP mode to reprogram the PLL.
*/
/* Enter LIMP mode */
clock_limp(DEFAULT_LPD);
/* Reprogram PLL for desired fsys */
writeb(MCF_PLL_PODR_CPUDIV(BUSDIV/3) | MCF_PLL_PODR_BUSDIV(BUSDIV),
MCF_PLL_PODR);
writeb(mfd, MCF_PLL_PFDR);
/* Exit LIMP mode */
clock_exit_limp();
/*
* Return the SDRAM to normal operation if it is in use.
*/
if (readl(MCF_SDRAMC_SDCR) & MCF_SDRAMC_SDCR_REF)
/* Exit self refresh mode */
writel(readl(MCF_SDRAMC_SDCR) | MCF_SDRAMC_SDCR_CKE,
MCF_SDRAMC_SDCR);
/* Errata - workaround for SDRAM operation after exiting LIMP mode */
writel(MCF_SDRAMC_REFRESH, MCF_SDRAMC_LIMP_FIX);
/* wait for DQS logic to relock */
for (i = 0; i < 0x200; i++)
;
return fout;
}
int clock_limp(int div)
{
u32 temp;
/* Check bounds of divider */
if (div < MIN_LPD)
div = MIN_LPD;
if (div > MAX_LPD)
div = MAX_LPD;
/* Save off the current value of the SSIDIV so we don't
overwrite it */
temp = readw(MCF_CCM_CDR) & MCF_CCM_CDR_SSIDIV(0xF);
/* Apply the divider to the system clock */
writew(MCF_CCM_CDR_LPDIV(div) | MCF_CCM_CDR_SSIDIV(temp), MCF_CCM_CDR);
writew(readw(MCF_CCM_MISCCR) | MCF_CCM_MISCCR_LIMP, MCF_CCM_MISCCR);
return (FREF/(3*(1 << div)));
}
int clock_exit_limp(void)
{
int fout;
/* Exit LIMP mode */
writew(readw(MCF_CCM_MISCCR) & ~MCF_CCM_MISCCR_LIMP, MCF_CCM_MISCCR);
/* Wait for PLL to lock */
while (!(readw(MCF_CCM_MISCCR) & MCF_CCM_MISCCR_PLL_LOCK))
;
fout = get_sys_clock();
return fout;
}
int get_sys_clock(void)
{
int divider;
/* Test to see if device is in LIMP mode */
if (readw(MCF_CCM_MISCCR) & MCF_CCM_MISCCR_LIMP) {
divider = readw(MCF_CCM_CDR) & MCF_CCM_CDR_LPDIV(0xF);
return (FREF/(2 << divider));
}
else
return (FREF * readb(MCF_PLL_PFDR)) / (BUSDIV * 4);
}
| gpl-2.0 |
mrlambchop/imx23-kernel | arch/arm/mach-at91/at91sam9260.c | 82 | 10813 | /*
* arch/arm/mach-at91/at91sam9260.c
*
* Copyright (C) 2006 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/module.h>
#include <asm/proc-fns.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/system_misc.h>
#include <mach/cpu.h>
#include <mach/at91_dbgu.h>
#include <mach/at91sam9260.h>
#include <mach/at91_aic.h>
#include <mach/at91_pmc.h>
#include <mach/at91_rstc.h>
#include "soc.h"
#include "generic.h"
#include "clock.h"
#include "sam9_smc.h"
/* --------------------------------------------------------------------
* Clocks
* -------------------------------------------------------------------- */
/*
* The peripheral clocks.
*/
static struct clk pioA_clk = {
.name = "pioA_clk",
.pmc_mask = 1 << AT91SAM9260_ID_PIOA,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk pioB_clk = {
.name = "pioB_clk",
.pmc_mask = 1 << AT91SAM9260_ID_PIOB,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk pioC_clk = {
.name = "pioC_clk",
.pmc_mask = 1 << AT91SAM9260_ID_PIOC,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk adc_clk = {
.name = "adc_clk",
.pmc_mask = 1 << AT91SAM9260_ID_ADC,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk adc_op_clk = {
.name = "adc_op_clk",
.type = CLK_TYPE_PERIPHERAL,
.rate_hz = 5000000,
};
static struct clk usart0_clk = {
.name = "usart0_clk",
.pmc_mask = 1 << AT91SAM9260_ID_US0,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk usart1_clk = {
.name = "usart1_clk",
.pmc_mask = 1 << AT91SAM9260_ID_US1,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk usart2_clk = {
.name = "usart2_clk",
.pmc_mask = 1 << AT91SAM9260_ID_US2,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk mmc_clk = {
.name = "mci_clk",
.pmc_mask = 1 << AT91SAM9260_ID_MCI,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk udc_clk = {
.name = "udc_clk",
.pmc_mask = 1 << AT91SAM9260_ID_UDP,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk twi_clk = {
.name = "twi_clk",
.pmc_mask = 1 << AT91SAM9260_ID_TWI,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk spi0_clk = {
.name = "spi0_clk",
.pmc_mask = 1 << AT91SAM9260_ID_SPI0,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk spi1_clk = {
.name = "spi1_clk",
.pmc_mask = 1 << AT91SAM9260_ID_SPI1,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk ssc_clk = {
.name = "ssc_clk",
.pmc_mask = 1 << AT91SAM9260_ID_SSC,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk tc0_clk = {
.name = "tc0_clk",
.pmc_mask = 1 << AT91SAM9260_ID_TC0,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk tc1_clk = {
.name = "tc1_clk",
.pmc_mask = 1 << AT91SAM9260_ID_TC1,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk tc2_clk = {
.name = "tc2_clk",
.pmc_mask = 1 << AT91SAM9260_ID_TC2,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk ohci_clk = {
.name = "ohci_clk",
.pmc_mask = 1 << AT91SAM9260_ID_UHP,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk macb_clk = {
.name = "pclk",
.pmc_mask = 1 << AT91SAM9260_ID_EMAC,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk isi_clk = {
.name = "isi_clk",
.pmc_mask = 1 << AT91SAM9260_ID_ISI,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk usart3_clk = {
.name = "usart3_clk",
.pmc_mask = 1 << AT91SAM9260_ID_US3,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk usart4_clk = {
.name = "usart4_clk",
.pmc_mask = 1 << AT91SAM9260_ID_US4,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk usart5_clk = {
.name = "usart5_clk",
.pmc_mask = 1 << AT91SAM9260_ID_US5,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk tc3_clk = {
.name = "tc3_clk",
.pmc_mask = 1 << AT91SAM9260_ID_TC3,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk tc4_clk = {
.name = "tc4_clk",
.pmc_mask = 1 << AT91SAM9260_ID_TC4,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk tc5_clk = {
.name = "tc5_clk",
.pmc_mask = 1 << AT91SAM9260_ID_TC5,
.type = CLK_TYPE_PERIPHERAL,
};
static struct clk *periph_clocks[] __initdata = {
&pioA_clk,
&pioB_clk,
&pioC_clk,
&adc_clk,
&adc_op_clk,
&usart0_clk,
&usart1_clk,
&usart2_clk,
&mmc_clk,
&udc_clk,
&twi_clk,
&spi0_clk,
&spi1_clk,
&ssc_clk,
&tc0_clk,
&tc1_clk,
&tc2_clk,
&ohci_clk,
&macb_clk,
&isi_clk,
&usart3_clk,
&usart4_clk,
&usart5_clk,
&tc3_clk,
&tc4_clk,
&tc5_clk,
/* irq0 .. irq2 */
};
static struct clk_lookup periph_clocks_lookups[] = {
/* One additional fake clock for macb_hclk */
CLKDEV_CON_ID("hclk", &macb_clk),
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
/* additional usart lookups for DT entries */
CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
CLKDEV_CON_DEV_ID("usart", "fffb0000.serial", &usart0_clk),
CLKDEV_CON_DEV_ID("usart", "fffb4000.serial", &usart1_clk),
CLKDEV_CON_DEV_ID("usart", "fffb8000.serial", &usart2_clk),
CLKDEV_CON_DEV_ID("usart", "fffd0000.serial", &usart3_clk),
CLKDEV_CON_DEV_ID("usart", "fffd4000.serial", &usart4_clk),
CLKDEV_CON_DEV_ID("usart", "fffd8000.serial", &usart5_clk),
/* additional tc lookups for DT entries */
CLKDEV_CON_DEV_ID("t0_clk", "fffa0000.timer", &tc0_clk),
CLKDEV_CON_DEV_ID("t1_clk", "fffa0000.timer", &tc1_clk),
CLKDEV_CON_DEV_ID("t2_clk", "fffa0000.timer", &tc2_clk),
CLKDEV_CON_DEV_ID("t0_clk", "fffdc000.timer", &tc3_clk),
CLKDEV_CON_DEV_ID("t1_clk", "fffdc000.timer", &tc4_clk),
CLKDEV_CON_DEV_ID("t2_clk", "fffdc000.timer", &tc5_clk),
CLKDEV_CON_DEV_ID("hclk", "500000.ohci", &ohci_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
CLKDEV_CON_ID("pioA", &pioA_clk),
CLKDEV_CON_ID("pioB", &pioB_clk),
CLKDEV_CON_ID("pioC", &pioC_clk),
};
static struct clk_lookup usart_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("usart", "atmel_usart.0", &mck),
CLKDEV_CON_DEV_ID("usart", "atmel_usart.1", &usart0_clk),
CLKDEV_CON_DEV_ID("usart", "atmel_usart.2", &usart1_clk),
CLKDEV_CON_DEV_ID("usart", "atmel_usart.3", &usart2_clk),
CLKDEV_CON_DEV_ID("usart", "atmel_usart.4", &usart3_clk),
CLKDEV_CON_DEV_ID("usart", "atmel_usart.5", &usart4_clk),
CLKDEV_CON_DEV_ID("usart", "atmel_usart.6", &usart5_clk),
};
/*
* The two programmable clocks.
* You must configure pin multiplexing to bring these signals out.
*/
static struct clk pck0 = {
.name = "pck0",
.pmc_mask = AT91_PMC_PCK0,
.type = CLK_TYPE_PROGRAMMABLE,
.id = 0,
};
static struct clk pck1 = {
.name = "pck1",
.pmc_mask = AT91_PMC_PCK1,
.type = CLK_TYPE_PROGRAMMABLE,
.id = 1,
};
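/*
 * Editor's sketch (not part of the original file): one way a board file
 * might bring PCK0 out on a pin. This assumes <linux/clk.h> and
 * <linux/err.h> are available, that a lookup for "pck0" resolves via
 * clk_get(), and that AT91_PIN_PB31 is the (hypothetical) pin carrying
 * the PCK0 signal on the board in question.
 */
static int __init example_enable_pck0(void)
{
        struct clk *pck = clk_get(NULL, "pck0");

        if (IS_ERR(pck))
                return PTR_ERR(pck);

        at91_set_B_periph(AT91_PIN_PB31, 0);    /* mux the pin, no pull-up */
        clk_set_rate(pck, 12000000);            /* request 12 MHz */
        return clk_enable(pck);
}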
static void __init at91sam9260_register_clocks(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(periph_clocks); i++)
clk_register(periph_clocks[i]);
clkdev_add_table(periph_clocks_lookups,
ARRAY_SIZE(periph_clocks_lookups));
clkdev_add_table(usart_clocks_lookups,
ARRAY_SIZE(usart_clocks_lookups));
clk_register(&pck0);
clk_register(&pck1);
}
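/*
 * Editor's sketch (not part of the original file): how a driver consumes
 * the lookups registered above. clkdev matches on the connection id plus,
 * where given, the device name, so a device bound as "atmel_tcb.0"
 * resolves "t0_clk" to tc0_clk. Assumes <linux/clk.h>, <linux/err.h> and
 * <linux/device.h> are available.
 */
static int example_use_tcb_clock(struct device *dev)
{
        struct clk *t0 = clk_get(dev, "t0_clk");

        if (IS_ERR(t0))
                return PTR_ERR(t0);

        clk_enable(t0);                         /* ungate the peripheral clock */
        dev_info(dev, "t0_clk at %lu Hz\n", clk_get_rate(t0));
        return 0;
}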
/* --------------------------------------------------------------------
* GPIO
* -------------------------------------------------------------------- */
static struct at91_gpio_bank at91sam9260_gpio[] __initdata = {
{
.id = AT91SAM9260_ID_PIOA,
.regbase = AT91SAM9260_BASE_PIOA,
}, {
.id = AT91SAM9260_ID_PIOB,
.regbase = AT91SAM9260_BASE_PIOB,
}, {
.id = AT91SAM9260_ID_PIOC,
.regbase = AT91SAM9260_BASE_PIOC,
}
};
/* --------------------------------------------------------------------
* AT91SAM9260 processor initialization
* -------------------------------------------------------------------- */
static void __init at91sam9xe_map_io(void)
{
unsigned long sram_size;
switch (at91_soc_initdata.cidr & AT91_CIDR_SRAMSIZ) {
case AT91_CIDR_SRAMSIZ_32K:
sram_size = 2 * SZ_16K;
break;
case AT91_CIDR_SRAMSIZ_16K:
default:
sram_size = SZ_16K;
break;
}
at91_init_sram(0, AT91SAM9XE_SRAM_BASE, sram_size);
}
static void __init at91sam9260_map_io(void)
{
if (cpu_is_at91sam9xe())
at91sam9xe_map_io();
else if (cpu_is_at91sam9g20())
at91_init_sram(0, AT91SAM9G20_SRAM_BASE, AT91SAM9G20_SRAM_SIZE);
else
at91_init_sram(0, AT91SAM9260_SRAM_BASE, AT91SAM9260_SRAM_SIZE);
}
static void __init at91sam9260_ioremap_registers(void)
{
at91_ioremap_shdwc(AT91SAM9260_BASE_SHDWC);
at91_ioremap_rstc(AT91SAM9260_BASE_RSTC);
at91_ioremap_ramc(0, AT91SAM9260_BASE_SDRAMC, 512);
at91sam926x_ioremap_pit(AT91SAM9260_BASE_PIT);
at91sam9_ioremap_smc(0, AT91SAM9260_BASE_SMC);
at91_ioremap_matrix(AT91SAM9260_BASE_MATRIX);
}
static void __init at91sam9260_initialize(void)
{
arm_pm_idle = at91sam9_idle;
arm_pm_restart = at91sam9_alt_restart;
at91_extern_irq = (1 << AT91SAM9260_ID_IRQ0) | (1 << AT91SAM9260_ID_IRQ1)
| (1 << AT91SAM9260_ID_IRQ2);
/* Register GPIO subsystem */
at91_gpio_init(at91sam9260_gpio, 3);
}
/* --------------------------------------------------------------------
* Interrupt initialization
* -------------------------------------------------------------------- */
/*
* The default interrupt priority levels (0 = lowest, 7 = highest).
*/
static unsigned int at91sam9260_default_irq_priority[NR_AIC_IRQS] __initdata = {
7, /* Advanced Interrupt Controller (FIQ) */
7, /* System Peripherals */
1, /* Parallel IO Controller A */
1, /* Parallel IO Controller B */
1, /* Parallel IO Controller C */
0, /* Analog-to-Digital Converter */
5, /* USART 0 */
5, /* USART 1 */
5, /* USART 2 */
0, /* Multimedia Card Interface */
2, /* USB Device Port */
6, /* Two-Wire Interface */
5, /* Serial Peripheral Interface 0 */
5, /* Serial Peripheral Interface 1 */
5, /* Serial Synchronous Controller */
0, /* reserved */
0, /* reserved */
0, /* Timer Counter 0 */
0, /* Timer Counter 1 */
0, /* Timer Counter 2 */
2, /* USB Host port */
3, /* Ethernet */
0, /* Image Sensor Interface */
5, /* USART 3 */
5, /* USART 4 */
5, /* USART 5 */
0, /* Timer Counter 3 */
0, /* Timer Counter 4 */
0, /* Timer Counter 5 */
0, /* Advanced Interrupt Controller (IRQ0) */
0, /* Advanced Interrupt Controller (IRQ1) */
0, /* Advanced Interrupt Controller (IRQ2) */
};
struct at91_init_soc __initdata at91sam9260_soc = {
.map_io = at91sam9260_map_io,
.default_irq_priority = at91sam9260_default_irq_priority,
.ioremap_registers = at91sam9260_ioremap_registers,
.register_clocks = at91sam9260_register_clocks,
.init = at91sam9260_initialize,
};
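/*
 * Editor's note (not part of the original file): a hedged sketch of the
 * order in which the generic AT91 setup code is understood to call the
 * hooks above during boot; the sequencing, not the exact call sites, is
 * the point:
 *
 *   at91sam9260_soc.map_io();             map the on-chip SRAM
 *   at91sam9260_soc.ioremap_registers();  SHDWC, RSTC, SDRAMC, PIT, SMC, MATRIX
 *   at91sam9260_soc.register_clocks();    clk_register() plus clkdev tables
 *   at91sam9260_soc.init();               idle/restart hooks, extern IRQs, GPIO
 */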
| gpl-2.0 |
wolfgar/xbmc | xbmc/android/jni/ApplicationInfo.cpp | 82 | 1524 | /*
* Copyright (C) 2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "ApplicationInfo.h"
#include "jutils/jutils-details.hpp"
using namespace jni;
CJNIApplicationInfo::CJNIApplicationInfo(const jhobject &object) : CJNIBase(object)
,sourceDir( jcast<std::string>(get_field<jhstring>(m_object, "sourceDir")))
,publicSourceDir( jcast<std::string>(get_field<jhstring>(m_object, "publicSourceDir")))
,dataDir( jcast<std::string>(get_field<jhstring>(m_object, "dataDir")))
,nativeLibraryDir(jcast<std::string>(get_field<jhstring>(m_object, "nativeLibraryDir")))
,packageName( jcast<std::string>(get_field<jhstring>(m_object, "packageName")))
,uid( get_field<int>(m_object, "uid"))
,targetSdkVersion(get_field<int>(m_object, "targetSdkVersion"))
,enabled( get_field<jboolean>(m_object, "enabled"))
{
}
| gpl-2.0 |